/*	$NetBSD: if_wm.c,v 1.685 2020/08/05 03:17:18 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.685 2020/08/05 03:17:18 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */

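/*
 * A usage sketch (illustrative message, not part of the driver): the
 * first DPRINTF() argument selects one of the WM_DEBUG_* classes above,
 * and the second is a parenthesized printf() argument list, so the
 * whole call compiles away to nothing when WM_DEBUG is not defined.
 */
#if 0
DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n", device_xname(sc->sc_dev)));
#endif
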
#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

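/*
 * An illustrative sketch (not compiled into the driver): because
 * txq_ndesc is a power of two, WM_NEXTTX()/WM_NEXTTXS() advance and
 * wrap a ring index with a single AND instead of a modulo.  On a
 * 4096-entry ring the mask is 4095, so the index steps
 * 4094 -> 4095 -> 0.
 */
#if 0
static inline void
wm_ring_wrap_example(struct wm_txqueue *txq)
{
	int idx = WM_NTXDESC(txq) - 1;	/* last descriptor slot */

	idx = WM_NEXTTX(txq, idx);	/* wraps to the ring head */
	KASSERT(idx == 0);
}
#endif
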
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for
 * normal-sized packets.  Jumbo packets consume 5 Rx buffers for a
 * full-sized packet.  We allocate 256 receive descriptors, each with
 * a 2k buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

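/*
 * The arithmetic behind the comment above (a sketch, assuming a 9KB
 * jumbo frame): 9018 bytes / MCLBYTES (2048) rounds up to 5 buffers
 * per jumbo packet, and WM_NRXDESC (256) / 5 = 51, i.e. room for
 * roughly 50 in-flight jumbo packets.
 */
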
#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

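/*
 * An illustrative sketch (not compiled): for a queue declared with
 * WM_Q_EVCNT_DEFINE(txq, txdw), WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0,
 * xname) expands to roughly the following, producing a counter named
 * "txq00txdw":
 */
#if 0
snprintf(q->txq_txdw_evcnt_name, sizeof(q->txq_txdw_evcnt_name),
    "%s%02d%s", "txq", 0, "txdw");
evcnt_attach_dynamic(&q->txq_ev_txdw, EVCNT_TYPE_INTR, NULL, xname,
    q->txq_txdw_evcnt_name);
#endif
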
struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

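/*
 * An illustrative sketch (not compiled; m1 and m2 are hypothetical
 * mbufs): rxq_tailp always points at the m_next slot of the last mbuf
 * in the chain (or at rxq_head when the chain is empty), so
 * WM_RXCHAIN_LINK() appends in O(1) without walking the chain.
 */
#if 0
WM_RXCHAIN_RESET(rxq);		/* empty chain; rxq_tailp = &rxq_head */
WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head == m1, rxq_tail == m1 */
WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2, rxq_tail == m2 */
#endif
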
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

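/*
 * A sketch of the usual posted-write idiom (values are illustrative):
 * PCI writes may be posted by the bus, so a read of the STATUS
 * register is used to flush them to the chip before delaying or
 * acting on the result.
 */
#if 0
CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
CSR_WRITE_FLUSH(sc);		/* force the posted write out */
delay(10000);
#endif
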
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

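/*
 * An illustrative sketch (not compiled, assuming the queue-indexed
 * TDBAL/TDBAH register macros from if_wmreg.h): the _LO/_HI macros
 * above split a descriptor ring's DMA address into the two 32-bit
 * base-address registers; on a 32-bit bus_addr_t the high half is
 * simply 0.
 */
#if 0
CSR_WRITE(sc, WMREG_TDBAL(0), WM_CDTXADDR_LO(txq, 0));
CSR_WRITE(sc, WMREG_TDBAH(0), WM_CDTXADDR_HI(txq, 0));
#endif
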
/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
   1327 	  WM_T_ICH9,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1329 	  "82801I (C) LAN Controller",
   1330 	  WM_T_ICH9,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1332 	  "82801I mobile LAN Controller",
   1333 	  WM_T_ICH9,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1335 	  "82801I mobile (V) LAN Controller",
   1336 	  WM_T_ICH9,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1338 	  "82801I mobile (AMT) LAN Controller",
   1339 	  WM_T_ICH9,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1341 	  "82567LM-4 LAN Controller",
   1342 	  WM_T_ICH9,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1344 	  "82567LM-2 LAN Controller",
   1345 	  WM_T_ICH10,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1347 	  "82567LF-2 LAN Controller",
   1348 	  WM_T_ICH10,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1350 	  "82567LM-3 LAN Controller",
   1351 	  WM_T_ICH10,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1353 	  "82567LF-3 LAN Controller",
   1354 	  WM_T_ICH10,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1356 	  "82567V-2 LAN Controller",
   1357 	  WM_T_ICH10,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1359 	  "82567V-3? LAN Controller",
   1360 	  WM_T_ICH10,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1362 	  "HANKSVILLE LAN Controller",
   1363 	  WM_T_ICH10,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1365 	  "PCH LAN (82577LM) Controller",
   1366 	  WM_T_PCH,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1368 	  "PCH LAN (82577LC) Controller",
   1369 	  WM_T_PCH,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1371 	  "PCH LAN (82578DM) Controller",
   1372 	  WM_T_PCH,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1374 	  "PCH LAN (82578DC) Controller",
   1375 	  WM_T_PCH,		WMP_F_COPPER },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1377 	  "PCH2 LAN (82579LM) Controller",
   1378 	  WM_T_PCH2,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1380 	  "PCH2 LAN (82579V) Controller",
   1381 	  WM_T_PCH2,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1383 	  "82575EB dual-1000baseT Ethernet",
   1384 	  WM_T_82575,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1386 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1387 	  WM_T_82575,		WMP_F_SERDES },
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1389 	  "82575GB quad-1000baseT Ethernet",
   1390 	  WM_T_82575,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1392 	  "82575GB quad-1000baseT Ethernet (PM)",
   1393 	  WM_T_82575,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1395 	  "82576 1000BaseT Ethernet",
   1396 	  WM_T_82576,		WMP_F_COPPER },
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1398 	  "82576 1000BaseX Ethernet",
   1399 	  WM_T_82576,		WMP_F_FIBER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1402 	  "82576 gigabit Ethernet (SERDES)",
   1403 	  WM_T_82576,		WMP_F_SERDES },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1406 	  "82576 quad-1000BaseT Ethernet",
   1407 	  WM_T_82576,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1410 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1411 	  WM_T_82576,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1414 	  "82576 gigabit Ethernet",
   1415 	  WM_T_82576,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1418 	  "82576 gigabit Ethernet (SERDES)",
   1419 	  WM_T_82576,		WMP_F_SERDES },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1421 	  "82576 quad-gigabit Ethernet (SERDES)",
   1422 	  WM_T_82576,		WMP_F_SERDES },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1425 	  "82580 1000BaseT Ethernet",
   1426 	  WM_T_82580,		WMP_F_COPPER },
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1428 	  "82580 1000BaseX Ethernet",
   1429 	  WM_T_82580,		WMP_F_FIBER },
   1430 
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1432 	  "82580 1000BaseT Ethernet (SERDES)",
   1433 	  WM_T_82580,		WMP_F_SERDES },
   1434 
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1436 	  "82580 gigabit Ethernet (SGMII)",
   1437 	  WM_T_82580,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1439 	  "82580 dual-1000BaseT Ethernet",
   1440 	  WM_T_82580,		WMP_F_COPPER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1443 	  "82580 quad-1000BaseX Ethernet",
   1444 	  WM_T_82580,		WMP_F_FIBER },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1447 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1448 	  WM_T_82580,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1451 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1452 	  WM_T_82580,		WMP_F_SERDES },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1455 	  "DH89XXCC 1000BASE-KX Ethernet",
   1456 	  WM_T_82580,		WMP_F_SERDES },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1459 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1460 	  WM_T_82580,		WMP_F_SERDES },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1463 	  "I350 Gigabit Network Connection",
   1464 	  WM_T_I350,		WMP_F_COPPER },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1467 	  "I350 Gigabit Fiber Network Connection",
   1468 	  WM_T_I350,		WMP_F_FIBER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1471 	  "I350 Gigabit Backplane Connection",
   1472 	  WM_T_I350,		WMP_F_SERDES },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1475 	  "I350 Quad Port Gigabit Ethernet",
   1476 	  WM_T_I350,		WMP_F_SERDES },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1479 	  "I350 Gigabit Connection",
   1480 	  WM_T_I350,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1483 	  "I354 Gigabit Ethernet (KX)",
   1484 	  WM_T_I354,		WMP_F_SERDES },
   1485 
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1487 	  "I354 Gigabit Ethernet (SGMII)",
   1488 	  WM_T_I354,		WMP_F_COPPER },
   1489 
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1491 	  "I354 Gigabit Ethernet (2.5G)",
   1492 	  WM_T_I354,		WMP_F_COPPER },
   1493 
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1495 	  "I210-T1 Ethernet Server Adapter",
   1496 	  WM_T_I210,		WMP_F_COPPER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1499 	  "I210 Ethernet (Copper OEM)",
   1500 	  WM_T_I210,		WMP_F_COPPER },
   1501 
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1503 	  "I210 Ethernet (Copper IT)",
   1504 	  WM_T_I210,		WMP_F_COPPER },
   1505 
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1507 	  "I210 Ethernet (Copper, FLASH less)",
   1508 	  WM_T_I210,		WMP_F_COPPER },
   1509 
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1511 	  "I210 Gigabit Ethernet (Fiber)",
   1512 	  WM_T_I210,		WMP_F_FIBER },
   1513 
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1515 	  "I210 Gigabit Ethernet (SERDES)",
   1516 	  WM_T_I210,		WMP_F_SERDES },
   1517 
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1519 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1520 	  WM_T_I210,		WMP_F_SERDES },
   1521 
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1523 	  "I210 Gigabit Ethernet (SGMII)",
   1524 	  WM_T_I210,		WMP_F_COPPER },
   1525 
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1527 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1528 	  WM_T_I210,		WMP_F_COPPER },
   1529 
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1531 	  "I211 Ethernet (COPPER)",
   1532 	  WM_T_I211,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1534 	  "I217 V Ethernet Connection",
   1535 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1537 	  "I217 LM Ethernet Connection",
   1538 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1540 	  "I218 V Ethernet Connection",
   1541 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1543 	  "I218 V Ethernet Connection",
   1544 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1546 	  "I218 V Ethernet Connection",
   1547 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1549 	  "I218 LM Ethernet Connection",
   1550 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1552 	  "I218 LM Ethernet Connection",
   1553 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1555 	  "I218 LM Ethernet Connection",
   1556 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1558 	  "I219 LM Ethernet Connection",
   1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1561 	  "I219 LM Ethernet Connection",
   1562 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1564 	  "I219 LM Ethernet Connection",
   1565 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1567 	  "I219 LM Ethernet Connection",
   1568 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1570 	  "I219 LM Ethernet Connection",
   1571 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1573 	  "I219 LM Ethernet Connection",
   1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1576 	  "I219 LM Ethernet Connection",
   1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1579 	  "I219 LM Ethernet Connection",
   1580 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1581 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1582 	  "I219 LM Ethernet Connection",
   1583 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1585 	  "I219 LM Ethernet Connection",
   1586 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1588 	  "I219 LM Ethernet Connection",
   1589 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1591 	  "I219 LM Ethernet Connection",
   1592 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1593 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1594 	  "I219 LM Ethernet Connection",
   1595 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1597 	  "I219 LM Ethernet Connection",
   1598 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1600 	  "I219 LM Ethernet Connection",
   1601 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1603 	  "I219 V Ethernet Connection",
   1604 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1606 	  "I219 V Ethernet Connection",
   1607 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1609 	  "I219 V Ethernet Connection",
   1610 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1612 	  "I219 V Ethernet Connection",
   1613 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1615 	  "I219 V Ethernet Connection",
   1616 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1618 	  "I219 V Ethernet Connection",
   1619 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1621 	  "I219 V Ethernet Connection",
   1622 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1624 	  "I219 V Ethernet Connection",
   1625 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1627 	  "I219 V Ethernet Connection",
   1628 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1630 	  "I219 V Ethernet Connection",
   1631 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1633 	  "I219 V Ethernet Connection",
   1634 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1636 	  "I219 V Ethernet Connection",
   1637 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1639 	  "I219 V Ethernet Connection",
   1640 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1641 	{ 0,			0,
   1642 	  NULL,
   1643 	  0,			0 },
   1644 };
   1645 
   1646 /*
    1647  * Register read/write functions,
    1648  * other than CSR_{READ|WRITE}().
   1649  */
   1650 
   1651 #if 0 /* Not currently used */
   1652 static inline uint32_t
   1653 wm_io_read(struct wm_softc *sc, int reg)
   1654 {
   1655 
   1656 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1657 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1658 }
   1659 #endif
   1660 
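         /*
          * Write a device register through the I/O-mapped indirect access
          * window: the register offset goes into the address register at
          * offset 0, then the value into the data register at offset 4.
          */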
   1661 static inline void
   1662 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1663 {
   1664 
   1665 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1666 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1667 }
   1668 
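         /*
          * Write an 8-bit value to a register reached through one of the
          * 82575's address/data control registers: merge the data and the
          * register offset into a single write, then poll in 5us steps
          * until the chip sets the ready bit (or the poll times out).
          */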
   1669 static inline void
   1670 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1671     uint32_t data)
   1672 {
   1673 	uint32_t regval;
   1674 	int i;
   1675 
   1676 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1677 
   1678 	CSR_WRITE(sc, reg, regval);
   1679 
   1680 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1681 		delay(5);
   1682 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1683 			break;
   1684 	}
   1685 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1686 		aprint_error("%s: WARNING:"
   1687 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1688 		    device_xname(sc->sc_dev), reg);
   1689 	}
   1690 }
   1691 
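         /*
          * Store a DMA (bus) address into the two-word (low/high) address
          * field of a descriptor, in little-endian byte order.  The high
          * word is non-zero only on platforms with 64-bit bus addresses.
          */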
   1692 static inline void
   1693 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1694 {
   1695 	wa->wa_low = htole32(v & 0xffffffffU);
   1696 	if (sizeof(bus_addr_t) == 8)
   1697 		wa->wa_high = htole32((uint64_t) v >> 32);
   1698 	else
   1699 		wa->wa_high = 0;
   1700 }
   1701 
   1702 /*
   1703  * Descriptor sync/init functions.
   1704  */
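         /*
          * Sync 'num' transmit descriptors starting at 'start'.  If the
          * range wraps past the end of the ring, do it with two
          * bus_dmamap_sync() calls.
          */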
   1705 static inline void
   1706 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1707 {
   1708 	struct wm_softc *sc = txq->txq_sc;
   1709 
   1710 	/* If it will wrap around, sync to the end of the ring. */
   1711 	if ((start + num) > WM_NTXDESC(txq)) {
   1712 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1713 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1714 		    (WM_NTXDESC(txq) - start), ops);
   1715 		num -= (WM_NTXDESC(txq) - start);
   1716 		start = 0;
   1717 	}
   1718 
   1719 	/* Now sync whatever is left. */
   1720 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1721 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1722 }
   1723 
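         /* Sync the receive descriptor at 'start'. */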
   1724 static inline void
   1725 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1726 {
   1727 	struct wm_softc *sc = rxq->rxq_sc;
   1728 
   1729 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1730 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1731 }
   1732 
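         /*
          * (Re)initialize the receive descriptor at 'start' with the DMA
          * address of its mbuf, then hand it to the chip by writing the
          * ring's receive descriptor tail register.
          */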
   1733 static inline void
   1734 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1735 {
   1736 	struct wm_softc *sc = rxq->rxq_sc;
   1737 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1738 	struct mbuf *m = rxs->rxs_mbuf;
   1739 
   1740 	/*
   1741 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1742 	 * so that the payload after the Ethernet header is aligned
   1743 	 * to a 4-byte boundary.
    1744 	 *
   1745 	 * XXX BRAINDAMAGE ALERT!
   1746 	 * The stupid chip uses the same size for every buffer, which
   1747 	 * is set in the Receive Control register.  We are using the 2K
   1748 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1749 	 * reason, we can't "scoot" packets longer than the standard
   1750 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1751 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1752 	 * the upper layer copy the headers.
   1753 	 */
   1754 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1755 
   1756 	if (sc->sc_type == WM_T_82574) {
   1757 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1758 		rxd->erx_data.erxd_addr =
   1759 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1760 		rxd->erx_data.erxd_dd = 0;
   1761 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1762 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1763 
   1764 		rxd->nqrx_data.nrxd_paddr =
   1765 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1766 		/* Currently, split header is not supported. */
   1767 		rxd->nqrx_data.nrxd_haddr = 0;
   1768 	} else {
   1769 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1770 
   1771 		wm_set_dma_addr(&rxd->wrx_addr,
   1772 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1773 		rxd->wrx_len = 0;
   1774 		rxd->wrx_cksum = 0;
   1775 		rxd->wrx_status = 0;
   1776 		rxd->wrx_errors = 0;
   1777 		rxd->wrx_special = 0;
   1778 	}
   1779 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1780 
   1781 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1782 }
   1783 
   1784 /*
   1785  * Device driver interface functions and commonly used functions.
   1786  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1787  */
   1788 
   1789 /* Lookup supported device table */
   1790 static const struct wm_product *
   1791 wm_lookup(const struct pci_attach_args *pa)
   1792 {
   1793 	const struct wm_product *wmp;
   1794 
   1795 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1796 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1797 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1798 			return wmp;
   1799 	}
   1800 	return NULL;
   1801 }
   1802 
   1803 /* The match function (ca_match) */
   1804 static int
   1805 wm_match(device_t parent, cfdata_t cf, void *aux)
   1806 {
   1807 	struct pci_attach_args *pa = aux;
   1808 
   1809 	if (wm_lookup(pa) != NULL)
   1810 		return 1;
   1811 
   1812 	return 0;
   1813 }
   1814 
   1815 /* The attach function (ca_attach) */
   1816 static void
   1817 wm_attach(device_t parent, device_t self, void *aux)
   1818 {
   1819 	struct wm_softc *sc = device_private(self);
   1820 	struct pci_attach_args *pa = aux;
   1821 	prop_dictionary_t dict;
   1822 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1823 	pci_chipset_tag_t pc = pa->pa_pc;
   1824 	int counts[PCI_INTR_TYPE_SIZE];
   1825 	pci_intr_type_t max_type;
   1826 	const char *eetype, *xname;
   1827 	bus_space_tag_t memt;
   1828 	bus_space_handle_t memh;
   1829 	bus_size_t memsize;
   1830 	int memh_valid;
   1831 	int i, error;
   1832 	const struct wm_product *wmp;
   1833 	prop_data_t ea;
   1834 	prop_number_t pn;
   1835 	uint8_t enaddr[ETHER_ADDR_LEN];
   1836 	char buf[256];
   1837 	char wqname[MAXCOMLEN];
   1838 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1839 	pcireg_t preg, memtype;
   1840 	uint16_t eeprom_data, apme_mask;
   1841 	bool force_clear_smbi;
   1842 	uint32_t link_mode;
   1843 	uint32_t reg;
   1844 
   1845 	sc->sc_dev = self;
   1846 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1847 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1848 	sc->sc_core_stopping = false;
   1849 
   1850 	wmp = wm_lookup(pa);
   1851 #ifdef DIAGNOSTIC
   1852 	if (wmp == NULL) {
   1853 		printf("\n");
   1854 		panic("wm_attach: impossible");
   1855 	}
   1856 #endif
   1857 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1858 
   1859 	sc->sc_pc = pa->pa_pc;
   1860 	sc->sc_pcitag = pa->pa_tag;
   1861 
   1862 	if (pci_dma64_available(pa))
   1863 		sc->sc_dmat = pa->pa_dmat64;
   1864 	else
   1865 		sc->sc_dmat = pa->pa_dmat;
   1866 
   1867 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1868 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1869 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1870 
   1871 	sc->sc_type = wmp->wmp_type;
   1872 
   1873 	/* Set default function pointers */
   1874 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1875 	sc->phy.release = sc->nvm.release = wm_put_null;
   1876 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1877 
   1878 	if (sc->sc_type < WM_T_82543) {
   1879 		if (sc->sc_rev < 2) {
   1880 			aprint_error_dev(sc->sc_dev,
   1881 			    "i82542 must be at least rev. 2\n");
   1882 			return;
   1883 		}
   1884 		if (sc->sc_rev < 3)
   1885 			sc->sc_type = WM_T_82542_2_0;
   1886 	}
   1887 
   1888 	/*
   1889 	 * Disable MSI for Errata:
   1890 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1891 	 *
   1892 	 *  82544: Errata 25
   1893 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1894 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1895 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1896 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1897 	 *
   1898 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1899 	 *
   1900 	 *  82571 & 82572: Errata 63
   1901 	 */
   1902 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1903 	    || (sc->sc_type == WM_T_82572))
   1904 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1905 
   1906 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1907 	    || (sc->sc_type == WM_T_82580)
   1908 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1909 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1910 		sc->sc_flags |= WM_F_NEWQUEUE;
   1911 
   1912 	/* Set device properties (mactype) */
   1913 	dict = device_properties(sc->sc_dev);
   1914 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1915 
   1916 	/*
    1917 	 * Map the device.  All devices support memory-mapped access,
   1918 	 * and it is really required for normal operation.
   1919 	 */
   1920 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1921 	switch (memtype) {
   1922 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1923 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1924 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1925 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1926 		break;
   1927 	default:
   1928 		memh_valid = 0;
   1929 		break;
   1930 	}
   1931 
   1932 	if (memh_valid) {
   1933 		sc->sc_st = memt;
   1934 		sc->sc_sh = memh;
   1935 		sc->sc_ss = memsize;
   1936 	} else {
   1937 		aprint_error_dev(sc->sc_dev,
   1938 		    "unable to map device registers\n");
   1939 		return;
   1940 	}
   1941 
   1942 	/*
   1943 	 * In addition, i82544 and later support I/O mapped indirect
   1944 	 * register access.  It is not desirable (nor supported in
   1945 	 * this driver) to use it for normal operation, though it is
   1946 	 * required to work around bugs in some chip versions.
   1947 	 */
   1948 	if (sc->sc_type >= WM_T_82544) {
   1949 		/* First we have to find the I/O BAR. */
   1950 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1951 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1952 			if (memtype == PCI_MAPREG_TYPE_IO)
   1953 				break;
   1954 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1955 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1956 				i += 4;	/* skip high bits, too */
   1957 		}
   1958 		if (i < PCI_MAPREG_END) {
   1959 			/*
    1960 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1961 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1962 			 * That's no problem, because newer chips don't
    1963 			 * have this bug.
    1964 			 *
    1965 			 * The i8254x apparently doesn't respond when the
    1966 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1967 			 * been configured.
   1968 			 */
   1969 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1970 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1971 				aprint_error_dev(sc->sc_dev,
   1972 				    "WARNING: I/O BAR at zero.\n");
   1973 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1974 					0, &sc->sc_iot, &sc->sc_ioh,
   1975 					NULL, &sc->sc_ios) == 0) {
   1976 				sc->sc_flags |= WM_F_IOH_VALID;
   1977 			} else
   1978 				aprint_error_dev(sc->sc_dev,
   1979 				    "WARNING: unable to map I/O space\n");
   1980 		}
   1981 
   1982 	}
   1983 
   1984 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1985 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1986 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1987 	if (sc->sc_type < WM_T_82542_2_1)
   1988 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1989 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1990 
   1991 	/* Power up chip */
   1992 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1993 	    && error != EOPNOTSUPP) {
   1994 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1995 		return;
   1996 	}
   1997 
   1998 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1999 	/*
    2000 	 * Don't use MSI-X if we can use only one queue, to save
    2001 	 * interrupt resources.
   2002 	 */
   2003 	if (sc->sc_nqueues > 1) {
   2004 		max_type = PCI_INTR_TYPE_MSIX;
   2005 		/*
    2006 		 * The 82583 has an MSI-X capability in its PCI configuration
    2007 		 * space but doesn't actually support it; at least the
    2008 		 * documentation doesn't say anything about MSI-X.
   2009 		 */
   2010 		counts[PCI_INTR_TYPE_MSIX]
   2011 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2012 	} else {
   2013 		max_type = PCI_INTR_TYPE_MSI;
   2014 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2015 	}
   2016 
   2017 	/* Allocation settings */
   2018 	counts[PCI_INTR_TYPE_MSI] = 1;
   2019 	counts[PCI_INTR_TYPE_INTX] = 1;
   2020 	/* overridden by disable flags */
   2021 	if (wm_disable_msi != 0) {
   2022 		counts[PCI_INTR_TYPE_MSI] = 0;
   2023 		if (wm_disable_msix != 0) {
   2024 			max_type = PCI_INTR_TYPE_INTX;
   2025 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2026 		}
   2027 	} else if (wm_disable_msix != 0) {
   2028 		max_type = PCI_INTR_TYPE_MSI;
   2029 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2030 	}
   2031 
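         	/*
         	 * Try the interrupt types in order: MSI-X first, then MSI,
         	 * then INTx, falling back whenever allocation or setup fails.
         	 */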
   2032 alloc_retry:
   2033 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2034 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2035 		return;
   2036 	}
   2037 
   2038 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2039 		error = wm_setup_msix(sc);
   2040 		if (error) {
   2041 			pci_intr_release(pc, sc->sc_intrs,
   2042 			    counts[PCI_INTR_TYPE_MSIX]);
   2043 
   2044 			/* Setup for MSI: Disable MSI-X */
   2045 			max_type = PCI_INTR_TYPE_MSI;
   2046 			counts[PCI_INTR_TYPE_MSI] = 1;
   2047 			counts[PCI_INTR_TYPE_INTX] = 1;
   2048 			goto alloc_retry;
   2049 		}
   2050 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2051 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2052 		error = wm_setup_legacy(sc);
   2053 		if (error) {
   2054 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2055 			    counts[PCI_INTR_TYPE_MSI]);
   2056 
   2057 			/* The next try is for INTx: Disable MSI */
   2058 			max_type = PCI_INTR_TYPE_INTX;
   2059 			counts[PCI_INTR_TYPE_INTX] = 1;
   2060 			goto alloc_retry;
   2061 		}
   2062 	} else {
   2063 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2064 		error = wm_setup_legacy(sc);
   2065 		if (error) {
   2066 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2067 			    counts[PCI_INTR_TYPE_INTX]);
   2068 			return;
   2069 		}
   2070 	}
   2071 
   2072 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2073 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2074 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2075 	    WM_WORKQUEUE_FLAGS);
   2076 	if (error) {
   2077 		aprint_error_dev(sc->sc_dev,
   2078 		    "unable to create workqueue\n");
   2079 		goto out;
   2080 	}
   2081 
   2082 	/*
   2083 	 * Check the function ID (unit number of the chip).
   2084 	 */
   2085 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2086 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2087 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2088 	    || (sc->sc_type == WM_T_82580)
   2089 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2090 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2091 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2092 	else
   2093 		sc->sc_funcid = 0;
   2094 
   2095 	/*
   2096 	 * Determine a few things about the bus we're connected to.
   2097 	 */
   2098 	if (sc->sc_type < WM_T_82543) {
   2099 		/* We don't really know the bus characteristics here. */
   2100 		sc->sc_bus_speed = 33;
   2101 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2102 		/*
   2103 		 * CSA (Communication Streaming Architecture) is about as fast
    2104 		 * as a 32-bit 66MHz PCI bus.
   2105 		 */
   2106 		sc->sc_flags |= WM_F_CSA;
   2107 		sc->sc_bus_speed = 66;
   2108 		aprint_verbose_dev(sc->sc_dev,
   2109 		    "Communication Streaming Architecture\n");
   2110 		if (sc->sc_type == WM_T_82547) {
   2111 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2112 			callout_setfunc(&sc->sc_txfifo_ch,
   2113 			    wm_82547_txfifo_stall, sc);
   2114 			aprint_verbose_dev(sc->sc_dev,
   2115 			    "using 82547 Tx FIFO stall work-around\n");
   2116 		}
   2117 	} else if (sc->sc_type >= WM_T_82571) {
   2118 		sc->sc_flags |= WM_F_PCIE;
   2119 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2120 		    && (sc->sc_type != WM_T_ICH10)
   2121 		    && (sc->sc_type != WM_T_PCH)
   2122 		    && (sc->sc_type != WM_T_PCH2)
   2123 		    && (sc->sc_type != WM_T_PCH_LPT)
   2124 		    && (sc->sc_type != WM_T_PCH_SPT)
   2125 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2126 			/* ICH* and PCH* have no PCIe capability registers */
   2127 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2128 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2129 				NULL) == 0)
   2130 				aprint_error_dev(sc->sc_dev,
   2131 				    "unable to find PCIe capability\n");
   2132 		}
   2133 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2134 	} else {
   2135 		reg = CSR_READ(sc, WMREG_STATUS);
   2136 		if (reg & STATUS_BUS64)
   2137 			sc->sc_flags |= WM_F_BUS64;
   2138 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2139 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2140 
   2141 			sc->sc_flags |= WM_F_PCIX;
   2142 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2143 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2144 				aprint_error_dev(sc->sc_dev,
   2145 				    "unable to find PCIX capability\n");
   2146 			else if (sc->sc_type != WM_T_82545_3 &&
   2147 				 sc->sc_type != WM_T_82546_3) {
   2148 				/*
   2149 				 * Work around a problem caused by the BIOS
   2150 				 * setting the max memory read byte count
   2151 				 * incorrectly.
   2152 				 */
   2153 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2154 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2155 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2156 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2157 
   2158 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2159 				    PCIX_CMD_BYTECNT_SHIFT;
   2160 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2161 				    PCIX_STATUS_MAXB_SHIFT;
   2162 				if (bytecnt > maxb) {
   2163 					aprint_verbose_dev(sc->sc_dev,
   2164 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2165 					    512 << bytecnt, 512 << maxb);
   2166 					pcix_cmd = (pcix_cmd &
   2167 					    ~PCIX_CMD_BYTECNT_MASK) |
   2168 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2169 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2170 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2171 					    pcix_cmd);
   2172 				}
   2173 			}
   2174 		}
   2175 		/*
   2176 		 * The quad port adapter is special; it has a PCIX-PCIX
   2177 		 * bridge on the board, and can run the secondary bus at
   2178 		 * a higher speed.
   2179 		 */
   2180 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2181 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2182 								      : 66;
   2183 		} else if (sc->sc_flags & WM_F_PCIX) {
   2184 			switch (reg & STATUS_PCIXSPD_MASK) {
   2185 			case STATUS_PCIXSPD_50_66:
   2186 				sc->sc_bus_speed = 66;
   2187 				break;
   2188 			case STATUS_PCIXSPD_66_100:
   2189 				sc->sc_bus_speed = 100;
   2190 				break;
   2191 			case STATUS_PCIXSPD_100_133:
   2192 				sc->sc_bus_speed = 133;
   2193 				break;
   2194 			default:
   2195 				aprint_error_dev(sc->sc_dev,
   2196 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2197 				    reg & STATUS_PCIXSPD_MASK);
   2198 				sc->sc_bus_speed = 66;
   2199 				break;
   2200 			}
   2201 		} else
   2202 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2203 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2204 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2205 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2206 	}
   2207 
   2208 	/* clear interesting stat counters */
   2209 	CSR_READ(sc, WMREG_COLC);
   2210 	CSR_READ(sc, WMREG_RXERRC);
   2211 
   2212 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2213 	    || (sc->sc_type >= WM_T_ICH8))
   2214 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2215 	if (sc->sc_type >= WM_T_ICH8)
   2216 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2217 
   2218 	/* Set PHY, NVM mutex related stuff */
   2219 	switch (sc->sc_type) {
   2220 	case WM_T_82542_2_0:
   2221 	case WM_T_82542_2_1:
   2222 	case WM_T_82543:
   2223 	case WM_T_82544:
   2224 		/* Microwire */
   2225 		sc->nvm.read = wm_nvm_read_uwire;
   2226 		sc->sc_nvm_wordsize = 64;
   2227 		sc->sc_nvm_addrbits = 6;
   2228 		break;
   2229 	case WM_T_82540:
   2230 	case WM_T_82545:
   2231 	case WM_T_82545_3:
   2232 	case WM_T_82546:
   2233 	case WM_T_82546_3:
   2234 		/* Microwire */
   2235 		sc->nvm.read = wm_nvm_read_uwire;
   2236 		reg = CSR_READ(sc, WMREG_EECD);
   2237 		if (reg & EECD_EE_SIZE) {
   2238 			sc->sc_nvm_wordsize = 256;
   2239 			sc->sc_nvm_addrbits = 8;
   2240 		} else {
   2241 			sc->sc_nvm_wordsize = 64;
   2242 			sc->sc_nvm_addrbits = 6;
   2243 		}
   2244 		sc->sc_flags |= WM_F_LOCK_EECD;
   2245 		sc->nvm.acquire = wm_get_eecd;
   2246 		sc->nvm.release = wm_put_eecd;
   2247 		break;
   2248 	case WM_T_82541:
   2249 	case WM_T_82541_2:
   2250 	case WM_T_82547:
   2251 	case WM_T_82547_2:
   2252 		reg = CSR_READ(sc, WMREG_EECD);
   2253 		/*
    2254 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on the
    2255 		 * 8254[17], so set the flags and functions before calling it.
   2256 		 */
   2257 		sc->sc_flags |= WM_F_LOCK_EECD;
   2258 		sc->nvm.acquire = wm_get_eecd;
   2259 		sc->nvm.release = wm_put_eecd;
   2260 		if (reg & EECD_EE_TYPE) {
   2261 			/* SPI */
   2262 			sc->nvm.read = wm_nvm_read_spi;
   2263 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2264 			wm_nvm_set_addrbits_size_eecd(sc);
   2265 		} else {
   2266 			/* Microwire */
   2267 			sc->nvm.read = wm_nvm_read_uwire;
   2268 			if ((reg & EECD_EE_ABITS) != 0) {
   2269 				sc->sc_nvm_wordsize = 256;
   2270 				sc->sc_nvm_addrbits = 8;
   2271 			} else {
   2272 				sc->sc_nvm_wordsize = 64;
   2273 				sc->sc_nvm_addrbits = 6;
   2274 			}
   2275 		}
   2276 		break;
   2277 	case WM_T_82571:
   2278 	case WM_T_82572:
   2279 		/* SPI */
   2280 		sc->nvm.read = wm_nvm_read_eerd;
    2281 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2282 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2283 		wm_nvm_set_addrbits_size_eecd(sc);
   2284 		sc->phy.acquire = wm_get_swsm_semaphore;
   2285 		sc->phy.release = wm_put_swsm_semaphore;
   2286 		sc->nvm.acquire = wm_get_nvm_82571;
   2287 		sc->nvm.release = wm_put_nvm_82571;
   2288 		break;
   2289 	case WM_T_82573:
   2290 	case WM_T_82574:
   2291 	case WM_T_82583:
   2292 		sc->nvm.read = wm_nvm_read_eerd;
    2293 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2294 		if (sc->sc_type == WM_T_82573) {
   2295 			sc->phy.acquire = wm_get_swsm_semaphore;
   2296 			sc->phy.release = wm_put_swsm_semaphore;
   2297 			sc->nvm.acquire = wm_get_nvm_82571;
   2298 			sc->nvm.release = wm_put_nvm_82571;
   2299 		} else {
   2300 			/* Both PHY and NVM use the same semaphore. */
   2301 			sc->phy.acquire = sc->nvm.acquire
   2302 			    = wm_get_swfwhw_semaphore;
   2303 			sc->phy.release = sc->nvm.release
   2304 			    = wm_put_swfwhw_semaphore;
   2305 		}
   2306 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2307 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2308 			sc->sc_nvm_wordsize = 2048;
   2309 		} else {
   2310 			/* SPI */
   2311 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2312 			wm_nvm_set_addrbits_size_eecd(sc);
   2313 		}
   2314 		break;
   2315 	case WM_T_82575:
   2316 	case WM_T_82576:
   2317 	case WM_T_82580:
   2318 	case WM_T_I350:
   2319 	case WM_T_I354:
   2320 	case WM_T_80003:
   2321 		/* SPI */
   2322 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2323 		wm_nvm_set_addrbits_size_eecd(sc);
   2324 		if ((sc->sc_type == WM_T_80003)
   2325 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2326 			sc->nvm.read = wm_nvm_read_eerd;
   2327 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2328 		} else {
   2329 			sc->nvm.read = wm_nvm_read_spi;
   2330 			sc->sc_flags |= WM_F_LOCK_EECD;
   2331 		}
   2332 		sc->phy.acquire = wm_get_phy_82575;
   2333 		sc->phy.release = wm_put_phy_82575;
   2334 		sc->nvm.acquire = wm_get_nvm_80003;
   2335 		sc->nvm.release = wm_put_nvm_80003;
   2336 		break;
   2337 	case WM_T_ICH8:
   2338 	case WM_T_ICH9:
   2339 	case WM_T_ICH10:
   2340 	case WM_T_PCH:
   2341 	case WM_T_PCH2:
   2342 	case WM_T_PCH_LPT:
   2343 		sc->nvm.read = wm_nvm_read_ich8;
   2344 		/* FLASH */
   2345 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2346 		sc->sc_nvm_wordsize = 2048;
   2347 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2348 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2349 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2350 			aprint_error_dev(sc->sc_dev,
   2351 			    "can't map FLASH registers\n");
   2352 			goto out;
   2353 		}
   2354 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2355 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2356 		    ICH_FLASH_SECTOR_SIZE;
   2357 		sc->sc_ich8_flash_bank_size =
   2358 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2359 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2360 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
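         		/* Halve for the two NVM banks; convert bytes to words. */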
   2361 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2362 		sc->sc_flashreg_offset = 0;
   2363 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2364 		sc->phy.release = wm_put_swflag_ich8lan;
   2365 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2366 		sc->nvm.release = wm_put_nvm_ich8lan;
   2367 		break;
   2368 	case WM_T_PCH_SPT:
   2369 	case WM_T_PCH_CNP:
   2370 		sc->nvm.read = wm_nvm_read_spt;
   2371 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2372 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2373 		sc->sc_flasht = sc->sc_st;
   2374 		sc->sc_flashh = sc->sc_sh;
   2375 		sc->sc_ich8_flash_base = 0;
   2376 		sc->sc_nvm_wordsize =
   2377 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2378 		    * NVM_SIZE_MULTIPLIER;
    2379 		/* It is the size in bytes; we want words */
   2380 		sc->sc_nvm_wordsize /= 2;
   2381 		/* Assume 2 banks */
   2382 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2383 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2384 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2385 		sc->phy.release = wm_put_swflag_ich8lan;
   2386 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2387 		sc->nvm.release = wm_put_nvm_ich8lan;
   2388 		break;
   2389 	case WM_T_I210:
   2390 	case WM_T_I211:
    2391 	/* Allow a single clear of the SW semaphore on I210 and newer */
   2392 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2393 		if (wm_nvm_flash_presence_i210(sc)) {
   2394 			sc->nvm.read = wm_nvm_read_eerd;
   2395 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2396 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2397 			wm_nvm_set_addrbits_size_eecd(sc);
   2398 		} else {
   2399 			sc->nvm.read = wm_nvm_read_invm;
   2400 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2401 			sc->sc_nvm_wordsize = INVM_SIZE;
   2402 		}
   2403 		sc->phy.acquire = wm_get_phy_82575;
   2404 		sc->phy.release = wm_put_phy_82575;
   2405 		sc->nvm.acquire = wm_get_nvm_80003;
   2406 		sc->nvm.release = wm_put_nvm_80003;
   2407 		break;
   2408 	default:
   2409 		break;
   2410 	}
   2411 
   2412 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2413 	switch (sc->sc_type) {
   2414 	case WM_T_82571:
   2415 	case WM_T_82572:
   2416 		reg = CSR_READ(sc, WMREG_SWSM2);
   2417 		if ((reg & SWSM2_LOCK) == 0) {
   2418 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2419 			force_clear_smbi = true;
   2420 		} else
   2421 			force_clear_smbi = false;
   2422 		break;
   2423 	case WM_T_82573:
   2424 	case WM_T_82574:
   2425 	case WM_T_82583:
   2426 		force_clear_smbi = true;
   2427 		break;
   2428 	default:
   2429 		force_clear_smbi = false;
   2430 		break;
   2431 	}
   2432 	if (force_clear_smbi) {
   2433 		reg = CSR_READ(sc, WMREG_SWSM);
   2434 		if ((reg & SWSM_SMBI) != 0)
   2435 			aprint_error_dev(sc->sc_dev,
   2436 			    "Please update the Bootagent\n");
   2437 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2438 	}
   2439 
   2440 	/*
    2441 	 * Defer printing the EEPROM type until after verifying the checksum.
   2442 	 * This allows the EEPROM type to be printed correctly in the case
   2443 	 * that no EEPROM is attached.
   2444 	 */
   2445 	/*
   2446 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2447 	 * this for later, so we can fail future reads from the EEPROM.
   2448 	 */
   2449 	if (wm_nvm_validate_checksum(sc)) {
   2450 		/*
   2451 		 * Read twice again because some PCI-e parts fail the
    2452 		 * first check due to the link being in a sleep state.
   2453 		 */
   2454 		if (wm_nvm_validate_checksum(sc))
   2455 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2456 	}
   2457 
   2458 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2459 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2460 	else {
   2461 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2462 		    sc->sc_nvm_wordsize);
   2463 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2464 			aprint_verbose("iNVM");
   2465 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2466 			aprint_verbose("FLASH(HW)");
   2467 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2468 			aprint_verbose("FLASH");
   2469 		else {
   2470 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2471 				eetype = "SPI";
   2472 			else
   2473 				eetype = "MicroWire";
   2474 			aprint_verbose("(%d address bits) %s EEPROM",
   2475 			    sc->sc_nvm_addrbits, eetype);
   2476 		}
   2477 	}
   2478 	wm_nvm_version(sc);
   2479 	aprint_verbose("\n");
   2480 
   2481 	/*
    2482 	 * XXX The first call to wm_gmii_setup_phytype(). The result might be
   2483 	 * incorrect.
   2484 	 */
   2485 	wm_gmii_setup_phytype(sc, 0, 0);
   2486 
   2487 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2488 	switch (sc->sc_type) {
   2489 	case WM_T_ICH8:
   2490 	case WM_T_ICH9:
   2491 	case WM_T_ICH10:
   2492 	case WM_T_PCH:
   2493 	case WM_T_PCH2:
   2494 	case WM_T_PCH_LPT:
   2495 	case WM_T_PCH_SPT:
   2496 	case WM_T_PCH_CNP:
   2497 		apme_mask = WUC_APME;
   2498 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2499 		if ((eeprom_data & apme_mask) != 0)
   2500 			sc->sc_flags |= WM_F_WOL;
   2501 		break;
   2502 	default:
   2503 		break;
   2504 	}
   2505 
   2506 	/* Reset the chip to a known state. */
   2507 	wm_reset(sc);
   2508 
   2509 	/*
   2510 	 * Check for I21[01] PLL workaround.
   2511 	 *
   2512 	 * Three cases:
   2513 	 * a) Chip is I211.
   2514 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2515 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2516 	 */
   2517 	if (sc->sc_type == WM_T_I211)
   2518 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2519 	if (sc->sc_type == WM_T_I210) {
   2520 		if (!wm_nvm_flash_presence_i210(sc))
   2521 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2522 		else if ((sc->sc_nvm_ver_major < 3)
   2523 		    || ((sc->sc_nvm_ver_major == 3)
   2524 			&& (sc->sc_nvm_ver_minor < 25))) {
   2525 			aprint_verbose_dev(sc->sc_dev,
   2526 			    "ROM image version %d.%d is older than 3.25\n",
   2527 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2528 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2529 		}
   2530 	}
   2531 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2532 		wm_pll_workaround_i210(sc);
   2533 
   2534 	wm_get_wakeup(sc);
   2535 
   2536 	/* Non-AMT based hardware can now take control from firmware */
   2537 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2538 		wm_get_hw_control(sc);
   2539 
   2540 	/*
    2541 	 * Read the Ethernet address from the EEPROM, unless it was
    2542 	 * found first in the device properties.
   2543 	 */
   2544 	ea = prop_dictionary_get(dict, "mac-address");
   2545 	if (ea != NULL) {
   2546 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2547 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2548 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2549 	} else {
   2550 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2551 			aprint_error_dev(sc->sc_dev,
   2552 			    "unable to read Ethernet address\n");
   2553 			goto out;
   2554 		}
   2555 	}
   2556 
   2557 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2558 	    ether_sprintf(enaddr));
   2559 
   2560 	/*
   2561 	 * Read the config info from the EEPROM, and set up various
   2562 	 * bits in the control registers based on their contents.
   2563 	 */
   2564 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2565 	if (pn != NULL) {
   2566 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2567 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2568 	} else {
   2569 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2570 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2571 			goto out;
   2572 		}
   2573 	}
   2574 
   2575 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2576 	if (pn != NULL) {
   2577 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2578 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2579 	} else {
   2580 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2581 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2582 			goto out;
   2583 		}
   2584 	}
   2585 
   2586 	/* check for WM_F_WOL */
   2587 	switch (sc->sc_type) {
   2588 	case WM_T_82542_2_0:
   2589 	case WM_T_82542_2_1:
   2590 	case WM_T_82543:
   2591 		/* dummy? */
   2592 		eeprom_data = 0;
   2593 		apme_mask = NVM_CFG3_APME;
   2594 		break;
   2595 	case WM_T_82544:
   2596 		apme_mask = NVM_CFG2_82544_APM_EN;
   2597 		eeprom_data = cfg2;
   2598 		break;
   2599 	case WM_T_82546:
   2600 	case WM_T_82546_3:
   2601 	case WM_T_82571:
   2602 	case WM_T_82572:
   2603 	case WM_T_82573:
   2604 	case WM_T_82574:
   2605 	case WM_T_82583:
   2606 	case WM_T_80003:
   2607 	case WM_T_82575:
   2608 	case WM_T_82576:
   2609 		apme_mask = NVM_CFG3_APME;
   2610 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2611 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2612 		break;
   2613 	case WM_T_82580:
   2614 	case WM_T_I350:
   2615 	case WM_T_I354:
   2616 	case WM_T_I210:
   2617 	case WM_T_I211:
   2618 		apme_mask = NVM_CFG3_APME;
   2619 		wm_nvm_read(sc,
   2620 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2621 		    1, &eeprom_data);
   2622 		break;
   2623 	case WM_T_ICH8:
   2624 	case WM_T_ICH9:
   2625 	case WM_T_ICH10:
   2626 	case WM_T_PCH:
   2627 	case WM_T_PCH2:
   2628 	case WM_T_PCH_LPT:
   2629 	case WM_T_PCH_SPT:
   2630 	case WM_T_PCH_CNP:
    2631 		/* Already checked before wm_reset() */
   2632 		apme_mask = eeprom_data = 0;
   2633 		break;
   2634 	default: /* XXX 82540 */
   2635 		apme_mask = NVM_CFG3_APME;
   2636 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2637 		break;
   2638 	}
   2639 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2640 	if ((eeprom_data & apme_mask) != 0)
   2641 		sc->sc_flags |= WM_F_WOL;
   2642 
   2643 	/*
    2644 	 * We have the EEPROM settings; now apply the special cases
    2645 	 * where the EEPROM may be wrong or the board won't support
    2646 	 * wake on LAN on a particular port.
   2647 	 */
   2648 	switch (sc->sc_pcidevid) {
   2649 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2650 		sc->sc_flags &= ~WM_F_WOL;
   2651 		break;
   2652 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2653 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2654 		/* Wake events only supported on port A for dual fiber
   2655 		 * regardless of eeprom setting */
   2656 		if (sc->sc_funcid == 1)
   2657 			sc->sc_flags &= ~WM_F_WOL;
   2658 		break;
   2659 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2660 		/* If quad port adapter, disable WoL on all but port A */
   2661 		if (sc->sc_funcid != 0)
   2662 			sc->sc_flags &= ~WM_F_WOL;
   2663 		break;
   2664 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2665 		/* Wake events only supported on port A for dual fiber
   2666 		 * regardless of eeprom setting */
   2667 		if (sc->sc_funcid == 1)
   2668 			sc->sc_flags &= ~WM_F_WOL;
   2669 		break;
   2670 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2671 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2672 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2673 		/* If quad port adapter, disable WoL on all but port A */
   2674 		if (sc->sc_funcid != 0)
   2675 			sc->sc_flags &= ~WM_F_WOL;
   2676 		break;
   2677 	}
   2678 
   2679 	if (sc->sc_type >= WM_T_82575) {
   2680 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2681 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2682 			    nvmword);
   2683 			if ((sc->sc_type == WM_T_82575) ||
   2684 			    (sc->sc_type == WM_T_82576)) {
   2685 				/* Check NVM for autonegotiation */
   2686 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2687 				    != 0)
   2688 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2689 			}
   2690 			if ((sc->sc_type == WM_T_82575) ||
   2691 			    (sc->sc_type == WM_T_I350)) {
   2692 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2693 					sc->sc_flags |= WM_F_MAS;
   2694 			}
   2695 		}
   2696 	}
   2697 
   2698 	/*
    2699 	 * XXX Need special handling for some multiple-port cards
    2700 	 * to disable a particular port.
   2701 	 */
   2702 
   2703 	if (sc->sc_type >= WM_T_82544) {
   2704 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2705 		if (pn != NULL) {
   2706 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2707 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2708 		} else {
   2709 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2710 				aprint_error_dev(sc->sc_dev,
   2711 				    "unable to read SWDPIN\n");
   2712 				goto out;
   2713 			}
   2714 		}
   2715 	}
   2716 
   2717 	if (cfg1 & NVM_CFG1_ILOS)
   2718 		sc->sc_ctrl |= CTRL_ILOS;
   2719 
   2720 	/*
   2721 	 * XXX
    2722 	 * This code isn't correct because pins 2 and 3 are located in
    2723 	 * different positions on newer chips. Check all the datasheets.
    2724 	 *
    2725 	 * Until this is resolved, only do it for chips up to the 82580.
   2726 	 */
   2727 	if (sc->sc_type <= WM_T_82580) {
   2728 		if (sc->sc_type >= WM_T_82544) {
   2729 			sc->sc_ctrl |=
   2730 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2731 			    CTRL_SWDPIO_SHIFT;
   2732 			sc->sc_ctrl |=
   2733 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2734 			    CTRL_SWDPINS_SHIFT;
   2735 		} else {
   2736 			sc->sc_ctrl |=
   2737 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2738 			    CTRL_SWDPIO_SHIFT;
   2739 		}
   2740 	}
   2741 
   2742 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2743 		wm_nvm_read(sc,
   2744 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2745 		    1, &nvmword);
   2746 		if (nvmword & NVM_CFG3_ILOS)
   2747 			sc->sc_ctrl |= CTRL_ILOS;
   2748 	}
   2749 
   2750 #if 0
   2751 	if (sc->sc_type >= WM_T_82544) {
   2752 		if (cfg1 & NVM_CFG1_IPS0)
   2753 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2754 		if (cfg1 & NVM_CFG1_IPS1)
   2755 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2756 		sc->sc_ctrl_ext |=
   2757 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2758 		    CTRL_EXT_SWDPIO_SHIFT;
   2759 		sc->sc_ctrl_ext |=
   2760 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2761 		    CTRL_EXT_SWDPINS_SHIFT;
   2762 	} else {
   2763 		sc->sc_ctrl_ext |=
   2764 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2765 		    CTRL_EXT_SWDPIO_SHIFT;
   2766 	}
   2767 #endif
   2768 
   2769 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2770 #if 0
   2771 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2772 #endif
   2773 
   2774 	if (sc->sc_type == WM_T_PCH) {
   2775 		uint16_t val;
   2776 
   2777 		/* Save the NVM K1 bit setting */
   2778 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2779 
   2780 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2781 			sc->sc_nvm_k1_enabled = 1;
   2782 		else
   2783 			sc->sc_nvm_k1_enabled = 0;
   2784 	}
   2785 
	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2787 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2788 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2789 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2790 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2791 	    || sc->sc_type == WM_T_82573
   2792 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2793 		/* Copper only */
   2794 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2798 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2799 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2800 		switch (link_mode) {
   2801 		case CTRL_EXT_LINK_MODE_1000KX:
   2802 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2803 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2804 			break;
   2805 		case CTRL_EXT_LINK_MODE_SGMII:
   2806 			if (wm_sgmii_uses_mdio(sc)) {
   2807 				aprint_normal_dev(sc->sc_dev,
   2808 				    "SGMII(MDIO)\n");
   2809 				sc->sc_flags |= WM_F_SGMII;
   2810 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2811 				break;
   2812 			}
   2813 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2814 			/*FALLTHROUGH*/
   2815 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2816 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2817 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2818 				if (link_mode
   2819 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2820 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2821 					sc->sc_flags |= WM_F_SGMII;
   2822 					aprint_verbose_dev(sc->sc_dev,
   2823 					    "SGMII\n");
   2824 				} else {
   2825 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2826 					aprint_verbose_dev(sc->sc_dev,
   2827 					    "SERDES\n");
   2828 				}
   2829 				break;
   2830 			}
   2831 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2832 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2833 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2834 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2835 				sc->sc_flags |= WM_F_SGMII;
   2836 			}
   2837 			/* Do not change link mode for 100BaseFX */
   2838 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2839 				break;
   2840 
   2841 			/* Change current link mode setting */
   2842 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2843 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2844 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2845 			else
   2846 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2847 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2848 			break;
   2849 		case CTRL_EXT_LINK_MODE_GMII:
   2850 		default:
   2851 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2852 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2853 			break;
   2854 		}
   2855 
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
   2861 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2862 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2863 			wm_gmii_setup_phytype(sc, 0, 0);
   2864 			wm_reset_mdicnfg_82580(sc);
   2865 		}
   2866 	} else if (sc->sc_type < WM_T_82543 ||
   2867 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2868 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2869 			aprint_error_dev(sc->sc_dev,
   2870 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2871 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2872 		}
   2873 	} else {
   2874 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2875 			aprint_error_dev(sc->sc_dev,
   2876 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2877 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2878 		}
   2879 	}
   2880 
   2881 	if (sc->sc_type >= WM_T_PCH2)
   2882 		sc->sc_flags |= WM_F_EEE;
   2883 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2884 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2885 		/* XXX: Need special handling for I354. (not yet) */
   2886 		if (sc->sc_type != WM_T_I354)
   2887 			sc->sc_flags |= WM_F_EEE;
   2888 	}
   2889 
   2890 	/* Set device properties (macflags) */
   2891 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2892 
   2893 	if (sc->sc_flags != 0) {
   2894 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2895 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2896 	}
   2897 
   2898 #ifdef WM_MPSAFE
   2899 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2900 #else
   2901 	sc->sc_core_lock = NULL;
   2902 #endif
   2903 
   2904 	/* Initialize the media structures accordingly. */
   2905 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2906 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2907 	else
   2908 		wm_tbi_mediainit(sc); /* All others */
   2909 
   2910 	ifp = &sc->sc_ethercom.ec_if;
   2911 	xname = device_xname(sc->sc_dev);
   2912 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2913 	ifp->if_softc = sc;
   2914 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2915 #ifdef WM_MPSAFE
   2916 	ifp->if_extflags = IFEF_MPSAFE;
   2917 #endif
   2918 	ifp->if_ioctl = wm_ioctl;
   2919 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2920 		ifp->if_start = wm_nq_start;
   2921 		/*
		 * When the number of CPUs is one and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
		 * other for link status changes.
		 * In this situation, wm_nq_transmit() is disadvantageous
		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2928 		 */
   2929 		if (wm_is_using_multiqueue(sc))
   2930 			ifp->if_transmit = wm_nq_transmit;
   2931 	} else {
   2932 		ifp->if_start = wm_start;
   2933 		/*
		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2935 		 */
   2936 		if (wm_is_using_multiqueue(sc))
   2937 			ifp->if_transmit = wm_transmit;
   2938 	}
	/* wm(4) does not use ifp->if_watchdog; wm_tick() acts as the watchdog. */
   2940 	ifp->if_init = wm_init;
   2941 	ifp->if_stop = wm_stop;
   2942 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2943 	IFQ_SET_READY(&ifp->if_snd);
   2944 
   2945 	/* Check for jumbo frame */
   2946 	switch (sc->sc_type) {
   2947 	case WM_T_82573:
   2948 		/* XXX limited to 9234 if ASPM is disabled */
   2949 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2950 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2951 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2952 		break;
   2953 	case WM_T_82571:
   2954 	case WM_T_82572:
   2955 	case WM_T_82574:
   2956 	case WM_T_82583:
   2957 	case WM_T_82575:
   2958 	case WM_T_82576:
   2959 	case WM_T_82580:
   2960 	case WM_T_I350:
   2961 	case WM_T_I354:
   2962 	case WM_T_I210:
   2963 	case WM_T_I211:
   2964 	case WM_T_80003:
   2965 	case WM_T_ICH9:
   2966 	case WM_T_ICH10:
   2967 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2968 	case WM_T_PCH_LPT:
   2969 	case WM_T_PCH_SPT:
   2970 	case WM_T_PCH_CNP:
   2971 		/* XXX limited to 9234 */
   2972 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2973 		break;
   2974 	case WM_T_PCH:
   2975 		/* XXX limited to 4096 */
   2976 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2977 		break;
   2978 	case WM_T_82542_2_0:
   2979 	case WM_T_82542_2_1:
   2980 	case WM_T_ICH8:
   2981 		/* No support for jumbo frame */
   2982 		break;
   2983 	default:
   2984 		/* ETHER_MAX_LEN_JUMBO */
   2985 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2986 		break;
   2987 	}
   2988 
	/* If we're an i82543 or greater, we can support VLANs. */
   2990 	if (sc->sc_type >= WM_T_82543) {
   2991 		sc->sc_ethercom.ec_capabilities |=
   2992 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2993 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2994 	}
   2995 
   2996 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2997 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2998 
   2999 	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   3001 	 * on i82543 and later.
   3002 	 */
   3003 	if (sc->sc_type >= WM_T_82543) {
   3004 		ifp->if_capabilities |=
   3005 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3006 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3007 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3008 		    IFCAP_CSUM_TCPv6_Tx |
   3009 		    IFCAP_CSUM_UDPv6_Tx;
   3010 	}
   3011 
   3012 	/*
	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3014 	 *
   3015 	 *	82541GI (8086:1076) ... no
   3016 	 *	82572EI (8086:10b9) ... yes
   3017 	 */
   3018 	if (sc->sc_type >= WM_T_82571) {
   3019 		ifp->if_capabilities |=
   3020 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3021 	}
   3022 
   3023 	/*
	 * If we're an i82544 or greater (except i82547), we can do
   3025 	 * TCP segmentation offload.
   3026 	 */
   3027 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3028 		ifp->if_capabilities |= IFCAP_TSOv4;
   3029 	}
   3030 
   3031 	if (sc->sc_type >= WM_T_82571) {
   3032 		ifp->if_capabilities |= IFCAP_TSOv6;
   3033 	}
   3034 
   3035 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3036 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3037 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3038 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3039 
   3040 	/* Attach the interface. */
   3041 	error = if_initialize(ifp);
   3042 	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "if_initialize failed (%d)\n",
   3044 		    error);
   3045 		return; /* Error */
   3046 	}
   3047 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3048 	ether_ifattach(ifp, enaddr);
   3049 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3050 	if_register(ifp);
   3051 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3052 	    RND_FLAG_DEFAULT);
   3053 
   3054 #ifdef WM_EVENT_COUNTERS
   3055 	/* Attach event counters. */
   3056 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3057 	    NULL, xname, "linkintr");
   3058 
   3059 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3060 	    NULL, xname, "tx_xoff");
   3061 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3062 	    NULL, xname, "tx_xon");
   3063 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3064 	    NULL, xname, "rx_xoff");
   3065 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3066 	    NULL, xname, "rx_xon");
   3067 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3068 	    NULL, xname, "rx_macctl");
   3069 #endif /* WM_EVENT_COUNTERS */
   3070 
   3071 	sc->sc_txrx_use_workqueue = false;
   3072 
   3073 	wm_init_sysctls(sc);
   3074 
   3075 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3076 		pmf_class_network_register(self, ifp);
   3077 	else
   3078 		aprint_error_dev(self, "couldn't establish power handler\n");
   3079 
   3080 	sc->sc_flags |= WM_F_ATTACHED;
   3081 out:
   3082 	return;
   3083 }
   3084 
   3085 /* The detach function (ca_detach) */
   3086 static int
   3087 wm_detach(device_t self, int flags __unused)
   3088 {
   3089 	struct wm_softc *sc = device_private(self);
   3090 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3091 	int i;
   3092 
   3093 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3094 		return 0;
   3095 
	/* Stop the interface; callouts are stopped inside wm_stop(). */
   3097 	wm_stop(ifp, 1);
   3098 
   3099 	pmf_device_deregister(self);
   3100 
   3101 	sysctl_teardown(&sc->sc_sysctllog);
   3102 
   3103 #ifdef WM_EVENT_COUNTERS
   3104 	evcnt_detach(&sc->sc_ev_linkintr);
   3105 
   3106 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3107 	evcnt_detach(&sc->sc_ev_tx_xon);
   3108 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3109 	evcnt_detach(&sc->sc_ev_rx_xon);
   3110 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3111 #endif /* WM_EVENT_COUNTERS */
   3112 
   3113 	rnd_detach_source(&sc->rnd_source);
   3114 
   3115 	/* Tell the firmware about the release */
   3116 	WM_CORE_LOCK(sc);
   3117 	wm_release_manageability(sc);
   3118 	wm_release_hw_control(sc);
   3119 	wm_enable_wakeup(sc);
   3120 	WM_CORE_UNLOCK(sc);
   3121 
   3122 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3123 
   3124 	ether_ifdetach(ifp);
   3125 	if_detach(ifp);
   3126 	if_percpuq_destroy(sc->sc_ipq);
   3127 
   3128 	/* Delete all remaining media. */
   3129 	ifmedia_fini(&sc->sc_mii.mii_media);
   3130 
   3131 	/* Unload RX dmamaps and free mbufs */
   3132 	for (i = 0; i < sc->sc_nqueues; i++) {
   3133 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3134 		mutex_enter(rxq->rxq_lock);
   3135 		wm_rxdrain(rxq);
   3136 		mutex_exit(rxq->rxq_lock);
   3137 	}
   3138 	/* Must unlock here */
   3139 
   3140 	/* Disestablish the interrupt handler */
   3141 	for (i = 0; i < sc->sc_nintrs; i++) {
   3142 		if (sc->sc_ihs[i] != NULL) {
   3143 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3144 			sc->sc_ihs[i] = NULL;
   3145 		}
   3146 	}
   3147 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3148 
	/* wm_stop() ensures the workqueue is stopped. */
   3150 	workqueue_destroy(sc->sc_queue_wq);
   3151 
   3152 	for (i = 0; i < sc->sc_nqueues; i++)
   3153 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3154 
   3155 	wm_free_txrx_queues(sc);
   3156 
   3157 	/* Unmap the registers */
   3158 	if (sc->sc_ss) {
   3159 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3160 		sc->sc_ss = 0;
   3161 	}
   3162 	if (sc->sc_ios) {
   3163 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3164 		sc->sc_ios = 0;
   3165 	}
   3166 	if (sc->sc_flashs) {
   3167 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3168 		sc->sc_flashs = 0;
   3169 	}
   3170 
   3171 	if (sc->sc_core_lock)
   3172 		mutex_obj_free(sc->sc_core_lock);
   3173 	if (sc->sc_ich_phymtx)
   3174 		mutex_obj_free(sc->sc_ich_phymtx);
   3175 	if (sc->sc_ich_nvmmtx)
   3176 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3177 
   3178 	return 0;
   3179 }
   3180 
   3181 static bool
   3182 wm_suspend(device_t self, const pmf_qual_t *qual)
   3183 {
   3184 	struct wm_softc *sc = device_private(self);
   3185 
   3186 	wm_release_manageability(sc);
   3187 	wm_release_hw_control(sc);
   3188 	wm_enable_wakeup(sc);
   3189 
   3190 	return true;
   3191 }
   3192 
   3193 static bool
   3194 wm_resume(device_t self, const pmf_qual_t *qual)
   3195 {
   3196 	struct wm_softc *sc = device_private(self);
   3197 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3198 	pcireg_t reg;
   3199 	char buf[256];
   3200 
   3201 	reg = CSR_READ(sc, WMREG_WUS);
   3202 	if (reg != 0) {
   3203 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3204 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3205 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3206 	}
   3207 
   3208 	if (sc->sc_type >= WM_T_PCH2)
   3209 		wm_resume_workarounds_pchlan(sc);
   3210 	if ((ifp->if_flags & IFF_UP) == 0) {
   3211 		wm_reset(sc);
   3212 		/* Non-AMT based hardware can now take control from firmware */
   3213 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3214 			wm_get_hw_control(sc);
   3215 		wm_init_manageability(sc);
   3216 	} else {
   3217 		/*
   3218 		 * We called pmf_class_network_register(), so if_init() is
   3219 		 * automatically called when IFF_UP. wm_reset(),
   3220 		 * wm_get_hw_control() and wm_init_manageability() are called
   3221 		 * via wm_init().
   3222 		 */
   3223 	}
   3224 
   3225 	return true;
   3226 }
   3227 
   3228 /*
   3229  * wm_watchdog:		[ifnet interface function]
   3230  *
   3231  *	Watchdog timer handler.
   3232  */
   3233 static void
   3234 wm_watchdog(struct ifnet *ifp)
   3235 {
   3236 	int qid;
   3237 	struct wm_softc *sc = ifp->if_softc;
	uint16_t hang_queue = 0; /* One bit per queue; wm(4) has at most 16 (82576) */
   3239 
   3240 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3241 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3242 
   3243 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3244 	}
   3245 
	/* If any of the queues hung up, reset the interface. */
   3247 	if (hang_queue != 0) {
   3248 		(void)wm_init(ifp);
   3249 
   3250 		/*
		 * There is still some upper-layer processing that calls
		 * ifp->if_start() directly, e.g. ALTQ or single-CPU systems.
   3253 		 */
   3254 		/* Try to get more packets going. */
   3255 		ifp->if_start(ifp);
   3256 	}
   3257 }
   3258 
   3259 
   3260 static void
   3261 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3262 {
   3263 
   3264 	mutex_enter(txq->txq_lock);
   3265 	if (txq->txq_sending &&
   3266 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3267 		wm_watchdog_txq_locked(ifp, txq, hang);
   3268 
   3269 	mutex_exit(txq->txq_lock);
   3270 }
   3271 
   3272 static void
   3273 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3274     uint16_t *hang)
   3275 {
   3276 	struct wm_softc *sc = ifp->if_softc;
   3277 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3278 
   3279 	KASSERT(mutex_owned(txq->txq_lock));
   3280 
   3281 	/*
   3282 	 * Since we're using delayed interrupts, sweep up
   3283 	 * before we report an error.
   3284 	 */
   3285 	wm_txeof(txq, UINT_MAX);
   3286 
   3287 	if (txq->txq_sending)
   3288 		*hang |= __BIT(wmq->wmq_id);
   3289 
   3290 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3291 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3292 		    device_xname(sc->sc_dev));
   3293 	} else {
   3294 #ifdef WM_DEBUG
   3295 		int i, j;
   3296 		struct wm_txsoft *txs;
   3297 #endif
   3298 		log(LOG_ERR,
   3299 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3300 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3301 		    txq->txq_next);
   3302 		if_statinc(ifp, if_oerrors);
   3303 #ifdef WM_DEBUG
   3304 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3305 		    i = WM_NEXTTXS(txq, i)) {
   3306 			txs = &txq->txq_soft[i];
   3307 			printf("txs %d tx %d -> %d\n",
   3308 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3309 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3310 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3311 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3312 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3313 					printf("\t %#08x%08x\n",
   3314 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3315 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3316 				} else {
   3317 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3318 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3319 					    txq->txq_descs[j].wtx_addr.wa_low);
   3320 					printf("\t %#04x%02x%02x%08x\n",
   3321 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3322 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3323 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3324 					    txq->txq_descs[j].wtx_cmdlen);
   3325 				}
   3326 				if (j == txs->txs_lastdesc)
   3327 					break;
   3328 			}
   3329 		}
   3330 #endif
   3331 	}
   3332 }
   3333 
   3334 /*
   3335  * wm_tick:
   3336  *
   3337  *	One second timer, used to check link status, sweep up
   3338  *	completed transmit jobs, etc.
   3339  */
   3340 static void
   3341 wm_tick(void *arg)
   3342 {
   3343 	struct wm_softc *sc = arg;
   3344 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3345 #ifndef WM_MPSAFE
   3346 	int s = splnet();
   3347 #endif
   3348 
   3349 	WM_CORE_LOCK(sc);
   3350 
   3351 	if (sc->sc_core_stopping) {
   3352 		WM_CORE_UNLOCK(sc);
   3353 #ifndef WM_MPSAFE
   3354 		splx(s);
   3355 #endif
   3356 		return;
   3357 	}
   3358 
   3359 	if (sc->sc_type >= WM_T_82542_2_1) {
   3360 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3361 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3362 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3363 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3364 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3365 	}
   3366 
   3367 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3368 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
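	/*
	 * The leading 0ULL below widens the whole sum to 64 bits, so the
	 * 32-bit counter register reads are accumulated without overflow.
	 */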
   3369 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3370 	    + CSR_READ(sc, WMREG_CRCERRS)
   3371 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3372 	    + CSR_READ(sc, WMREG_SYMERRC)
   3373 	    + CSR_READ(sc, WMREG_RXERRC)
   3374 	    + CSR_READ(sc, WMREG_SEC)
   3375 	    + CSR_READ(sc, WMREG_CEXTERR)
   3376 	    + CSR_READ(sc, WMREG_RLEC));
   3377 	/*
   3378 	 * WMREG_RNBC is incremented when there is no available buffers in host
   3379 	 * memory. It does not mean the number of dropped packet. Because
   3380 	 * ethernet controller can receive packets in such case if there is
   3381 	 * space in phy's FIFO.
   3382 	 *
   3383 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
   3384 	 * own EVCNT instead of if_iqdrops.
   3385 	 */
   3386 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3387 	IF_STAT_PUTREF(ifp);
   3388 
   3389 	if (sc->sc_flags & WM_F_HAS_MII)
   3390 		mii_tick(&sc->sc_mii);
   3391 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3392 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3393 		wm_serdes_tick(sc);
   3394 	else
   3395 		wm_tbi_tick(sc);
   3396 
   3397 	WM_CORE_UNLOCK(sc);
   3398 
   3399 	wm_watchdog(ifp);
   3400 
   3401 	callout_schedule(&sc->sc_tick_ch, hz);
   3402 }
   3403 
   3404 static int
   3405 wm_ifflags_cb(struct ethercom *ec)
   3406 {
   3407 	struct ifnet *ifp = &ec->ec_if;
   3408 	struct wm_softc *sc = ifp->if_softc;
   3409 	u_short iffchange;
   3410 	int ecchange;
   3411 	bool needreset = false;
   3412 	int rc = 0;
   3413 
   3414 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3415 		device_xname(sc->sc_dev), __func__));
   3416 
   3417 	WM_CORE_LOCK(sc);
   3418 
   3419 	/*
   3420 	 * Check for if_flags.
	 * Its main use is to prevent link-down when opening bpf.
   3422 	 */
   3423 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3424 	sc->sc_if_flags = ifp->if_flags;
   3425 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3426 		needreset = true;
   3427 		goto ec;
   3428 	}
   3429 
	/* if_flags related updates */
   3431 	if ((iffchange & IFF_PROMISC) != 0)
   3432 		wm_set_filter(sc);
   3433 
   3434 	wm_set_vlan(sc);
   3435 
   3436 ec:
   3437 	/* Check for ec_capenable. */
   3438 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3439 	sc->sc_ec_capenable = ec->ec_capenable;
   3440 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3441 		needreset = true;
   3442 		goto out;
   3443 	}
   3444 
	/* ec_capenable related updates */
   3446 	wm_set_eee(sc);
   3447 
   3448 out:
   3449 	if (needreset)
   3450 		rc = ENETRESET;
   3451 	WM_CORE_UNLOCK(sc);
   3452 
   3453 	return rc;
   3454 }
   3455 
   3456 /*
   3457  * wm_ioctl:		[ifnet interface function]
   3458  *
   3459  *	Handle control requests from the operator.
   3460  */
   3461 static int
   3462 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3463 {
   3464 	struct wm_softc *sc = ifp->if_softc;
   3465 	struct ifreq *ifr = (struct ifreq *)data;
   3466 	struct ifaddr *ifa = (struct ifaddr *)data;
   3467 	struct sockaddr_dl *sdl;
   3468 	int s, error;
   3469 
   3470 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3471 		device_xname(sc->sc_dev), __func__));
   3472 
   3473 #ifndef WM_MPSAFE
   3474 	s = splnet();
   3475 #endif
   3476 	switch (cmd) {
   3477 	case SIOCSIFMEDIA:
   3478 		WM_CORE_LOCK(sc);
   3479 		/* Flow control requires full-duplex mode. */
   3480 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3481 		    (ifr->ifr_media & IFM_FDX) == 0)
   3482 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3483 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3484 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3485 				/* We can do both TXPAUSE and RXPAUSE. */
   3486 				ifr->ifr_media |=
   3487 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3488 			}
   3489 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3490 		}
   3491 		WM_CORE_UNLOCK(sc);
   3492 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3493 		break;
   3494 	case SIOCINITIFADDR:
   3495 		WM_CORE_LOCK(sc);
   3496 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3497 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3498 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3499 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3500 			/* Unicast address is the first multicast entry */
   3501 			wm_set_filter(sc);
   3502 			error = 0;
   3503 			WM_CORE_UNLOCK(sc);
   3504 			break;
   3505 		}
   3506 		WM_CORE_UNLOCK(sc);
   3507 		/*FALLTHROUGH*/
   3508 	default:
   3509 #ifdef WM_MPSAFE
   3510 		s = splnet();
   3511 #endif
   3512 		/* It may call wm_start, so unlock here */
   3513 		error = ether_ioctl(ifp, cmd, data);
   3514 #ifdef WM_MPSAFE
   3515 		splx(s);
   3516 #endif
   3517 		if (error != ENETRESET)
   3518 			break;
   3519 
   3520 		error = 0;
   3521 
   3522 		if (cmd == SIOCSIFCAP)
   3523 			error = (*ifp->if_init)(ifp);
   3524 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3525 			;
   3526 		else if (ifp->if_flags & IFF_RUNNING) {
   3527 			/*
   3528 			 * Multicast list has changed; set the hardware filter
   3529 			 * accordingly.
   3530 			 */
   3531 			WM_CORE_LOCK(sc);
   3532 			wm_set_filter(sc);
   3533 			WM_CORE_UNLOCK(sc);
   3534 		}
   3535 		break;
   3536 	}
   3537 
   3538 #ifndef WM_MPSAFE
   3539 	splx(s);
   3540 #endif
   3541 	return error;
   3542 }
   3543 
   3544 /* MAC address related */
   3545 
   3546 /*
 * Get the offset of the MAC address and return it.
 * If an error occurs, offset 0 is used.
   3549  */
   3550 static uint16_t
   3551 wm_check_alt_mac_addr(struct wm_softc *sc)
   3552 {
   3553 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3554 	uint16_t offset = NVM_OFF_MACADDR;
   3555 
   3556 	/* Try to read alternative MAC address pointer */
   3557 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3558 		return 0;
   3559 
	/* Check whether the pointer is valid. */
   3561 	if ((offset == 0x0000) || (offset == 0xffff))
   3562 		return 0;
   3563 
   3564 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3565 	/*
	 * Check whether the alternative MAC address is valid. Some cards
	 * have a non-0xffff pointer but don't actually use an alternative
	 * MAC address.
	 *
	 * A valid station address must have the multicast (group) bit of
	 * the first octet clear, so test for that.
   3571 	 */
   3572 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3573 		if (((myea[0] & 0xff) & 0x01) == 0)
   3574 			return offset; /* Found */
   3575 
   3576 	/* Not found */
   3577 	return 0;
   3578 }
   3579 
   3580 static int
   3581 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3582 {
   3583 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3584 	uint16_t offset = NVM_OFF_MACADDR;
   3585 	int do_invert = 0;
   3586 
   3587 	switch (sc->sc_type) {
   3588 	case WM_T_82580:
   3589 	case WM_T_I350:
   3590 	case WM_T_I354:
   3591 		/* EEPROM Top Level Partitioning */
   3592 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3593 		break;
   3594 	case WM_T_82571:
   3595 	case WM_T_82575:
   3596 	case WM_T_82576:
   3597 	case WM_T_80003:
   3598 	case WM_T_I210:
   3599 	case WM_T_I211:
   3600 		offset = wm_check_alt_mac_addr(sc);
   3601 		if (offset == 0)
   3602 			if ((sc->sc_funcid & 0x01) == 1)
   3603 				do_invert = 1;
   3604 		break;
   3605 	default:
   3606 		if ((sc->sc_funcid & 0x01) == 1)
   3607 			do_invert = 1;
   3608 		break;
   3609 	}
   3610 
   3611 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3612 		goto bad;
   3613 
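	/* Each 16-bit NVM word holds two MAC octets, low byte first. */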
   3614 	enaddr[0] = myea[0] & 0xff;
   3615 	enaddr[1] = myea[0] >> 8;
   3616 	enaddr[2] = myea[1] & 0xff;
   3617 	enaddr[3] = myea[1] >> 8;
   3618 	enaddr[4] = myea[2] & 0xff;
   3619 	enaddr[5] = myea[2] >> 8;
   3620 
   3621 	/*
   3622 	 * Toggle the LSB of the MAC address on the second port
   3623 	 * of some dual port cards.
   3624 	 */
   3625 	if (do_invert != 0)
   3626 		enaddr[5] ^= 1;
   3627 
   3628 	return 0;
   3629 
   3630  bad:
   3631 	return -1;
   3632 }
   3633 
   3634 /*
   3635  * wm_set_ral:
   3636  *
   3637  *	Set an entery in the receive address list.
   3638  */
   3639 static void
   3640 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3641 {
   3642 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3643 	uint32_t wlock_mac;
   3644 	int rv;
   3645 
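	/*
	 * Pack the 6-byte address into the low/high register pair; RAL_AV
	 * in the high word marks the entry valid. A NULL enaddr clears the
	 * slot.
	 */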
   3646 	if (enaddr != NULL) {
   3647 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3648 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3649 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3650 		ral_hi |= RAL_AV;
   3651 	} else {
   3652 		ral_lo = 0;
   3653 		ral_hi = 0;
   3654 	}
   3655 
   3656 	switch (sc->sc_type) {
   3657 	case WM_T_82542_2_0:
   3658 	case WM_T_82542_2_1:
   3659 	case WM_T_82543:
   3660 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3661 		CSR_WRITE_FLUSH(sc);
   3662 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3663 		CSR_WRITE_FLUSH(sc);
   3664 		break;
   3665 	case WM_T_PCH2:
   3666 	case WM_T_PCH_LPT:
   3667 	case WM_T_PCH_SPT:
   3668 	case WM_T_PCH_CNP:
   3669 		if (idx == 0) {
   3670 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3671 			CSR_WRITE_FLUSH(sc);
   3672 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3673 			CSR_WRITE_FLUSH(sc);
   3674 			return;
   3675 		}
   3676 		if (sc->sc_type != WM_T_PCH2) {
   3677 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3678 			    FWSM_WLOCK_MAC);
   3679 			addrl = WMREG_SHRAL(idx - 1);
   3680 			addrh = WMREG_SHRAH(idx - 1);
   3681 		} else {
   3682 			wlock_mac = 0;
   3683 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3684 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3685 		}
   3686 
   3687 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3688 			rv = wm_get_swflag_ich8lan(sc);
   3689 			if (rv != 0)
   3690 				return;
   3691 			CSR_WRITE(sc, addrl, ral_lo);
   3692 			CSR_WRITE_FLUSH(sc);
   3693 			CSR_WRITE(sc, addrh, ral_hi);
   3694 			CSR_WRITE_FLUSH(sc);
   3695 			wm_put_swflag_ich8lan(sc);
   3696 		}
   3697 
   3698 		break;
   3699 	default:
   3700 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3701 		CSR_WRITE_FLUSH(sc);
   3702 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3703 		CSR_WRITE_FLUSH(sc);
   3704 		break;
   3705 	}
   3706 }
   3707 
   3708 /*
   3709  * wm_mchash:
   3710  *
   3711  *	Compute the hash of the multicast address for the 4096-bit
   3712  *	multicast filter.
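 *
 *	As a worked example (assuming mchash_type 0 on a non-ICH/PCH chip,
 *	i.e. lo_shift = hi_shift = 4): for MAC 01:00:5e:00:00:01,
 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010, so wm_set_filter() uses
 *	MTA register index (hash >> 5) = 0 and bit (hash & 0x1f) = 0x10.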
   3713  */
   3714 static uint32_t
   3715 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3716 {
   3717 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3718 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3719 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3720 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3721 	uint32_t hash;
   3722 
   3723 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3724 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3725 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3726 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3727 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3728 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3729 		return (hash & 0x3ff);
   3730 	}
   3731 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3732 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3733 
   3734 	return (hash & 0xfff);
   3735 }
   3736 
   3737 /*
   3738  *
   3739  *
   3740  */
   3741 static int
   3742 wm_rar_count(struct wm_softc *sc)
   3743 {
   3744 	int size;
   3745 
   3746 	switch (sc->sc_type) {
   3747 	case WM_T_ICH8:
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3749 		break;
   3750 	case WM_T_ICH9:
   3751 	case WM_T_ICH10:
   3752 	case WM_T_PCH:
   3753 		size = WM_RAL_TABSIZE_ICH8;
   3754 		break;
   3755 	case WM_T_PCH2:
   3756 		size = WM_RAL_TABSIZE_PCH2;
   3757 		break;
   3758 	case WM_T_PCH_LPT:
   3759 	case WM_T_PCH_SPT:
   3760 	case WM_T_PCH_CNP:
   3761 		size = WM_RAL_TABSIZE_PCH_LPT;
   3762 		break;
   3763 	case WM_T_82575:
   3764 	case WM_T_I210:
   3765 	case WM_T_I211:
   3766 		size = WM_RAL_TABSIZE_82575;
   3767 		break;
   3768 	case WM_T_82576:
   3769 	case WM_T_82580:
   3770 		size = WM_RAL_TABSIZE_82576;
   3771 		break;
   3772 	case WM_T_I350:
   3773 	case WM_T_I354:
   3774 		size = WM_RAL_TABSIZE_I350;
   3775 		break;
   3776 	default:
   3777 		size = WM_RAL_TABSIZE;
   3778 	}
   3779 
   3780 	return size;
   3781 }
   3782 
   3783 /*
   3784  * wm_set_filter:
   3785  *
   3786  *	Set up the receive filter.
   3787  */
   3788 static void
   3789 wm_set_filter(struct wm_softc *sc)
   3790 {
   3791 	struct ethercom *ec = &sc->sc_ethercom;
   3792 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3793 	struct ether_multi *enm;
   3794 	struct ether_multistep step;
   3795 	bus_addr_t mta_reg;
   3796 	uint32_t hash, reg, bit;
   3797 	int i, size, ralmax;
   3798 
   3799 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3800 		device_xname(sc->sc_dev), __func__));
   3801 
   3802 	if (sc->sc_type >= WM_T_82544)
   3803 		mta_reg = WMREG_CORDOVA_MTA;
   3804 	else
   3805 		mta_reg = WMREG_MTA;
   3806 
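	/*
	 * Start from a clean slate: clear broadcast accept (BAM) and the
	 * unicast/multicast promiscuous bits (UPE/MPE), then re-enable the
	 * ones we need below.
	 */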
   3807 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3808 
   3809 	if (ifp->if_flags & IFF_BROADCAST)
   3810 		sc->sc_rctl |= RCTL_BAM;
   3811 	if (ifp->if_flags & IFF_PROMISC) {
   3812 		sc->sc_rctl |= RCTL_UPE;
   3813 		ETHER_LOCK(ec);
   3814 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3815 		ETHER_UNLOCK(ec);
   3816 		goto allmulti;
   3817 	}
   3818 
   3819 	/*
   3820 	 * Set the station address in the first RAL slot, and
   3821 	 * clear the remaining slots.
   3822 	 */
   3823 	size = wm_rar_count(sc);
   3824 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3825 
   3826 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3827 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3828 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3829 		switch (i) {
   3830 		case 0:
   3831 			/* We can use all entries */
   3832 			ralmax = size;
   3833 			break;
   3834 		case 1:
   3835 			/* Only RAR[0] */
   3836 			ralmax = 1;
   3837 			break;
   3838 		default:
   3839 			/* Available SHRA + RAR[0] */
   3840 			ralmax = i + 1;
   3841 		}
   3842 	} else
   3843 		ralmax = size;
   3844 	for (i = 1; i < size; i++) {
   3845 		if (i < ralmax)
   3846 			wm_set_ral(sc, NULL, i);
   3847 	}
   3848 
   3849 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3850 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3851 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3852 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3853 		size = WM_ICH8_MC_TABSIZE;
   3854 	else
   3855 		size = WM_MC_TABSIZE;
   3856 	/* Clear out the multicast table. */
   3857 	for (i = 0; i < size; i++) {
   3858 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3859 		CSR_WRITE_FLUSH(sc);
   3860 	}
   3861 
   3862 	ETHER_LOCK(ec);
   3863 	ETHER_FIRST_MULTI(step, ec, enm);
   3864 	while (enm != NULL) {
   3865 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3866 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3867 			ETHER_UNLOCK(ec);
   3868 			/*
   3869 			 * We must listen to a range of multicast addresses.
   3870 			 * For now, just accept all multicasts, rather than
   3871 			 * trying to set only those filter bits needed to match
   3872 			 * the range.  (At this time, the only use of address
   3873 			 * ranges is for IP multicast routing, for which the
   3874 			 * range is big enough to require all bits set.)
   3875 			 */
   3876 			goto allmulti;
   3877 		}
   3878 
   3879 		hash = wm_mchash(sc, enm->enm_addrlo);
   3880 
   3881 		reg = (hash >> 5);
   3882 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3883 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3884 		    || (sc->sc_type == WM_T_PCH2)
   3885 		    || (sc->sc_type == WM_T_PCH_LPT)
   3886 		    || (sc->sc_type == WM_T_PCH_SPT)
   3887 		    || (sc->sc_type == WM_T_PCH_CNP))
   3888 			reg &= 0x1f;
   3889 		else
   3890 			reg &= 0x7f;
   3891 		bit = hash & 0x1f;
   3892 
   3893 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3894 		hash |= 1U << bit;
   3895 
   3896 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3897 			/*
			 * 82544 Errata 9: Certain registers cannot be written
   3899 			 * with particular alignments in PCI-X bus operation
   3900 			 * (FCAH, MTA and VFTA).
   3901 			 */
   3902 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3903 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3904 			CSR_WRITE_FLUSH(sc);
   3905 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3906 			CSR_WRITE_FLUSH(sc);
   3907 		} else {
   3908 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3909 			CSR_WRITE_FLUSH(sc);
   3910 		}
   3911 
   3912 		ETHER_NEXT_MULTI(step, enm);
   3913 	}
   3914 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3915 	ETHER_UNLOCK(ec);
   3916 
   3917 	goto setit;
   3918 
   3919  allmulti:
   3920 	sc->sc_rctl |= RCTL_MPE;
   3921 
   3922  setit:
   3923 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3924 }
   3925 
   3926 /* Reset and init related */
   3927 
   3928 static void
   3929 wm_set_vlan(struct wm_softc *sc)
   3930 {
   3931 
   3932 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3933 		device_xname(sc->sc_dev), __func__));
   3934 
   3935 	/* Deal with VLAN enables. */
   3936 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3937 		sc->sc_ctrl |= CTRL_VME;
   3938 	else
   3939 		sc->sc_ctrl &= ~CTRL_VME;
   3940 
   3941 	/* Write the control registers. */
   3942 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3943 }
   3944 
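/*
 * wm_set_pcie_completion_timeout:
 *
 *	Set the PCIe completion timeout if it is still at its default of 0:
 *	chips without the version 2 capability get the 10ms encoding in GCR,
 *	newer ones are set to 16ms through the PCIe Device Control 2
 *	register. Completion timeout resend is disabled in either case.
 */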
   3945 static void
   3946 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3947 {
   3948 	uint32_t gcr;
   3949 	pcireg_t ctrl2;
   3950 
   3951 	gcr = CSR_READ(sc, WMREG_GCR);
   3952 
	/* Only take action if the timeout value is still at its default of 0 */
   3954 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3955 		goto out;
   3956 
   3957 	if ((gcr & GCR_CAP_VER2) == 0) {
   3958 		gcr |= GCR_CMPL_TMOUT_10MS;
   3959 		goto out;
   3960 	}
   3961 
   3962 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3963 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3964 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3965 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3966 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3967 
   3968 out:
   3969 	/* Disable completion timeout resend */
   3970 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3971 
   3972 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3973 }
   3974 
   3975 void
   3976 wm_get_auto_rd_done(struct wm_softc *sc)
   3977 {
   3978 	int i;
   3979 
	/* Wait for eeprom to reload */
   3981 	switch (sc->sc_type) {
   3982 	case WM_T_82571:
   3983 	case WM_T_82572:
   3984 	case WM_T_82573:
   3985 	case WM_T_82574:
   3986 	case WM_T_82583:
   3987 	case WM_T_82575:
   3988 	case WM_T_82576:
   3989 	case WM_T_82580:
   3990 	case WM_T_I350:
   3991 	case WM_T_I354:
   3992 	case WM_T_I210:
   3993 	case WM_T_I211:
   3994 	case WM_T_80003:
   3995 	case WM_T_ICH8:
   3996 	case WM_T_ICH9:
   3997 		for (i = 0; i < 10; i++) {
   3998 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3999 				break;
   4000 			delay(1000);
   4001 		}
   4002 		if (i == 10) {
   4003 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4004 			    "complete\n", device_xname(sc->sc_dev));
   4005 		}
   4006 		break;
   4007 	default:
   4008 		break;
   4009 	}
   4010 }
   4011 
   4012 void
   4013 wm_lan_init_done(struct wm_softc *sc)
   4014 {
   4015 	uint32_t reg = 0;
   4016 	int i;
   4017 
   4018 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4019 		device_xname(sc->sc_dev), __func__));
   4020 
   4021 	/* Wait for eeprom to reload */
   4022 	switch (sc->sc_type) {
   4023 	case WM_T_ICH10:
   4024 	case WM_T_PCH:
   4025 	case WM_T_PCH2:
   4026 	case WM_T_PCH_LPT:
   4027 	case WM_T_PCH_SPT:
   4028 	case WM_T_PCH_CNP:
   4029 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4030 			reg = CSR_READ(sc, WMREG_STATUS);
   4031 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4032 				break;
   4033 			delay(100);
   4034 		}
   4035 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4036 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4037 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4038 		}
   4039 		break;
   4040 	default:
   4041 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4042 		    __func__);
   4043 		break;
   4044 	}
   4045 
   4046 	reg &= ~STATUS_LAN_INIT_DONE;
   4047 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4048 }
   4049 
   4050 void
   4051 wm_get_cfg_done(struct wm_softc *sc)
   4052 {
   4053 	int mask;
   4054 	uint32_t reg;
   4055 	int i;
   4056 
   4057 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4058 		device_xname(sc->sc_dev), __func__));
   4059 
   4060 	/* Wait for eeprom to reload */
   4061 	switch (sc->sc_type) {
   4062 	case WM_T_82542_2_0:
   4063 	case WM_T_82542_2_1:
   4064 		/* null */
   4065 		break;
   4066 	case WM_T_82543:
   4067 	case WM_T_82544:
   4068 	case WM_T_82540:
   4069 	case WM_T_82545:
   4070 	case WM_T_82545_3:
   4071 	case WM_T_82546:
   4072 	case WM_T_82546_3:
   4073 	case WM_T_82541:
   4074 	case WM_T_82541_2:
   4075 	case WM_T_82547:
   4076 	case WM_T_82547_2:
   4077 	case WM_T_82573:
   4078 	case WM_T_82574:
   4079 	case WM_T_82583:
   4080 		/* generic */
   4081 		delay(10*1000);
   4082 		break;
   4083 	case WM_T_80003:
   4084 	case WM_T_82571:
   4085 	case WM_T_82572:
   4086 	case WM_T_82575:
   4087 	case WM_T_82576:
   4088 	case WM_T_82580:
   4089 	case WM_T_I350:
   4090 	case WM_T_I354:
   4091 	case WM_T_I210:
   4092 	case WM_T_I211:
   4093 		if (sc->sc_type == WM_T_82571) {
			/* On the 82571, both ports share the port 0 bit */
   4095 			mask = EEMNGCTL_CFGDONE_0;
   4096 		} else
   4097 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4098 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4099 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4100 				break;
   4101 			delay(1000);
   4102 		}
   4103 		if (i >= WM_PHY_CFG_TIMEOUT)
   4104 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4105 				device_xname(sc->sc_dev), __func__));
   4106 		break;
   4107 	case WM_T_ICH8:
   4108 	case WM_T_ICH9:
   4109 	case WM_T_ICH10:
   4110 	case WM_T_PCH:
   4111 	case WM_T_PCH2:
   4112 	case WM_T_PCH_LPT:
   4113 	case WM_T_PCH_SPT:
   4114 	case WM_T_PCH_CNP:
   4115 		delay(10*1000);
   4116 		if (sc->sc_type >= WM_T_ICH10)
   4117 			wm_lan_init_done(sc);
   4118 		else
   4119 			wm_get_auto_rd_done(sc);
   4120 
   4121 		/* Clear PHY Reset Asserted bit */
   4122 		reg = CSR_READ(sc, WMREG_STATUS);
   4123 		if ((reg & STATUS_PHYRA) != 0)
   4124 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4125 		break;
   4126 	default:
   4127 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4128 		    __func__);
   4129 		break;
   4130 	}
   4131 }
   4132 
   4133 int
   4134 wm_phy_post_reset(struct wm_softc *sc)
   4135 {
   4136 	device_t dev = sc->sc_dev;
   4137 	uint16_t reg;
   4138 	int rv = 0;
   4139 
   4140 	/* This function is only for ICH8 and newer. */
   4141 	if (sc->sc_type < WM_T_ICH8)
   4142 		return 0;
   4143 
   4144 	if (wm_phy_resetisblocked(sc)) {
   4145 		/* XXX */
   4146 		device_printf(dev, "PHY is blocked\n");
   4147 		return -1;
   4148 	}
   4149 
   4150 	/* Allow time for h/w to get to quiescent state after reset */
   4151 	delay(10*1000);
   4152 
   4153 	/* Perform any necessary post-reset workarounds */
   4154 	if (sc->sc_type == WM_T_PCH)
   4155 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4156 	else if (sc->sc_type == WM_T_PCH2)
   4157 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4158 	if (rv != 0)
   4159 		return rv;
   4160 
   4161 	/* Clear the host wakeup bit after lcd reset */
   4162 	if (sc->sc_type >= WM_T_PCH) {
   4163 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4164 		reg &= ~BM_WUC_HOST_WU_BIT;
   4165 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4166 	}
   4167 
   4168 	/* Configure the LCD with the extended configuration region in NVM */
   4169 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4170 		return rv;
   4171 
   4172 	/* Configure the LCD with the OEM bits in NVM */
   4173 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4174 
   4175 	if (sc->sc_type == WM_T_PCH2) {
   4176 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4177 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4178 			delay(10 * 1000);
   4179 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4180 		}
   4181 		/* Set EEE LPI Update Timer to 200usec */
   4182 		rv = sc->phy.acquire(sc);
   4183 		if (rv)
   4184 			return rv;
   4185 		rv = wm_write_emi_reg_locked(dev,
   4186 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4187 		sc->phy.release(sc);
   4188 	}
   4189 
   4190 	return rv;
   4191 }
   4192 
   4193 /* Only for PCH and newer */
   4194 static int
   4195 wm_write_smbus_addr(struct wm_softc *sc)
   4196 {
   4197 	uint32_t strap, freq;
   4198 	uint16_t phy_data;
   4199 	int rv;
   4200 
   4201 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4202 		device_xname(sc->sc_dev), __func__));
   4203 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4204 
   4205 	strap = CSR_READ(sc, WMREG_STRAP);
   4206 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4207 
   4208 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4209 	if (rv != 0)
   4210 		return -1;
   4211 
   4212 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4213 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4214 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4215 
   4216 	if (sc->sc_phytype == WMPHY_I217) {
   4217 		/* Restore SMBus frequency */
		if (freq--) {
   4219 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4220 			    | HV_SMB_ADDR_FREQ_HIGH);
   4221 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4222 			    HV_SMB_ADDR_FREQ_LOW);
   4223 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4224 			    HV_SMB_ADDR_FREQ_HIGH);
   4225 		} else
   4226 			DPRINTF(WM_DEBUG_INIT,
   4227 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4228 				device_xname(sc->sc_dev), __func__));
   4229 	}
   4230 
   4231 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4232 	    phy_data);
   4233 }
   4234 
   4235 static int
   4236 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4237 {
   4238 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4239 	uint16_t phy_page = 0;
   4240 	int rv = 0;
   4241 
   4242 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4243 		device_xname(sc->sc_dev), __func__));
   4244 
   4245 	switch (sc->sc_type) {
   4246 	case WM_T_ICH8:
   4247 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4248 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4249 			return 0;
   4250 
   4251 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4252 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4253 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4254 			break;
   4255 		}
   4256 		/* FALLTHROUGH */
   4257 	case WM_T_PCH:
   4258 	case WM_T_PCH2:
   4259 	case WM_T_PCH_LPT:
   4260 	case WM_T_PCH_SPT:
   4261 	case WM_T_PCH_CNP:
   4262 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4263 		break;
   4264 	default:
   4265 		return 0;
   4266 	}
   4267 
   4268 	if ((rv = sc->phy.acquire(sc)) != 0)
   4269 		return rv;
   4270 
   4271 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4272 	if ((reg & sw_cfg_mask) == 0)
   4273 		goto release;
   4274 
   4275 	/*
   4276 	 * Make sure HW does not configure LCD from PHY extended configuration
   4277 	 * before SW configuration
   4278 	 */
   4279 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4280 	if ((sc->sc_type < WM_T_PCH2)
   4281 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4282 		goto release;
   4283 
   4284 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4285 		device_xname(sc->sc_dev), __func__));
	/* The extended config pointer is in DWORDs; convert to a word address. */
   4287 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4288 
   4289 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4290 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4291 	if (cnf_size == 0)
   4292 		goto release;
   4293 
   4294 	if (((sc->sc_type == WM_T_PCH)
   4295 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4296 	    || (sc->sc_type > WM_T_PCH)) {
   4297 		/*
   4298 		 * HW configures the SMBus address and LEDs when the OEM and
   4299 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4300 		 * are cleared, SW will configure them instead.
   4301 		 */
   4302 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4303 			device_xname(sc->sc_dev), __func__));
   4304 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4305 			goto release;
   4306 
   4307 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4308 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4309 		    (uint16_t)reg);
   4310 		if (rv != 0)
   4311 			goto release;
   4312 	}
   4313 
   4314 	/* Configure LCD from extended configuration region. */
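	/* Each entry is two NVM words: the data, then the register address. */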
   4315 	for (i = 0; i < cnf_size; i++) {
   4316 		uint16_t reg_data, reg_addr;
   4317 
   4318 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4319 			goto release;
   4320 
   4321 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4322 			goto release;
   4323 
   4324 		if (reg_addr == IGPHY_PAGE_SELECT)
   4325 			phy_page = reg_data;
   4326 
   4327 		reg_addr &= IGPHY_MAXREGADDR;
   4328 		reg_addr |= phy_page;
   4329 
   4330 		KASSERT(sc->phy.writereg_locked != NULL);
   4331 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4332 		    reg_data);
   4333 	}
   4334 
   4335 release:
   4336 	sc->phy.release(sc);
   4337 	return rv;
   4338 }
   4339 
   4340 /*
   4341  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4342  *  @sc:       pointer to the HW structure
   4343  *  @d0_state: boolean if entering d0 or d3 device state
   4344  *
   4345  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4346  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
 *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4348  */
   4349 int
   4350 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4351 {
   4352 	uint32_t mac_reg;
   4353 	uint16_t oem_reg;
   4354 	int rv;
   4355 
   4356 	if (sc->sc_type < WM_T_PCH)
   4357 		return 0;
   4358 
   4359 	rv = sc->phy.acquire(sc);
   4360 	if (rv != 0)
   4361 		return rv;
   4362 
   4363 	if (sc->sc_type == WM_T_PCH) {
   4364 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4365 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4366 			goto release;
   4367 	}
   4368 
   4369 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4370 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4371 		goto release;
   4372 
   4373 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4374 
   4375 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4376 	if (rv != 0)
   4377 		goto release;
   4378 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4379 
   4380 	if (d0_state) {
   4381 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4382 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4383 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4384 			oem_reg |= HV_OEM_BITS_LPLU;
   4385 	} else {
   4386 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4387 		    != 0)
   4388 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4389 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4390 		    != 0)
   4391 			oem_reg |= HV_OEM_BITS_LPLU;
   4392 	}
   4393 
   4394 	/* Set Restart auto-neg to activate the bits */
   4395 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4396 	    && (wm_phy_resetisblocked(sc) == false))
   4397 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4398 
   4399 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4400 
   4401 release:
   4402 	sc->phy.release(sc);
   4403 
   4404 	return rv;
   4405 }
   4406 
   4407 /* Init hardware bits */
   4408 void
   4409 wm_initialize_hardware_bits(struct wm_softc *sc)
   4410 {
   4411 	uint32_t tarc0, tarc1, reg;
   4412 
   4413 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4414 		device_xname(sc->sc_dev), __func__));
   4415 
	/* For the 82571 variants, 80003 and ICHs */
   4417 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4418 	    || (sc->sc_type >= WM_T_80003)) {
   4419 
   4420 		/* Transmit Descriptor Control 0 */
   4421 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4422 		reg |= TXDCTL_COUNT_DESC;
   4423 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4424 
   4425 		/* Transmit Descriptor Control 1 */
   4426 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4427 		reg |= TXDCTL_COUNT_DESC;
   4428 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4429 
   4430 		/* TARC0 */
   4431 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4432 		switch (sc->sc_type) {
   4433 		case WM_T_82571:
   4434 		case WM_T_82572:
   4435 		case WM_T_82573:
   4436 		case WM_T_82574:
   4437 		case WM_T_82583:
   4438 		case WM_T_80003:
   4439 			/* Clear bits 30..27 */
   4440 			tarc0 &= ~__BITS(30, 27);
   4441 			break;
   4442 		default:
   4443 			break;
   4444 		}
   4445 
   4446 		switch (sc->sc_type) {
   4447 		case WM_T_82571:
   4448 		case WM_T_82572:
   4449 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4450 
   4451 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4452 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4453 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4454 			/* 8257[12] Errata No.7 */
    4455 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4456 
   4457 			/* TARC1 bit 28 */
   4458 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4459 				tarc1 &= ~__BIT(28);
   4460 			else
   4461 				tarc1 |= __BIT(28);
   4462 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4463 
   4464 			/*
   4465 			 * 8257[12] Errata No.13
    4466 			 * Disable Dynamic Clock Gating.
   4467 			 */
   4468 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4469 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4470 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4471 			break;
   4472 		case WM_T_82573:
   4473 		case WM_T_82574:
   4474 		case WM_T_82583:
   4475 			if ((sc->sc_type == WM_T_82574)
   4476 			    || (sc->sc_type == WM_T_82583))
   4477 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4478 
   4479 			/* Extended Device Control */
   4480 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4481 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4482 			reg |= __BIT(22);	/* Set bit 22 */
   4483 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4484 
   4485 			/* Device Control */
   4486 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4487 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4488 
   4489 			/* PCIe Control Register */
   4490 			/*
   4491 			 * 82573 Errata (unknown).
   4492 			 *
   4493 			 * 82574 Errata 25 and 82583 Errata 12
   4494 			 * "Dropped Rx Packets":
    4495 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4496 			 */
   4497 			reg = CSR_READ(sc, WMREG_GCR);
   4498 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4499 			CSR_WRITE(sc, WMREG_GCR, reg);
   4500 
   4501 			if ((sc->sc_type == WM_T_82574)
   4502 			    || (sc->sc_type == WM_T_82583)) {
   4503 				/*
    4504 				 * The document says this bit must be set for
   4505 				 * proper operation.
   4506 				 */
   4507 				reg = CSR_READ(sc, WMREG_GCR);
   4508 				reg |= __BIT(22);
   4509 				CSR_WRITE(sc, WMREG_GCR, reg);
   4510 
   4511 				/*
    4512 				 * Apply a workaround for a hardware erratum
    4513 				 * documented in the errata docs. It fixes an
    4514 				 * issue where error-prone or unreliable PCIe
    4515 				 * completions occur, particularly with ASPM
    4516 				 * enabled. Without the fix, the issue can
    4517 				 * cause Tx timeouts.
   4518 				 */
   4519 				reg = CSR_READ(sc, WMREG_GCR2);
   4520 				reg |= __BIT(0);
   4521 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4522 			}
   4523 			break;
   4524 		case WM_T_80003:
   4525 			/* TARC0 */
   4526 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4527 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4528 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4529 
   4530 			/* TARC1 bit 28 */
   4531 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4532 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4533 				tarc1 &= ~__BIT(28);
   4534 			else
   4535 				tarc1 |= __BIT(28);
   4536 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4537 			break;
   4538 		case WM_T_ICH8:
   4539 		case WM_T_ICH9:
   4540 		case WM_T_ICH10:
   4541 		case WM_T_PCH:
   4542 		case WM_T_PCH2:
   4543 		case WM_T_PCH_LPT:
   4544 		case WM_T_PCH_SPT:
   4545 		case WM_T_PCH_CNP:
   4546 			/* TARC0 */
   4547 			if (sc->sc_type == WM_T_ICH8) {
   4548 				/* Set TARC0 bits 29 and 28 */
   4549 				tarc0 |= __BITS(29, 28);
   4550 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4551 				tarc0 |= __BIT(29);
   4552 				/*
    4553 				 * Drop bit 28, following Linux.
   4554 				 * See I218/I219 spec update
   4555 				 * "5. Buffer Overrun While the I219 is
   4556 				 * Processing DMA Transactions"
   4557 				 */
   4558 				tarc0 &= ~__BIT(28);
   4559 			}
   4560 			/* Set TARC0 bits 23,24,26,27 */
   4561 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4562 
   4563 			/* CTRL_EXT */
   4564 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4565 			reg |= __BIT(22);	/* Set bit 22 */
   4566 			/*
   4567 			 * Enable PHY low-power state when MAC is at D3
   4568 			 * w/o WoL
   4569 			 */
   4570 			if (sc->sc_type >= WM_T_PCH)
   4571 				reg |= CTRL_EXT_PHYPDEN;
   4572 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4573 
   4574 			/* TARC1 */
   4575 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4576 			/* bit 28 */
   4577 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4578 				tarc1 &= ~__BIT(28);
   4579 			else
   4580 				tarc1 |= __BIT(28);
   4581 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4582 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4583 
   4584 			/* Device Status */
   4585 			if (sc->sc_type == WM_T_ICH8) {
   4586 				reg = CSR_READ(sc, WMREG_STATUS);
   4587 				reg &= ~__BIT(31);
   4588 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4589 
   4590 			}
   4591 
   4592 			/* IOSFPC */
   4593 			if (sc->sc_type == WM_T_PCH_SPT) {
   4594 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4595 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4596 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4597 			}
   4598 			/*
    4599 			 * To work around a descriptor data corruption issue
    4600 			 * with NFS v2 UDP traffic, just disable the NFS
    4601 			 * filtering capability.
   4602 			 */
   4603 			reg = CSR_READ(sc, WMREG_RFCTL);
   4604 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4605 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4606 			break;
   4607 		default:
   4608 			break;
   4609 		}
   4610 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4611 
   4612 		switch (sc->sc_type) {
   4613 		/*
   4614 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4615 		 * Avoid RSS Hash Value bug.
   4616 		 */
   4617 		case WM_T_82571:
   4618 		case WM_T_82572:
   4619 		case WM_T_82573:
   4620 		case WM_T_80003:
   4621 		case WM_T_ICH8:
   4622 			reg = CSR_READ(sc, WMREG_RFCTL);
    4623 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4624 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4625 			break;
   4626 		case WM_T_82574:
    4627 			/* Use extended Rx descriptor. */
   4628 			reg = CSR_READ(sc, WMREG_RFCTL);
   4629 			reg |= WMREG_RFCTL_EXSTEN;
   4630 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4631 			break;
   4632 		default:
   4633 			break;
   4634 		}
   4635 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4636 		/*
   4637 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4638 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4639 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4640 		 * Correctly by the Device"
   4641 		 *
   4642 		 * I354(C2000) Errata AVR53:
   4643 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4644 		 * Hang"
   4645 		 */
   4646 		reg = CSR_READ(sc, WMREG_RFCTL);
   4647 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4648 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4649 	}
   4650 }
   4651 
   4652 static uint32_t
   4653 wm_rxpbs_adjust_82580(uint32_t val)
   4654 {
   4655 	uint32_t rv = 0;
   4656 
   4657 	if (val < __arraycount(wm_82580_rxpbs_table))
   4658 		rv = wm_82580_rxpbs_table[val];
   4659 
   4660 	return rv;
   4661 }
   4662 
   4663 /*
   4664  * wm_reset_phy:
   4665  *
   4666  *	generic PHY reset function.
   4667  *	Same as e1000_phy_hw_reset_generic()
   4668  */
   4669 static int
   4670 wm_reset_phy(struct wm_softc *sc)
   4671 {
   4672 	uint32_t reg;
   4673 
   4674 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4675 		device_xname(sc->sc_dev), __func__));
   4676 	if (wm_phy_resetisblocked(sc))
   4677 		return -1;
   4678 
   4679 	sc->phy.acquire(sc);
   4680 
   4681 	reg = CSR_READ(sc, WMREG_CTRL);
   4682 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4683 	CSR_WRITE_FLUSH(sc);
   4684 
   4685 	delay(sc->phy.reset_delay_us);
   4686 
   4687 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4688 	CSR_WRITE_FLUSH(sc);
   4689 
   4690 	delay(150);
   4691 
   4692 	sc->phy.release(sc);
   4693 
   4694 	wm_get_cfg_done(sc);
   4695 	wm_phy_post_reset(sc);
   4696 
   4697 	return 0;
   4698 }
   4699 
   4700 /*
    4701  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4702  * so it is enough to check sc->sc_queue[0] only.
   4703  */
   4704 static void
   4705 wm_flush_desc_rings(struct wm_softc *sc)
   4706 {
   4707 	pcireg_t preg;
   4708 	uint32_t reg;
   4709 	struct wm_txqueue *txq;
   4710 	wiseman_txdesc_t *txd;
   4711 	int nexttx;
   4712 	uint32_t rctl;
   4713 
   4714 	/* First, disable MULR fix in FEXTNVM11 */
   4715 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4716 	reg |= FEXTNVM11_DIS_MULRFIX;
   4717 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4718 
   4719 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4720 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4721 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4722 		return;
   4723 
   4724 	/* TX */
   4725 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4726 	    preg, reg);
   4727 	reg = CSR_READ(sc, WMREG_TCTL);
   4728 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4729 
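         	/*
         	 * The flush itself: queue a single dummy 512-byte legacy Tx
         	 * descriptor below and advance the tail pointer so that the
         	 * hardware fetches it and drains the stuck transactions.
         	 */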
   4730 	txq = &sc->sc_queue[0].wmq_txq;
   4731 	nexttx = txq->txq_next;
   4732 	txd = &txq->txq_descs[nexttx];
   4733 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4734 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4735 	txd->wtx_fields.wtxu_status = 0;
   4736 	txd->wtx_fields.wtxu_options = 0;
   4737 	txd->wtx_fields.wtxu_vlan = 0;
   4738 
   4739 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4740 	    BUS_SPACE_BARRIER_WRITE);
   4741 
   4742 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4743 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4744 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4745 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4746 	delay(250);
   4747 
   4748 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4749 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4750 		return;
   4751 
   4752 	/* RX */
   4753 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4754 	rctl = CSR_READ(sc, WMREG_RCTL);
   4755 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4756 	CSR_WRITE_FLUSH(sc);
   4757 	delay(150);
   4758 
   4759 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4760 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4761 	reg &= 0xffffc000;
   4762 	/*
   4763 	 * Update thresholds: prefetch threshold to 31, host threshold
   4764 	 * to 1 and make sure the granularity is "descriptors" and not
   4765 	 * "cache lines"
   4766 	 */
   4767 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4768 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4769 
   4770 	/* Momentarily enable the RX ring for the changes to take effect */
   4771 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4772 	CSR_WRITE_FLUSH(sc);
   4773 	delay(150);
   4774 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4775 }
   4776 
   4777 /*
   4778  * wm_reset:
   4779  *
   4780  *	Reset the i82542 chip.
   4781  */
   4782 static void
   4783 wm_reset(struct wm_softc *sc)
   4784 {
   4785 	int phy_reset = 0;
   4786 	int i, error = 0;
   4787 	uint32_t reg;
   4788 	uint16_t kmreg;
   4789 	int rv;
   4790 
   4791 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4792 		device_xname(sc->sc_dev), __func__));
   4793 	KASSERT(sc->sc_type != 0);
   4794 
   4795 	/*
   4796 	 * Allocate on-chip memory according to the MTU size.
   4797 	 * The Packet Buffer Allocation register must be written
   4798 	 * before the chip is reset.
   4799 	 */
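         	/*
         	 * For example, on the 82547 with a standard MTU this selects
         	 * PBA_30K: 30KB of packet buffer for Rx, leaving the
         	 * (40KB - 30KB) = 10KB set up below as the Tx FIFO; with
         	 * jumbo frames Rx shrinks to 22KB, presumably so the larger
         	 * 18KB Tx FIFO can hold a full-sized frame.
         	 */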
   4800 	switch (sc->sc_type) {
   4801 	case WM_T_82547:
   4802 	case WM_T_82547_2:
   4803 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4804 		    PBA_22K : PBA_30K;
   4805 		for (i = 0; i < sc->sc_nqueues; i++) {
   4806 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4807 			txq->txq_fifo_head = 0;
   4808 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4809 			txq->txq_fifo_size =
   4810 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4811 			txq->txq_fifo_stall = 0;
   4812 		}
   4813 		break;
   4814 	case WM_T_82571:
   4815 	case WM_T_82572:
   4816 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4817 	case WM_T_80003:
   4818 		sc->sc_pba = PBA_32K;
   4819 		break;
   4820 	case WM_T_82573:
   4821 		sc->sc_pba = PBA_12K;
   4822 		break;
   4823 	case WM_T_82574:
   4824 	case WM_T_82583:
   4825 		sc->sc_pba = PBA_20K;
   4826 		break;
   4827 	case WM_T_82576:
   4828 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4829 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4830 		break;
   4831 	case WM_T_82580:
   4832 	case WM_T_I350:
   4833 	case WM_T_I354:
   4834 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4835 		break;
   4836 	case WM_T_I210:
   4837 	case WM_T_I211:
   4838 		sc->sc_pba = PBA_34K;
   4839 		break;
   4840 	case WM_T_ICH8:
   4841 		/* Workaround for a bit corruption issue in FIFO memory */
   4842 		sc->sc_pba = PBA_8K;
   4843 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4844 		break;
   4845 	case WM_T_ICH9:
   4846 	case WM_T_ICH10:
   4847 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4848 		    PBA_14K : PBA_10K;
   4849 		break;
   4850 	case WM_T_PCH:
   4851 	case WM_T_PCH2:	/* XXX 14K? */
   4852 	case WM_T_PCH_LPT:
   4853 	case WM_T_PCH_SPT:
   4854 	case WM_T_PCH_CNP:
   4855 		sc->sc_pba = PBA_26K;
   4856 		break;
   4857 	default:
   4858 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4859 		    PBA_40K : PBA_48K;
   4860 		break;
   4861 	}
   4862 	/*
	 * Only old or non-multiqueue devices have the PBA register.
   4864 	 * XXX Need special handling for 82575.
   4865 	 */
   4866 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4867 	    || (sc->sc_type == WM_T_82575))
   4868 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4869 
   4870 	/* Prevent the PCI-E bus from sticking */
   4871 	if (sc->sc_flags & WM_F_PCIE) {
   4872 		int timeout = 800;
   4873 
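         		/*
         		 * Poll below for up to 800 * 100us = 80ms for the GIO
         		 * Master Enable status bit to clear after requesting
         		 * the disable.
         		 */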
   4874 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4875 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4876 
   4877 		while (timeout--) {
   4878 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4879 			    == 0)
   4880 				break;
   4881 			delay(100);
   4882 		}
   4883 		if (timeout == 0)
   4884 			device_printf(sc->sc_dev,
   4885 			    "failed to disable busmastering\n");
   4886 	}
   4887 
   4888 	/* Set the completion timeout for interface */
   4889 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4890 	    || (sc->sc_type == WM_T_82580)
   4891 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4892 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4893 		wm_set_pcie_completion_timeout(sc);
   4894 
   4895 	/* Clear interrupt */
   4896 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4897 	if (wm_is_using_msix(sc)) {
   4898 		if (sc->sc_type != WM_T_82574) {
   4899 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4900 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4901 		} else
   4902 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4903 	}
   4904 
   4905 	/* Stop the transmit and receive processes. */
   4906 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4907 	sc->sc_rctl &= ~RCTL_EN;
   4908 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4909 	CSR_WRITE_FLUSH(sc);
   4910 
   4911 	/* XXX set_tbi_sbp_82543() */
   4912 
   4913 	delay(10*1000);
   4914 
   4915 	/* Must acquire the MDIO ownership before MAC reset */
   4916 	switch (sc->sc_type) {
   4917 	case WM_T_82573:
   4918 	case WM_T_82574:
   4919 	case WM_T_82583:
   4920 		error = wm_get_hw_semaphore_82573(sc);
   4921 		break;
   4922 	default:
   4923 		break;
   4924 	}
   4925 
   4926 	/*
   4927 	 * 82541 Errata 29? & 82547 Errata 28?
   4928 	 * See also the description about PHY_RST bit in CTRL register
   4929 	 * in 8254x_GBe_SDM.pdf.
   4930 	 */
   4931 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4932 		CSR_WRITE(sc, WMREG_CTRL,
   4933 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4934 		CSR_WRITE_FLUSH(sc);
   4935 		delay(5000);
   4936 	}
   4937 
   4938 	switch (sc->sc_type) {
   4939 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4940 	case WM_T_82541:
   4941 	case WM_T_82541_2:
   4942 	case WM_T_82547:
   4943 	case WM_T_82547_2:
   4944 		/*
   4945 		 * On some chipsets, a reset through a memory-mapped write
   4946 		 * cycle can cause the chip to reset before completing the
   4947 		 * write cycle. This causes major headache that can be avoided
    4948 		 * write cycle. This causes a major headache that can be avoided
   4949 		 * I/O space.
   4950 		 *
   4951 		 * So, if we successfully mapped the I/O BAR at attach time,
   4952 		 * use that. Otherwise, try our luck with a memory-mapped
   4953 		 * reset.
   4954 		 */
   4955 		if (sc->sc_flags & WM_F_IOH_VALID)
   4956 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4957 		else
   4958 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4959 		break;
   4960 	case WM_T_82545_3:
   4961 	case WM_T_82546_3:
   4962 		/* Use the shadow control register on these chips. */
   4963 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4964 		break;
   4965 	case WM_T_80003:
   4966 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4967 		sc->phy.acquire(sc);
   4968 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4969 		sc->phy.release(sc);
   4970 		break;
   4971 	case WM_T_ICH8:
   4972 	case WM_T_ICH9:
   4973 	case WM_T_ICH10:
   4974 	case WM_T_PCH:
   4975 	case WM_T_PCH2:
   4976 	case WM_T_PCH_LPT:
   4977 	case WM_T_PCH_SPT:
   4978 	case WM_T_PCH_CNP:
   4979 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4980 		if (wm_phy_resetisblocked(sc) == false) {
   4981 			/*
   4982 			 * Gate automatic PHY configuration by hardware on
   4983 			 * non-managed 82579
   4984 			 */
   4985 			if ((sc->sc_type == WM_T_PCH2)
   4986 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4987 				== 0))
   4988 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4989 
   4990 			reg |= CTRL_PHY_RESET;
   4991 			phy_reset = 1;
   4992 		} else
   4993 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4994 		sc->phy.acquire(sc);
   4995 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4996 		/* Don't insert a completion barrier when reset */
   4997 		delay(20*1000);
   4998 		mutex_exit(sc->sc_ich_phymtx);
   4999 		break;
   5000 	case WM_T_82580:
   5001 	case WM_T_I350:
   5002 	case WM_T_I354:
   5003 	case WM_T_I210:
   5004 	case WM_T_I211:
   5005 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5006 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5007 			CSR_WRITE_FLUSH(sc);
   5008 		delay(5000);
   5009 		break;
   5010 	case WM_T_82542_2_0:
   5011 	case WM_T_82542_2_1:
   5012 	case WM_T_82543:
   5013 	case WM_T_82540:
   5014 	case WM_T_82545:
   5015 	case WM_T_82546:
   5016 	case WM_T_82571:
   5017 	case WM_T_82572:
   5018 	case WM_T_82573:
   5019 	case WM_T_82574:
   5020 	case WM_T_82575:
   5021 	case WM_T_82576:
   5022 	case WM_T_82583:
   5023 	default:
   5024 		/* Everything else can safely use the documented method. */
   5025 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5026 		break;
   5027 	}
   5028 
   5029 	/* Must release the MDIO ownership after MAC reset */
   5030 	switch (sc->sc_type) {
   5031 	case WM_T_82573:
   5032 	case WM_T_82574:
   5033 	case WM_T_82583:
   5034 		if (error == 0)
   5035 			wm_put_hw_semaphore_82573(sc);
   5036 		break;
   5037 	default:
   5038 		break;
   5039 	}
   5040 
   5041 	/* Set Phy Config Counter to 50msec */
   5042 	if (sc->sc_type == WM_T_PCH2) {
   5043 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5044 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5045 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5046 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5047 	}
   5048 
   5049 	if (phy_reset != 0)
   5050 		wm_get_cfg_done(sc);
   5051 
   5052 	/* Reload EEPROM */
   5053 	switch (sc->sc_type) {
   5054 	case WM_T_82542_2_0:
   5055 	case WM_T_82542_2_1:
   5056 	case WM_T_82543:
   5057 	case WM_T_82544:
   5058 		delay(10);
   5059 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5060 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5061 		CSR_WRITE_FLUSH(sc);
   5062 		delay(2000);
   5063 		break;
   5064 	case WM_T_82540:
   5065 	case WM_T_82545:
   5066 	case WM_T_82545_3:
   5067 	case WM_T_82546:
   5068 	case WM_T_82546_3:
   5069 		delay(5*1000);
   5070 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5071 		break;
   5072 	case WM_T_82541:
   5073 	case WM_T_82541_2:
   5074 	case WM_T_82547:
   5075 	case WM_T_82547_2:
   5076 		delay(20000);
   5077 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5078 		break;
   5079 	case WM_T_82571:
   5080 	case WM_T_82572:
   5081 	case WM_T_82573:
   5082 	case WM_T_82574:
   5083 	case WM_T_82583:
   5084 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5085 			delay(10);
   5086 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5087 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5088 			CSR_WRITE_FLUSH(sc);
   5089 		}
   5090 		/* check EECD_EE_AUTORD */
   5091 		wm_get_auto_rd_done(sc);
   5092 		/*
   5093 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5094 		 * is set.
   5095 		 */
   5096 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5097 		    || (sc->sc_type == WM_T_82583))
   5098 			delay(25*1000);
   5099 		break;
   5100 	case WM_T_82575:
   5101 	case WM_T_82576:
   5102 	case WM_T_82580:
   5103 	case WM_T_I350:
   5104 	case WM_T_I354:
   5105 	case WM_T_I210:
   5106 	case WM_T_I211:
   5107 	case WM_T_80003:
   5108 		/* check EECD_EE_AUTORD */
   5109 		wm_get_auto_rd_done(sc);
   5110 		break;
   5111 	case WM_T_ICH8:
   5112 	case WM_T_ICH9:
   5113 	case WM_T_ICH10:
   5114 	case WM_T_PCH:
   5115 	case WM_T_PCH2:
   5116 	case WM_T_PCH_LPT:
   5117 	case WM_T_PCH_SPT:
   5118 	case WM_T_PCH_CNP:
   5119 		break;
   5120 	default:
   5121 		panic("%s: unknown type\n", __func__);
   5122 	}
   5123 
   5124 	/* Check whether EEPROM is present or not */
   5125 	switch (sc->sc_type) {
   5126 	case WM_T_82575:
   5127 	case WM_T_82576:
   5128 	case WM_T_82580:
   5129 	case WM_T_I350:
   5130 	case WM_T_I354:
   5131 	case WM_T_ICH8:
   5132 	case WM_T_ICH9:
   5133 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5134 			/* Not found */
   5135 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5136 			if (sc->sc_type == WM_T_82575)
   5137 				wm_reset_init_script_82575(sc);
   5138 		}
   5139 		break;
   5140 	default:
   5141 		break;
   5142 	}
   5143 
   5144 	if (phy_reset != 0)
   5145 		wm_phy_post_reset(sc);
   5146 
   5147 	if ((sc->sc_type == WM_T_82580)
   5148 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5149 		/* Clear global device reset status bit */
   5150 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5151 	}
   5152 
   5153 	/* Clear any pending interrupt events. */
   5154 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5155 	reg = CSR_READ(sc, WMREG_ICR);
   5156 	if (wm_is_using_msix(sc)) {
   5157 		if (sc->sc_type != WM_T_82574) {
   5158 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5159 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5160 		} else
   5161 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5162 	}
   5163 
   5164 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5165 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5166 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5167 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5168 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5169 		reg |= KABGTXD_BGSQLBIAS;
   5170 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5171 	}
   5172 
   5173 	/* Reload sc_ctrl */
   5174 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5175 
   5176 	wm_set_eee(sc);
   5177 
   5178 	/*
   5179 	 * For PCH, this write will make sure that any noise will be detected
   5180 	 * as a CRC error and be dropped rather than show up as a bad packet
   5181 	 * to the DMA engine
   5182 	 */
   5183 	if (sc->sc_type == WM_T_PCH)
   5184 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5185 
   5186 	if (sc->sc_type >= WM_T_82544)
   5187 		CSR_WRITE(sc, WMREG_WUC, 0);
   5188 
   5189 	if (sc->sc_type < WM_T_82575)
   5190 		wm_disable_aspm(sc); /* Workaround for some chips */
   5191 
   5192 	wm_reset_mdicnfg_82580(sc);
   5193 
   5194 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5195 		wm_pll_workaround_i210(sc);
   5196 
   5197 	if (sc->sc_type == WM_T_80003) {
   5198 		/* Default to TRUE to enable the MDIC W/A */
   5199 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5200 
   5201 		rv = wm_kmrn_readreg(sc,
   5202 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5203 		if (rv == 0) {
   5204 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5205 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5206 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5207 			else
   5208 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5209 		}
   5210 	}
   5211 }
   5212 
   5213 /*
   5214  * wm_add_rxbuf:
   5215  *
    5216  *	Add a receive buffer to the indicated descriptor.
   5217  */
   5218 static int
   5219 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5220 {
   5221 	struct wm_softc *sc = rxq->rxq_sc;
   5222 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5223 	struct mbuf *m;
   5224 	int error;
   5225 
   5226 	KASSERT(mutex_owned(rxq->rxq_lock));
   5227 
   5228 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5229 	if (m == NULL)
   5230 		return ENOBUFS;
   5231 
   5232 	MCLGET(m, M_DONTWAIT);
   5233 	if ((m->m_flags & M_EXT) == 0) {
   5234 		m_freem(m);
   5235 		return ENOBUFS;
   5236 	}
   5237 
   5238 	if (rxs->rxs_mbuf != NULL)
   5239 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5240 
   5241 	rxs->rxs_mbuf = m;
   5242 
   5243 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5244 	/*
   5245 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5246 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5247 	 */
   5248 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5249 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5250 	if (error) {
   5251 		/* XXX XXX XXX */
   5252 		aprint_error_dev(sc->sc_dev,
   5253 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5254 		panic("wm_add_rxbuf");
   5255 	}
   5256 
   5257 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5258 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5259 
   5260 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5261 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5262 			wm_init_rxdesc(rxq, idx);
   5263 	} else
   5264 		wm_init_rxdesc(rxq, idx);
   5265 
   5266 	return 0;
   5267 }
   5268 
   5269 /*
   5270  * wm_rxdrain:
   5271  *
   5272  *	Drain the receive queue.
   5273  */
   5274 static void
   5275 wm_rxdrain(struct wm_rxqueue *rxq)
   5276 {
   5277 	struct wm_softc *sc = rxq->rxq_sc;
   5278 	struct wm_rxsoft *rxs;
   5279 	int i;
   5280 
   5281 	KASSERT(mutex_owned(rxq->rxq_lock));
   5282 
   5283 	for (i = 0; i < WM_NRXDESC; i++) {
   5284 		rxs = &rxq->rxq_soft[i];
   5285 		if (rxs->rxs_mbuf != NULL) {
   5286 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5287 			m_freem(rxs->rxs_mbuf);
   5288 			rxs->rxs_mbuf = NULL;
   5289 		}
   5290 	}
   5291 }
   5292 
   5293 /*
    5294  * Set up registers for RSS.
    5295  *
    5296  * XXX VMDq is not yet supported.
   5297  */
   5298 static void
   5299 wm_init_rss(struct wm_softc *sc)
   5300 {
   5301 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5302 	int i;
   5303 
   5304 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5305 
   5306 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5307 		unsigned int qid, reta_ent;
   5308 
   5309 		qid  = i % sc->sc_nqueues;
   5310 		switch (sc->sc_type) {
   5311 		case WM_T_82574:
   5312 			reta_ent = __SHIFTIN(qid,
   5313 			    RETA_ENT_QINDEX_MASK_82574);
   5314 			break;
   5315 		case WM_T_82575:
   5316 			reta_ent = __SHIFTIN(qid,
   5317 			    RETA_ENT_QINDEX1_MASK_82575);
   5318 			break;
   5319 		default:
   5320 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5321 			break;
   5322 		}
   5323 
   5324 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5325 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5326 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5327 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5328 	}
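         	/*
         	 * With, say, four queues the redirection table now cycles
         	 * 0,1,2,3,0,1,... through its entries, so the low bits of
         	 * the RSS hash spread incoming flows evenly over the queues.
         	 */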
   5329 
   5330 	rss_getkey((uint8_t *)rss_key);
   5331 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5332 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5333 
   5334 	if (sc->sc_type == WM_T_82574)
   5335 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5336 	else
   5337 		mrqc = MRQC_ENABLE_RSS_MQ;
   5338 
   5339 	/*
   5340 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5341 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5342 	 */
   5343 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5344 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5345 #if 0
   5346 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5347 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5348 #endif
   5349 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5350 
   5351 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5352 }
   5353 
   5354 /*
    5355  * Adjust the TX and RX queue numbers which the system actually uses.
    5356  *
    5357  * The numbers are affected by the parameters below.
    5358  *     - The number of hardware queues
   5359  *     - The number of MSI-X vectors (= "nvectors" argument)
   5360  *     - ncpu
   5361  */
   5362 static void
   5363 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5364 {
   5365 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5366 
   5367 	if (nvectors < 2) {
   5368 		sc->sc_nqueues = 1;
   5369 		return;
   5370 	}
   5371 
   5372 	switch (sc->sc_type) {
   5373 	case WM_T_82572:
   5374 		hw_ntxqueues = 2;
   5375 		hw_nrxqueues = 2;
   5376 		break;
   5377 	case WM_T_82574:
   5378 		hw_ntxqueues = 2;
   5379 		hw_nrxqueues = 2;
   5380 		break;
   5381 	case WM_T_82575:
   5382 		hw_ntxqueues = 4;
   5383 		hw_nrxqueues = 4;
   5384 		break;
   5385 	case WM_T_82576:
   5386 		hw_ntxqueues = 16;
   5387 		hw_nrxqueues = 16;
   5388 		break;
   5389 	case WM_T_82580:
   5390 	case WM_T_I350:
   5391 	case WM_T_I354:
   5392 		hw_ntxqueues = 8;
   5393 		hw_nrxqueues = 8;
   5394 		break;
   5395 	case WM_T_I210:
   5396 		hw_ntxqueues = 4;
   5397 		hw_nrxqueues = 4;
   5398 		break;
   5399 	case WM_T_I211:
   5400 		hw_ntxqueues = 2;
   5401 		hw_nrxqueues = 2;
   5402 		break;
   5403 		/*
    5404 		 * As the Ethernet controllers below do not support MSI-X,
    5405 		 * this driver does not use multiqueue on them.
   5406 		 *     - WM_T_80003
   5407 		 *     - WM_T_ICH8
   5408 		 *     - WM_T_ICH9
   5409 		 *     - WM_T_ICH10
   5410 		 *     - WM_T_PCH
   5411 		 *     - WM_T_PCH2
   5412 		 *     - WM_T_PCH_LPT
   5413 		 */
   5414 	default:
   5415 		hw_ntxqueues = 1;
   5416 		hw_nrxqueues = 1;
   5417 		break;
   5418 	}
   5419 
   5420 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5421 
   5422 	/*
    5423 	 * Since more queues than MSI-X vectors cannot improve scaling,
    5424 	 * we limit the number of queues actually used.
   5425 	 */
   5426 	if (nvectors < hw_nqueues + 1)
   5427 		sc->sc_nqueues = nvectors - 1;
   5428 	else
   5429 		sc->sc_nqueues = hw_nqueues;
   5430 
   5431 	/*
    5432 	 * Since more queues than CPUs cannot improve scaling, we limit
    5433 	 * the number of queues actually used.
   5434 	 */
   5435 	if (ncpu < sc->sc_nqueues)
   5436 		sc->sc_nqueues = ncpu;
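         	/*
         	 * Example: an 82576 (16 hardware queue pairs) given five
         	 * MSI-X vectors on an 8-CPU machine ends up with
         	 * sc_nqueues = min(16, 5 - 1, 8) = 4; the vector left over
         	 * serves the link interrupt.
         	 */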
   5437 }
   5438 
   5439 static inline bool
   5440 wm_is_using_msix(struct wm_softc *sc)
   5441 {
   5442 
   5443 	return (sc->sc_nintrs > 1);
   5444 }
   5445 
   5446 static inline bool
   5447 wm_is_using_multiqueue(struct wm_softc *sc)
   5448 {
   5449 
   5450 	return (sc->sc_nqueues > 1);
   5451 }
   5452 
   5453 static int
   5454 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5455 {
   5456 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5457 
   5458 	wmq->wmq_id = qidx;
   5459 	wmq->wmq_intr_idx = intr_idx;
   5460 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5461 	    wm_handle_queue, wmq);
   5462 	if (wmq->wmq_si != NULL)
   5463 		return 0;
   5464 
   5465 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5466 	    wmq->wmq_id);
   5467 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5468 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5469 	return ENOMEM;
   5470 }
   5471 
   5472 /*
   5473  * Both single interrupt MSI and INTx can use this function.
   5474  */
   5475 static int
   5476 wm_setup_legacy(struct wm_softc *sc)
   5477 {
   5478 	pci_chipset_tag_t pc = sc->sc_pc;
   5479 	const char *intrstr = NULL;
   5480 	char intrbuf[PCI_INTRSTR_LEN];
   5481 	int error;
   5482 
   5483 	error = wm_alloc_txrx_queues(sc);
   5484 	if (error) {
   5485 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5486 		    error);
   5487 		return ENOMEM;
   5488 	}
   5489 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5490 	    sizeof(intrbuf));
   5491 #ifdef WM_MPSAFE
   5492 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5493 #endif
   5494 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5495 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5496 	if (sc->sc_ihs[0] == NULL) {
   5497 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5498 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5499 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5500 		return ENOMEM;
   5501 	}
   5502 
   5503 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5504 	sc->sc_nintrs = 1;
   5505 
   5506 	return wm_softint_establish_queue(sc, 0, 0);
   5507 }
   5508 
   5509 static int
   5510 wm_setup_msix(struct wm_softc *sc)
   5511 {
   5512 	void *vih;
   5513 	kcpuset_t *affinity;
   5514 	int qidx, error, intr_idx, txrx_established;
   5515 	pci_chipset_tag_t pc = sc->sc_pc;
   5516 	const char *intrstr = NULL;
   5517 	char intrbuf[PCI_INTRSTR_LEN];
   5518 	char intr_xname[INTRDEVNAMEBUF];
   5519 
   5520 	if (sc->sc_nqueues < ncpu) {
   5521 		/*
   5522 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5523 		 * interrupts starts from CPU#1.
   5524 		 */
   5525 		sc->sc_affinity_offset = 1;
   5526 	} else {
   5527 		/*
    5528 		 * In this case, this device uses all CPUs. For readability
    5529 		 * we unify the affinitized cpu_index with the vector number.
   5530 		 */
   5531 		sc->sc_affinity_offset = 0;
   5532 	}
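         	/*
         	 * For example, with four queues on an eight-CPU machine the
         	 * offset of 1 pins the Tx/Rx vectors to CPU#1..CPU#4 below,
         	 * leaving CPU#0 free for other devices' interrupts.
         	 */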
   5533 
   5534 	error = wm_alloc_txrx_queues(sc);
   5535 	if (error) {
   5536 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5537 		    error);
   5538 		return ENOMEM;
   5539 	}
   5540 
   5541 	kcpuset_create(&affinity, false);
   5542 	intr_idx = 0;
   5543 
   5544 	/*
   5545 	 * TX and RX
   5546 	 */
   5547 	txrx_established = 0;
   5548 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5549 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5550 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5551 
   5552 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5553 		    sizeof(intrbuf));
   5554 #ifdef WM_MPSAFE
   5555 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5556 		    PCI_INTR_MPSAFE, true);
   5557 #endif
   5558 		memset(intr_xname, 0, sizeof(intr_xname));
   5559 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5560 		    device_xname(sc->sc_dev), qidx);
   5561 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5562 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5563 		if (vih == NULL) {
   5564 			aprint_error_dev(sc->sc_dev,
   5565 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5566 			    intrstr ? " at " : "",
   5567 			    intrstr ? intrstr : "");
   5568 
   5569 			goto fail;
   5570 		}
   5571 		kcpuset_zero(affinity);
   5572 		/* Round-robin affinity */
   5573 		kcpuset_set(affinity, affinity_to);
   5574 		error = interrupt_distribute(vih, affinity, NULL);
   5575 		if (error == 0) {
   5576 			aprint_normal_dev(sc->sc_dev,
   5577 			    "for TX and RX interrupting at %s affinity to %u\n",
   5578 			    intrstr, affinity_to);
   5579 		} else {
   5580 			aprint_normal_dev(sc->sc_dev,
   5581 			    "for TX and RX interrupting at %s\n", intrstr);
   5582 		}
   5583 		sc->sc_ihs[intr_idx] = vih;
   5584 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5585 			goto fail;
   5586 		txrx_established++;
   5587 		intr_idx++;
   5588 	}
   5589 
   5590 	/* LINK */
   5591 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5592 	    sizeof(intrbuf));
   5593 #ifdef WM_MPSAFE
   5594 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5595 #endif
   5596 	memset(intr_xname, 0, sizeof(intr_xname));
   5597 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5598 	    device_xname(sc->sc_dev));
   5599 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5600 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5601 	if (vih == NULL) {
   5602 		aprint_error_dev(sc->sc_dev,
   5603 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5604 		    intrstr ? " at " : "",
   5605 		    intrstr ? intrstr : "");
   5606 
   5607 		goto fail;
   5608 	}
   5609 	/* Keep default affinity to LINK interrupt */
   5610 	aprint_normal_dev(sc->sc_dev,
   5611 	    "for LINK interrupting at %s\n", intrstr);
   5612 	sc->sc_ihs[intr_idx] = vih;
   5613 	sc->sc_link_intr_idx = intr_idx;
   5614 
   5615 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5616 	kcpuset_destroy(affinity);
   5617 	return 0;
   5618 
   5619  fail:
   5620 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5621 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5622 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5623 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5624 	}
   5625 
   5626 	kcpuset_destroy(affinity);
   5627 	return ENOMEM;
   5628 }
   5629 
   5630 static void
   5631 wm_unset_stopping_flags(struct wm_softc *sc)
   5632 {
   5633 	int i;
   5634 
   5635 	KASSERT(WM_CORE_LOCKED(sc));
   5636 
   5637 	/* Must unset stopping flags in ascending order. */
   5638 	for (i = 0; i < sc->sc_nqueues; i++) {
   5639 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5640 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5641 
   5642 		mutex_enter(txq->txq_lock);
   5643 		txq->txq_stopping = false;
   5644 		mutex_exit(txq->txq_lock);
   5645 
   5646 		mutex_enter(rxq->rxq_lock);
   5647 		rxq->rxq_stopping = false;
   5648 		mutex_exit(rxq->rxq_lock);
   5649 	}
   5650 
   5651 	sc->sc_core_stopping = false;
   5652 }
   5653 
   5654 static void
   5655 wm_set_stopping_flags(struct wm_softc *sc)
   5656 {
   5657 	int i;
   5658 
   5659 	KASSERT(WM_CORE_LOCKED(sc));
   5660 
   5661 	sc->sc_core_stopping = true;
   5662 
   5663 	/* Must set stopping flags in ascending order. */
   5664 	for (i = 0; i < sc->sc_nqueues; i++) {
   5665 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5666 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5667 
   5668 		mutex_enter(rxq->rxq_lock);
   5669 		rxq->rxq_stopping = true;
   5670 		mutex_exit(rxq->rxq_lock);
   5671 
   5672 		mutex_enter(txq->txq_lock);
   5673 		txq->txq_stopping = true;
   5674 		mutex_exit(txq->txq_lock);
   5675 	}
   5676 }
   5677 
   5678 /*
   5679  * Write interrupt interval value to ITR or EITR
   5680  */
   5681 static void
   5682 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5683 {
   5684 
   5685 	if (!wmq->wmq_set_itr)
   5686 		return;
   5687 
   5688 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5689 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5690 
   5691 		/*
   5692 		 * 82575 doesn't have CNT_INGR field.
    5693 		 * 82575 doesn't have the CNT_INGR field,
    5694 		 * so overwrite the counter field in software.
   5695 		if (sc->sc_type == WM_T_82575)
   5696 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5697 		else
   5698 			eitr |= EITR_CNT_INGR;
   5699 
   5700 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5701 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5702 		/*
    5703 		 * 82574 has both ITR and EITR. Set EITR when we use
    5704 		 * the multiqueue function with MSI-X.
   5705 		 */
   5706 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5707 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5708 	} else {
   5709 		KASSERT(wmq->wmq_id == 0);
   5710 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5711 	}
   5712 
   5713 	wmq->wmq_set_itr = false;
   5714 }
   5715 
   5716 /*
   5717  * TODO
    5718  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5719  * however, it does not fit wm(4). So we keep AIM disabled until we
    5720  * find an appropriate ITR calculation.
   5721  */
   5722 /*
    5723  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5724  * write to the register. This function itself does not write ITR/EITR.
   5725  */
   5726 static void
   5727 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5728 {
   5729 #ifdef NOTYET
   5730 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5731 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5732 	uint32_t avg_size = 0;
   5733 	uint32_t new_itr;
   5734 
   5735 	if (rxq->rxq_packets)
   5736 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5737 	if (txq->txq_packets)
   5738 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5739 
   5740 	if (avg_size == 0) {
   5741 		new_itr = 450; /* restore default value */
   5742 		goto out;
   5743 	}
   5744 
   5745 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5746 	avg_size += 24;
   5747 
   5748 	/* Don't starve jumbo frames */
   5749 	avg_size = uimin(avg_size, 3000);
   5750 
   5751 	/* Give a little boost to mid-size frames */
   5752 	if ((avg_size > 300) && (avg_size < 1200))
   5753 		new_itr = avg_size / 3;
   5754 	else
   5755 		new_itr = avg_size / 2;
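         	/*
         	 * Example: an average frame of 600 bytes becomes 624 after
         	 * the 24-byte overhead, lands in the mid-size range and so
         	 * yields new_itr = 624 / 3 = 208 before the final scaling
         	 * below.
         	 */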
   5756 
   5757 out:
   5758 	/*
    5759 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5760 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5761 	 */
   5762 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5763 		new_itr *= 4;
   5764 
   5765 	if (new_itr != wmq->wmq_itr) {
   5766 		wmq->wmq_itr = new_itr;
   5767 		wmq->wmq_set_itr = true;
   5768 	} else
   5769 		wmq->wmq_set_itr = false;
   5770 
   5771 	rxq->rxq_packets = 0;
   5772 	rxq->rxq_bytes = 0;
   5773 	txq->txq_packets = 0;
   5774 	txq->txq_bytes = 0;
   5775 #endif
   5776 }
   5777 
   5778 static void
   5779 wm_init_sysctls(struct wm_softc *sc)
   5780 {
   5781 	struct sysctllog **log;
   5782 	const struct sysctlnode *rnode, *cnode;
   5783 	int rv;
   5784 	const char *dvname;
   5785 
   5786 	log = &sc->sc_sysctllog;
   5787 	dvname = device_xname(sc->sc_dev);
   5788 
   5789 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5790 	    0, CTLTYPE_NODE, dvname,
   5791 	    SYSCTL_DESCR("wm information and settings"),
   5792 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5793 	if (rv != 0)
   5794 		goto err;
   5795 
   5796 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5797 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5798 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5799 	if (rv != 0)
   5800 		goto teardown;
   5801 
   5802 	return;
   5803 
   5804 teardown:
   5805 	sysctl_teardown(log);
   5806 err:
   5807 	sc->sc_sysctllog = NULL;
   5808 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5809 	    __func__, rv);
   5810 }
   5811 
   5812 /*
   5813  * wm_init:		[ifnet interface function]
   5814  *
   5815  *	Initialize the interface.
   5816  */
   5817 static int
   5818 wm_init(struct ifnet *ifp)
   5819 {
   5820 	struct wm_softc *sc = ifp->if_softc;
   5821 	int ret;
   5822 
   5823 	WM_CORE_LOCK(sc);
   5824 	ret = wm_init_locked(ifp);
   5825 	WM_CORE_UNLOCK(sc);
   5826 
   5827 	return ret;
   5828 }
   5829 
   5830 static int
   5831 wm_init_locked(struct ifnet *ifp)
   5832 {
   5833 	struct wm_softc *sc = ifp->if_softc;
   5834 	struct ethercom *ec = &sc->sc_ethercom;
   5835 	int i, j, trynum, error = 0;
   5836 	uint32_t reg, sfp_mask = 0;
   5837 
   5838 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5839 		device_xname(sc->sc_dev), __func__));
   5840 	KASSERT(WM_CORE_LOCKED(sc));
   5841 
   5842 	/*
    5843 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5844 	 * There is a small but measurable benefit to avoiding the adjustment
   5845 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5846 	 * on such platforms.  One possibility is that the DMA itself is
   5847 	 * slightly more efficient if the front of the entire packet (instead
   5848 	 * of the front of the headers) is aligned.
   5849 	 *
   5850 	 * Note we must always set align_tweak to 0 if we are using
   5851 	 * jumbo frames.
   5852 	 */
   5853 #ifdef __NO_STRICT_ALIGNMENT
   5854 	sc->sc_align_tweak = 0;
   5855 #else
   5856 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5857 		sc->sc_align_tweak = 0;
   5858 	else
   5859 		sc->sc_align_tweak = 2;
   5860 #endif /* __NO_STRICT_ALIGNMENT */
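         	/*
         	 * With sc_align_tweak = 2 the 14-byte Ethernet header starts
         	 * at offset 2 in the receive buffer, which places the IP
         	 * header on a 4-byte boundary for strict-alignment platforms.
         	 */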
   5861 
   5862 	/* Cancel any pending I/O. */
   5863 	wm_stop_locked(ifp, false, false);
   5864 
   5865 	/* Update statistics before reset */
   5866 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   5867 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   5868 
   5869 	/* PCH_SPT hardware workaround */
   5870 	if (sc->sc_type == WM_T_PCH_SPT)
   5871 		wm_flush_desc_rings(sc);
   5872 
   5873 	/* Reset the chip to a known state. */
   5874 	wm_reset(sc);
   5875 
   5876 	/*
   5877 	 * AMT based hardware can now take control from firmware
   5878 	 * Do this after reset.
   5879 	 */
   5880 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5881 		wm_get_hw_control(sc);
   5882 
   5883 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5884 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5885 		wm_legacy_irq_quirk_spt(sc);
   5886 
   5887 	/* Init hardware bits */
   5888 	wm_initialize_hardware_bits(sc);
   5889 
   5890 	/* Reset the PHY. */
   5891 	if (sc->sc_flags & WM_F_HAS_MII)
   5892 		wm_gmii_reset(sc);
   5893 
   5894 	if (sc->sc_type >= WM_T_ICH8) {
   5895 		reg = CSR_READ(sc, WMREG_GCR);
   5896 		/*
   5897 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5898 		 * default after reset.
   5899 		 */
   5900 		if (sc->sc_type == WM_T_ICH8)
   5901 			reg |= GCR_NO_SNOOP_ALL;
   5902 		else
   5903 			reg &= ~GCR_NO_SNOOP_ALL;
   5904 		CSR_WRITE(sc, WMREG_GCR, reg);
   5905 	}
   5906 
   5907 	if ((sc->sc_type >= WM_T_ICH8)
   5908 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5909 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5910 
   5911 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5912 		reg |= CTRL_EXT_RO_DIS;
   5913 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5914 	}
   5915 
   5916 	/* Calculate (E)ITR value */
   5917 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5918 		/*
   5919 		 * For NEWQUEUE's EITR (except for 82575).
   5920 		 * 82575's EITR should be set same throttling value as other
   5921 		 * old controllers' ITR because the interrupt/sec calculation
   5922 		 * is the same, that is, 1,000,000,000 / (N * 256).
   5923 		 *
   5924 		 * 82574's EITR should be set same throttling value as ITR.
   5925 		 *
   5926 		 * For N interrupts/sec, set this value to:
    5927 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5928 		 */
   5929 		sc->sc_itr_init = 450;
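         		/*
         		 * 450 here corresponds to 1,000,000 / 450 ~= 2222
         		 * interrupts/sec, the same ballpark as the legacy ITR
         		 * default below.
         		 */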
   5930 	} else if (sc->sc_type >= WM_T_82543) {
   5931 		/*
   5932 		 * Set up the interrupt throttling register (units of 256ns)
   5933 		 * Note that a footnote in Intel's documentation says this
   5934 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5935 		 * or 10Mbit mode.  Empirically, it appears to be the case
    5936 		 * or 10Mbit mode.  Empirically, this appears to hold for
    5937 		 * the 1024ns units of the other
   5938 		 * to divide this value by 4 when the link speed is low.
   5939 		 *
   5940 		 * XXX implement this division at link speed change!
   5941 		 */
   5942 
   5943 		/*
   5944 		 * For N interrupts/sec, set this value to:
   5945 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5946 		 * absolute and packet timer values to this value
   5947 		 * divided by 4 to get "simple timer" behavior.
   5948 		 */
   5949 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
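         		/*
         		 * Check: 1,000,000,000 / (1500 * 256) ~= 2604 ints/sec;
         		 * the absolute and packet timers are then set to
         		 * 1500 / 4 = 375 for "simple timer" behavior.
         		 */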
   5950 	}
   5951 
   5952 	error = wm_init_txrx_queues(sc);
   5953 	if (error)
   5954 		goto out;
   5955 
   5956 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   5957 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   5958 	    (sc->sc_type >= WM_T_82575))
   5959 		wm_serdes_power_up_link_82575(sc);
   5960 
   5961 	/* Clear out the VLAN table -- we don't use it (yet). */
   5962 	CSR_WRITE(sc, WMREG_VET, 0);
   5963 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5964 		trynum = 10; /* Due to hw errata */
   5965 	else
   5966 		trynum = 1;
   5967 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5968 		for (j = 0; j < trynum; j++)
   5969 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5970 
   5971 	/*
   5972 	 * Set up flow-control parameters.
   5973 	 *
   5974 	 * XXX Values could probably stand some tuning.
   5975 	 */
   5976 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5977 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5978 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5979 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5980 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5981 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5982 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5983 	}
   5984 
   5985 	sc->sc_fcrtl = FCRTL_DFLT;
   5986 	if (sc->sc_type < WM_T_82543) {
   5987 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5988 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5989 	} else {
   5990 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5991 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5992 	}
   5993 
   5994 	if (sc->sc_type == WM_T_80003)
   5995 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5996 	else
   5997 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5998 
   5999 	/* Writes the control register. */
   6000 	wm_set_vlan(sc);
   6001 
   6002 	if (sc->sc_flags & WM_F_HAS_MII) {
   6003 		uint16_t kmreg;
   6004 
   6005 		switch (sc->sc_type) {
   6006 		case WM_T_80003:
   6007 		case WM_T_ICH8:
   6008 		case WM_T_ICH9:
   6009 		case WM_T_ICH10:
   6010 		case WM_T_PCH:
   6011 		case WM_T_PCH2:
   6012 		case WM_T_PCH_LPT:
   6013 		case WM_T_PCH_SPT:
   6014 		case WM_T_PCH_CNP:
   6015 			/*
   6016 			 * Set the mac to wait the maximum time between each
   6017 			 * iteration and increase the max iterations when
   6018 			 * polling the phy; this fixes erroneous timeouts at
   6019 			 * 10Mbps.
   6020 			 */
   6021 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6022 			    0xFFFF);
   6023 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6024 			    &kmreg);
   6025 			kmreg |= 0x3F;
   6026 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6027 			    kmreg);
   6028 			break;
   6029 		default:
   6030 			break;
   6031 		}
   6032 
   6033 		if (sc->sc_type == WM_T_80003) {
   6034 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6035 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6036 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6037 
   6038 			/* Bypass RX and TX FIFO's */
   6039 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6040 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6041 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6042 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6043 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6044 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6045 		}
   6046 	}
   6047 #if 0
   6048 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6049 #endif
   6050 
   6051 	/* Set up checksum offload parameters. */
   6052 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6053 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6054 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6055 		reg |= RXCSUM_IPOFL;
   6056 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6057 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6058 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6059 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6060 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6061 
   6062 	/* Set registers about MSI-X */
   6063 	if (wm_is_using_msix(sc)) {
   6064 		uint32_t ivar, qintr_idx;
   6065 		struct wm_queue *wmq;
   6066 		unsigned int qid;
   6067 
   6068 		if (sc->sc_type == WM_T_82575) {
   6069 			/* Interrupt control */
   6070 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6071 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6072 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6073 
   6074 			/* TX and RX */
   6075 			for (i = 0; i < sc->sc_nqueues; i++) {
   6076 				wmq = &sc->sc_queue[i];
   6077 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6078 				    EITR_TX_QUEUE(wmq->wmq_id)
   6079 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6080 			}
   6081 			/* Link status */
   6082 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6083 			    EITR_OTHER);
   6084 		} else if (sc->sc_type == WM_T_82574) {
   6085 			/* Interrupt control */
   6086 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6087 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6088 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6089 
   6090 			/*
    6091 			 * Work around an issue with spurious interrupts
    6092 			 * in MSI-X mode.
    6093 			 * At wm_initialize_hardware_bits(), sc_nintrs was not
    6094 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   6095 			 */
   6096 			reg = CSR_READ(sc, WMREG_RFCTL);
   6097 			reg |= WMREG_RFCTL_ACKDIS;
   6098 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6099 
   6100 			ivar = 0;
   6101 			/* TX and RX */
   6102 			for (i = 0; i < sc->sc_nqueues; i++) {
   6103 				wmq = &sc->sc_queue[i];
   6104 				qid = wmq->wmq_id;
   6105 				qintr_idx = wmq->wmq_intr_idx;
   6106 
   6107 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6108 				    IVAR_TX_MASK_Q_82574(qid));
   6109 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6110 				    IVAR_RX_MASK_Q_82574(qid));
   6111 			}
   6112 			/* Link status */
   6113 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6114 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6115 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6116 		} else {
   6117 			/* Interrupt control */
   6118 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6119 			    | GPIE_EIAME | GPIE_PBA);
   6120 
   6121 			switch (sc->sc_type) {
   6122 			case WM_T_82580:
   6123 			case WM_T_I350:
   6124 			case WM_T_I354:
   6125 			case WM_T_I210:
   6126 			case WM_T_I211:
   6127 				/* TX and RX */
   6128 				for (i = 0; i < sc->sc_nqueues; i++) {
   6129 					wmq = &sc->sc_queue[i];
   6130 					qid = wmq->wmq_id;
   6131 					qintr_idx = wmq->wmq_intr_idx;
   6132 
   6133 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6134 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6135 					ivar |= __SHIFTIN((qintr_idx
   6136 						| IVAR_VALID),
   6137 					    IVAR_TX_MASK_Q(qid));
   6138 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6139 					ivar |= __SHIFTIN((qintr_idx
   6140 						| IVAR_VALID),
   6141 					    IVAR_RX_MASK_Q(qid));
   6142 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6143 				}
   6144 				break;
   6145 			case WM_T_82576:
   6146 				/* TX and RX */
   6147 				for (i = 0; i < sc->sc_nqueues; i++) {
   6148 					wmq = &sc->sc_queue[i];
   6149 					qid = wmq->wmq_id;
   6150 					qintr_idx = wmq->wmq_intr_idx;
   6151 
   6152 					ivar = CSR_READ(sc,
   6153 					    WMREG_IVAR_Q_82576(qid));
   6154 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6155 					ivar |= __SHIFTIN((qintr_idx
   6156 						| IVAR_VALID),
   6157 					    IVAR_TX_MASK_Q_82576(qid));
   6158 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6159 					ivar |= __SHIFTIN((qintr_idx
   6160 						| IVAR_VALID),
   6161 					    IVAR_RX_MASK_Q_82576(qid));
   6162 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6163 					    ivar);
   6164 				}
   6165 				break;
   6166 			default:
   6167 				break;
   6168 			}
   6169 
   6170 			/* Link status */
   6171 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6172 			    IVAR_MISC_OTHER);
   6173 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6174 		}
   6175 
   6176 		if (wm_is_using_multiqueue(sc)) {
   6177 			wm_init_rss(sc);
   6178 
   6179 			/*
    6180 			 * NOTE: Receive Full-Packet Checksum Offload
    6181 			 * is mutually exclusive with Multiqueue. However,
    6182 			 * this is not the same as TCP/IP checksums, which
    6183 			 * still work.
    6184 			 */
   6185 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6186 			reg |= RXCSUM_PCSD;
   6187 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6188 		}
   6189 	}
   6190 
   6191 	/* Set up the interrupt registers. */
   6192 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6193 
   6194 	/* Enable SFP module insertion interrupt if it's required */
   6195 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6196 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6197 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6198 		sfp_mask = ICR_GPI(0);
   6199 	}
   6200 
   6201 	if (wm_is_using_msix(sc)) {
   6202 		uint32_t mask;
   6203 		struct wm_queue *wmq;
   6204 
   6205 		switch (sc->sc_type) {
   6206 		case WM_T_82574:
   6207 			mask = 0;
   6208 			for (i = 0; i < sc->sc_nqueues; i++) {
   6209 				wmq = &sc->sc_queue[i];
   6210 				mask |= ICR_TXQ(wmq->wmq_id);
   6211 				mask |= ICR_RXQ(wmq->wmq_id);
   6212 			}
   6213 			mask |= ICR_OTHER;
   6214 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6215 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6216 			break;
   6217 		default:
   6218 			if (sc->sc_type == WM_T_82575) {
   6219 				mask = 0;
   6220 				for (i = 0; i < sc->sc_nqueues; i++) {
   6221 					wmq = &sc->sc_queue[i];
   6222 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6223 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6224 				}
   6225 				mask |= EITR_OTHER;
   6226 			} else {
   6227 				mask = 0;
   6228 				for (i = 0; i < sc->sc_nqueues; i++) {
   6229 					wmq = &sc->sc_queue[i];
   6230 					mask |= 1 << wmq->wmq_intr_idx;
   6231 				}
   6232 				mask |= 1 << sc->sc_link_intr_idx;
   6233 			}
   6234 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6235 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6236 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6237 
   6238 			/* For other interrupts */
   6239 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6240 			break;
   6241 		}
   6242 	} else {
   6243 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6244 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6245 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6246 	}
   6247 
   6248 	/* Set up the inter-packet gap. */
   6249 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6250 
   6251 	if (sc->sc_type >= WM_T_82543) {
   6252 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6253 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6254 			wm_itrs_writereg(sc, wmq);
   6255 		}
    6256 		/*
    6257 		 * Link interrupts occur much less frequently than TX
    6258 		 * and RX interrupts, so we don't tune the
    6259 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6260 		 * if_igb does.
    6261 		 */
   6262 	}
   6263 
   6264 	/* Set the VLAN ethernetype. */
   6265 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6266 
   6267 	/*
   6268 	 * Set up the transmit control register; we start out with
    6269 	 * a collision distance suitable for FDX, but update it when
   6270 	 * we resolve the media type.
   6271 	 */
   6272 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6273 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6274 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6275 	if (sc->sc_type >= WM_T_82571)
   6276 		sc->sc_tctl |= TCTL_MULR;
   6277 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6278 
   6279 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6280 		/* Write TDT after TCTL.EN is set. See the datasheet. */
   6281 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6282 	}
   6283 
   6284 	if (sc->sc_type == WM_T_80003) {
   6285 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6286 		reg &= ~TCTL_EXT_GCEX_MASK;
   6287 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6288 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6289 	}
   6290 
   6291 	/* Set the media. */
   6292 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6293 		goto out;
   6294 
   6295 	/* Configure for OS presence */
   6296 	wm_init_manageability(sc);
   6297 
   6298 	/*
   6299 	 * Set up the receive control register; we actually program the
   6300 	 * register when we set the receive filter. Use multicast address
   6301 	 * offset type 0.
   6302 	 *
   6303 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6304 	 * don't enable that feature.
   6305 	 */
   6306 	sc->sc_mchash_type = 0;
   6307 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6308 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6309 
    6310 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   6311 	if (sc->sc_type == WM_T_82574)
   6312 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6313 
   6314 	/*
    6315 	 * The I350 has a bug where it always strips the CRC, whether
    6316 	 * asked to or not. So ask for a stripped CRC here and cope in rxeof.
   6317 	 */
   6318 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6319 	    || (sc->sc_type == WM_T_I210))
   6320 		sc->sc_rctl |= RCTL_SECRC;
   6321 
   6322 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6323 	    && (ifp->if_mtu > ETHERMTU)) {
   6324 		sc->sc_rctl |= RCTL_LPE;
   6325 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6326 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6327 	}
   6328 
   6329 	if (MCLBYTES == 2048)
   6330 		sc->sc_rctl |= RCTL_2k;
   6331 	else {
   6332 		if (sc->sc_type >= WM_T_82543) {
   6333 			switch (MCLBYTES) {
   6334 			case 4096:
   6335 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6336 				break;
   6337 			case 8192:
   6338 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6339 				break;
   6340 			case 16384:
   6341 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6342 				break;
   6343 			default:
   6344 				panic("wm_init: MCLBYTES %d unsupported",
   6345 				    MCLBYTES);
   6346 				break;
   6347 			}
   6348 		} else
   6349 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6350 	}
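         	/*
         	 * On most NetBSD ports MCLBYTES is 2048, so the common case
         	 * above is RCTL_2k; the RCTL_BSEX sizes only come into play
         	 * when the kernel is built with larger mbuf clusters.
         	 */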
   6351 
   6352 	/* Enable ECC */
   6353 	switch (sc->sc_type) {
   6354 	case WM_T_82571:
   6355 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6356 		reg |= PBA_ECC_CORR_EN;
   6357 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6358 		break;
   6359 	case WM_T_PCH_LPT:
   6360 	case WM_T_PCH_SPT:
   6361 	case WM_T_PCH_CNP:
   6362 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6363 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6364 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6365 
   6366 		sc->sc_ctrl |= CTRL_MEHE;
   6367 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6368 		break;
   6369 	default:
   6370 		break;
   6371 	}
   6372 
   6373 	/*
   6374 	 * Set the receive filter.
   6375 	 *
   6376 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6377 	 * the setting of RCTL.EN in wm_set_filter()
   6378 	 */
   6379 	wm_set_filter(sc);
   6380 
    6381 	/* On 82575 and later, set RDT only if RX is enabled. */
   6382 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6383 		int qidx;
   6384 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6385 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6386 			for (i = 0; i < WM_NRXDESC; i++) {
   6387 				mutex_enter(rxq->rxq_lock);
   6388 				wm_init_rxdesc(rxq, i);
   6389 				mutex_exit(rxq->rxq_lock);
    6391 			}
   6392 		}
   6393 	}
   6394 
   6395 	wm_unset_stopping_flags(sc);
   6396 
   6397 	/* Start the one second link check clock. */
   6398 	callout_schedule(&sc->sc_tick_ch, hz);
   6399 
   6400 	/* ...all done! */
   6401 	ifp->if_flags |= IFF_RUNNING;
   6402 
   6403  out:
   6404 	/* Save last flags for the callback */
   6405 	sc->sc_if_flags = ifp->if_flags;
   6406 	sc->sc_ec_capenable = ec->ec_capenable;
   6407 	if (error)
   6408 		log(LOG_ERR, "%s: interface not running\n",
   6409 		    device_xname(sc->sc_dev));
   6410 	return error;
   6411 }
   6412 
   6413 /*
   6414  * wm_stop:		[ifnet interface function]
   6415  *
   6416  *	Stop transmission on the interface.
   6417  */
   6418 static void
   6419 wm_stop(struct ifnet *ifp, int disable)
   6420 {
   6421 	struct wm_softc *sc = ifp->if_softc;
   6422 
   6423 	ASSERT_SLEEPABLE();
   6424 
   6425 	WM_CORE_LOCK(sc);
   6426 	wm_stop_locked(ifp, disable ? true : false, true);
   6427 	WM_CORE_UNLOCK(sc);
   6428 
   6429 	/*
    6430 	 * After wm_set_stopping_flags(), it is guaranteed that
    6431 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    6432 	 * However, workqueue_wait() cannot be called in
    6433 	 * wm_stop_locked() because it can sleep, so call
    6434 	 * workqueue_wait() here.
   6435 	 */
   6436 	for (int i = 0; i < sc->sc_nqueues; i++)
   6437 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6438 }
   6439 
   6440 static void
   6441 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6442 {
   6443 	struct wm_softc *sc = ifp->if_softc;
   6444 	struct wm_txsoft *txs;
   6445 	int i, qidx;
   6446 
   6447 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6448 		device_xname(sc->sc_dev), __func__));
   6449 	KASSERT(WM_CORE_LOCKED(sc));
   6450 
   6451 	wm_set_stopping_flags(sc);
   6452 
   6453 	if (sc->sc_flags & WM_F_HAS_MII) {
   6454 		/* Down the MII. */
   6455 		mii_down(&sc->sc_mii);
   6456 	} else {
   6457 #if 0
   6458 		/* Should we clear PHY's status properly? */
   6459 		wm_reset(sc);
   6460 #endif
   6461 	}
   6462 
   6463 	/* Stop the transmit and receive processes. */
   6464 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6465 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6466 	sc->sc_rctl &= ~RCTL_EN;
   6467 
   6468 	/*
   6469 	 * Clear the interrupt mask to ensure the device cannot assert its
   6470 	 * interrupt line.
   6471 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6472 	 * service any currently pending or shared interrupt.
   6473 	 */
   6474 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6475 	sc->sc_icr = 0;
   6476 	if (wm_is_using_msix(sc)) {
   6477 		if (sc->sc_type != WM_T_82574) {
   6478 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6479 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6480 		} else
   6481 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6482 	}
   6483 
   6484 	/*
   6485 	 * Stop callouts after interrupts are disabled; if we have
   6486 	 * to wait for them, we will be releasing the CORE_LOCK
   6487 	 * briefly, which will unblock interrupts on the current CPU.
   6488 	 */
   6489 
   6490 	/* Stop the one second clock. */
   6491 	if (wait)
   6492 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6493 	else
   6494 		callout_stop(&sc->sc_tick_ch);
   6495 
   6496 	/* Stop the 82547 Tx FIFO stall check timer. */
   6497 	if (sc->sc_type == WM_T_82547) {
   6498 		if (wait)
   6499 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6500 		else
   6501 			callout_stop(&sc->sc_txfifo_ch);
   6502 	}
   6503 
   6504 	/* Release any queued transmit buffers. */
   6505 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6506 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6507 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6508 		mutex_enter(txq->txq_lock);
   6509 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6510 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6511 			txs = &txq->txq_soft[i];
   6512 			if (txs->txs_mbuf != NULL) {
    6513 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6514 				m_freem(txs->txs_mbuf);
   6515 				txs->txs_mbuf = NULL;
   6516 			}
   6517 		}
   6518 		mutex_exit(txq->txq_lock);
   6519 	}
   6520 
   6521 	/* Mark the interface as down and cancel the watchdog timer. */
   6522 	ifp->if_flags &= ~IFF_RUNNING;
   6523 
   6524 	if (disable) {
   6525 		for (i = 0; i < sc->sc_nqueues; i++) {
   6526 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6527 			mutex_enter(rxq->rxq_lock);
   6528 			wm_rxdrain(rxq);
   6529 			mutex_exit(rxq->rxq_lock);
   6530 		}
   6531 	}
   6532 
   6533 #if 0 /* notyet */
   6534 	if (sc->sc_type >= WM_T_82544)
   6535 		CSR_WRITE(sc, WMREG_WUC, 0);
   6536 #endif
   6537 }
   6538 
   6539 static void
   6540 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6541 {
   6542 	struct mbuf *m;
   6543 	int i;
   6544 
   6545 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6546 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6547 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6548 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6549 		    m->m_data, m->m_len, m->m_flags);
   6550 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6551 	    i, i == 1 ? "" : "s");
   6552 }
   6553 
   6554 /*
   6555  * wm_82547_txfifo_stall:
   6556  *
   6557  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6558  *	reset the FIFO pointers, and restart packet transmission.
   6559  */
   6560 static void
   6561 wm_82547_txfifo_stall(void *arg)
   6562 {
   6563 	struct wm_softc *sc = arg;
   6564 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6565 
   6566 	mutex_enter(txq->txq_lock);
   6567 
   6568 	if (txq->txq_stopping)
   6569 		goto out;
   6570 
   6571 	if (txq->txq_fifo_stall) {
   6572 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6573 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6574 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6575 			/*
   6576 			 * Packets have drained.  Stop transmitter, reset
   6577 			 * FIFO pointers, restart transmitter, and kick
   6578 			 * the packet queue.
   6579 			 */
   6580 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6581 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6582 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6583 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6584 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6585 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6586 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6587 			CSR_WRITE_FLUSH(sc);
   6588 
   6589 			txq->txq_fifo_head = 0;
   6590 			txq->txq_fifo_stall = 0;
   6591 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6592 		} else {
   6593 			/*
   6594 			 * Still waiting for packets to drain; try again in
   6595 			 * another tick.
   6596 			 */
   6597 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6598 		}
   6599 	}
   6600 
   6601 out:
   6602 	mutex_exit(txq->txq_lock);
   6603 }
   6604 
   6605 /*
   6606  * wm_82547_txfifo_bugchk:
   6607  *
   6608  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6609  *	prevent enqueueing a packet that would wrap around the end
    6610  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6611  *
   6612  *	We do this by checking the amount of space before the end
   6613  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6614  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6615  *	the internal FIFO pointers to the beginning, and restart
   6616  *	transmission on the interface.
   6617  */
   6618 #define	WM_FIFO_HDR		0x10
   6619 #define	WM_82547_PAD_LEN	0x3e0
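         /*
          * Worked example of the check below (values assumed, purely
          * illustrative): a 1514-byte frame is accounted as
          * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600 bytes.
          * With txq_fifo_size = 0x2000 and txq_fifo_head = 0x1c00,
          * space = 0x400, and the frame is sent only if
          * len < WM_82547_PAD_LEN + space = 0x3e0 + 0x400 = 0x7e0;
          * 0x600 < 0x7e0, so it fits and the head advances modulo the
          * FIFO size.
          */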
   6620 static int
   6621 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6622 {
   6623 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6624 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6625 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6626 
   6627 	/* Just return if already stalled. */
   6628 	if (txq->txq_fifo_stall)
   6629 		return 1;
   6630 
   6631 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6632 		/* Stall only occurs in half-duplex mode. */
   6633 		goto send_packet;
   6634 	}
   6635 
   6636 	if (len >= WM_82547_PAD_LEN + space) {
   6637 		txq->txq_fifo_stall = 1;
   6638 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6639 		return 1;
   6640 	}
   6641 
   6642  send_packet:
   6643 	txq->txq_fifo_head += len;
   6644 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6645 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6646 
   6647 	return 0;
   6648 }
   6649 
   6650 static int
   6651 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6652 {
   6653 	int error;
   6654 
   6655 	/*
   6656 	 * Allocate the control data structures, and create and load the
   6657 	 * DMA map for it.
   6658 	 *
   6659 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6660 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6661 	 * both sets within the same 4G segment.
   6662 	 */
   6663 	if (sc->sc_type < WM_T_82544)
   6664 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6665 	else
   6666 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6667 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6668 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6669 	else
   6670 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6671 
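         	/*
         	 * The 0x100000000ULL "boundary" argument below is what
         	 * enforces the same-4G-segment note above: bus_dmamem_alloc()
         	 * (see bus_dma(9)) never returns memory that crosses the
         	 * given boundary.
         	 */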
   6672 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6673 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6674 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6675 		aprint_error_dev(sc->sc_dev,
   6676 		    "unable to allocate TX control data, error = %d\n",
   6677 		    error);
   6678 		goto fail_0;
   6679 	}
   6680 
   6681 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6682 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6683 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6684 		aprint_error_dev(sc->sc_dev,
   6685 		    "unable to map TX control data, error = %d\n", error);
   6686 		goto fail_1;
   6687 	}
   6688 
   6689 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6690 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6691 		aprint_error_dev(sc->sc_dev,
   6692 		    "unable to create TX control data DMA map, error = %d\n",
   6693 		    error);
   6694 		goto fail_2;
   6695 	}
   6696 
   6697 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6698 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6699 		aprint_error_dev(sc->sc_dev,
   6700 		    "unable to load TX control data DMA map, error = %d\n",
   6701 		    error);
   6702 		goto fail_3;
   6703 	}
   6704 
   6705 	return 0;
   6706 
   6707  fail_3:
   6708 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6709  fail_2:
   6710 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6711 	    WM_TXDESCS_SIZE(txq));
   6712  fail_1:
   6713 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6714  fail_0:
   6715 	return error;
   6716 }
   6717 
   6718 static void
   6719 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6720 {
   6721 
   6722 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6723 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6724 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6725 	    WM_TXDESCS_SIZE(txq));
   6726 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6727 }
   6728 
   6729 static int
   6730 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6731 {
   6732 	int error;
   6733 	size_t rxq_descs_size;
   6734 
   6735 	/*
   6736 	 * Allocate the control data structures, and create and load the
   6737 	 * DMA map for it.
   6738 	 *
   6739 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6740 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6741 	 * both sets within the same 4G segment.
   6742 	 */
   6743 	rxq->rxq_ndesc = WM_NRXDESC;
   6744 	if (sc->sc_type == WM_T_82574)
   6745 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6746 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6747 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6748 	else
   6749 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6750 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6751 
   6752 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6753 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6754 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6755 		aprint_error_dev(sc->sc_dev,
   6756 		    "unable to allocate RX control data, error = %d\n",
   6757 		    error);
   6758 		goto fail_0;
   6759 	}
   6760 
   6761 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6762 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6763 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6764 		aprint_error_dev(sc->sc_dev,
   6765 		    "unable to map RX control data, error = %d\n", error);
   6766 		goto fail_1;
   6767 	}
   6768 
   6769 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6770 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6771 		aprint_error_dev(sc->sc_dev,
   6772 		    "unable to create RX control data DMA map, error = %d\n",
   6773 		    error);
   6774 		goto fail_2;
   6775 	}
   6776 
   6777 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6778 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6779 		aprint_error_dev(sc->sc_dev,
   6780 		    "unable to load RX control data DMA map, error = %d\n",
   6781 		    error);
   6782 		goto fail_3;
   6783 	}
   6784 
   6785 	return 0;
   6786 
   6787  fail_3:
   6788 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6789  fail_2:
   6790 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6791 	    rxq_descs_size);
   6792  fail_1:
   6793 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6794  fail_0:
   6795 	return error;
   6796 }
   6797 
   6798 static void
   6799 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6800 {
   6801 
   6802 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6803 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6804 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6805 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6806 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6807 }
    6808 
   6810 static int
   6811 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6812 {
   6813 	int i, error;
   6814 
   6815 	/* Create the transmit buffer DMA maps. */
   6816 	WM_TXQUEUELEN(txq) =
   6817 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6818 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6819 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6820 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6821 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6822 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6823 			aprint_error_dev(sc->sc_dev,
   6824 			    "unable to create Tx DMA map %d, error = %d\n",
   6825 			    i, error);
   6826 			goto fail;
   6827 		}
   6828 	}
   6829 
   6830 	return 0;
   6831 
   6832  fail:
   6833 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6834 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6835 			bus_dmamap_destroy(sc->sc_dmat,
   6836 			    txq->txq_soft[i].txs_dmamap);
   6837 	}
   6838 	return error;
   6839 }
   6840 
   6841 static void
   6842 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6843 {
   6844 	int i;
   6845 
   6846 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6847 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6848 			bus_dmamap_destroy(sc->sc_dmat,
   6849 			    txq->txq_soft[i].txs_dmamap);
   6850 	}
   6851 }
   6852 
   6853 static int
   6854 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6855 {
   6856 	int i, error;
   6857 
   6858 	/* Create the receive buffer DMA maps. */
   6859 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6860 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6861 			    MCLBYTES, 0, 0,
   6862 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6863 			aprint_error_dev(sc->sc_dev,
    6864 			    "unable to create Rx DMA map %d, error = %d\n",
   6865 			    i, error);
   6866 			goto fail;
   6867 		}
   6868 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6869 	}
   6870 
   6871 	return 0;
   6872 
   6873  fail:
   6874 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6875 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6876 			bus_dmamap_destroy(sc->sc_dmat,
   6877 			    rxq->rxq_soft[i].rxs_dmamap);
   6878 	}
   6879 	return error;
   6880 }
   6881 
   6882 static void
   6883 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6884 {
   6885 	int i;
   6886 
   6887 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6888 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6889 			bus_dmamap_destroy(sc->sc_dmat,
   6890 			    rxq->rxq_soft[i].rxs_dmamap);
   6891 	}
   6892 }
   6893 
   6894 /*
    6895  * wm_alloc_txrx_queues:
    6896  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6897  */
   6898 static int
   6899 wm_alloc_txrx_queues(struct wm_softc *sc)
   6900 {
   6901 	int i, error, tx_done, rx_done;
   6902 
   6903 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6904 	    KM_SLEEP);
   6905 	if (sc->sc_queue == NULL) {
    6906 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6907 		error = ENOMEM;
   6908 		goto fail_0;
   6909 	}
   6910 
   6911 	/* For transmission */
   6912 	error = 0;
   6913 	tx_done = 0;
   6914 	for (i = 0; i < sc->sc_nqueues; i++) {
   6915 #ifdef WM_EVENT_COUNTERS
   6916 		int j;
   6917 		const char *xname;
   6918 #endif
   6919 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6920 		txq->txq_sc = sc;
   6921 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6922 
   6923 		error = wm_alloc_tx_descs(sc, txq);
   6924 		if (error)
   6925 			break;
   6926 		error = wm_alloc_tx_buffer(sc, txq);
   6927 		if (error) {
   6928 			wm_free_tx_descs(sc, txq);
   6929 			break;
   6930 		}
   6931 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6932 		if (txq->txq_interq == NULL) {
   6933 			wm_free_tx_descs(sc, txq);
   6934 			wm_free_tx_buffer(sc, txq);
   6935 			error = ENOMEM;
   6936 			break;
   6937 		}
   6938 
   6939 #ifdef WM_EVENT_COUNTERS
   6940 		xname = device_xname(sc->sc_dev);
   6941 
   6942 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6943 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6944 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6945 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6946 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6947 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6948 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6949 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6950 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6951 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6952 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6953 
   6954 		for (j = 0; j < WM_NTXSEGS; j++) {
   6955 			snprintf(txq->txq_txseg_evcnt_names[j],
   6956 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6957 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6958 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6959 		}
   6960 
   6961 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6962 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6963 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6964 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6965 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6966 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   6967 #endif /* WM_EVENT_COUNTERS */
   6968 
   6969 		tx_done++;
   6970 	}
   6971 	if (error)
   6972 		goto fail_1;
   6973 
   6974 	/* For receive */
   6975 	error = 0;
   6976 	rx_done = 0;
   6977 	for (i = 0; i < sc->sc_nqueues; i++) {
   6978 #ifdef WM_EVENT_COUNTERS
   6979 		const char *xname;
   6980 #endif
   6981 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6982 		rxq->rxq_sc = sc;
   6983 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6984 
   6985 		error = wm_alloc_rx_descs(sc, rxq);
   6986 		if (error)
   6987 			break;
   6988 
   6989 		error = wm_alloc_rx_buffer(sc, rxq);
   6990 		if (error) {
   6991 			wm_free_rx_descs(sc, rxq);
   6992 			break;
   6993 		}
   6994 
   6995 #ifdef WM_EVENT_COUNTERS
   6996 		xname = device_xname(sc->sc_dev);
   6997 
   6998 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6999 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7000 
   7001 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7002 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7003 #endif /* WM_EVENT_COUNTERS */
   7004 
   7005 		rx_done++;
   7006 	}
   7007 	if (error)
   7008 		goto fail_2;
   7009 
   7010 	return 0;
   7011 
   7012  fail_2:
   7013 	for (i = 0; i < rx_done; i++) {
   7014 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7015 		wm_free_rx_buffer(sc, rxq);
   7016 		wm_free_rx_descs(sc, rxq);
   7017 		if (rxq->rxq_lock)
   7018 			mutex_obj_free(rxq->rxq_lock);
   7019 	}
   7020  fail_1:
   7021 	for (i = 0; i < tx_done; i++) {
   7022 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7023 		pcq_destroy(txq->txq_interq);
   7024 		wm_free_tx_buffer(sc, txq);
   7025 		wm_free_tx_descs(sc, txq);
   7026 		if (txq->txq_lock)
   7027 			mutex_obj_free(txq->txq_lock);
   7028 	}
   7029 
   7030 	kmem_free(sc->sc_queue,
   7031 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7032  fail_0:
   7033 	return error;
   7034 }
   7035 
   7036 /*
    7037  * wm_free_txrx_queues:
    7038  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7039  */
   7040 static void
   7041 wm_free_txrx_queues(struct wm_softc *sc)
   7042 {
   7043 	int i;
   7044 
   7045 	for (i = 0; i < sc->sc_nqueues; i++) {
   7046 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7047 
   7048 #ifdef WM_EVENT_COUNTERS
   7049 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7050 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7051 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7052 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7053 #endif /* WM_EVENT_COUNTERS */
   7054 
   7055 		wm_free_rx_buffer(sc, rxq);
   7056 		wm_free_rx_descs(sc, rxq);
   7057 		if (rxq->rxq_lock)
   7058 			mutex_obj_free(rxq->rxq_lock);
   7059 	}
   7060 
   7061 	for (i = 0; i < sc->sc_nqueues; i++) {
   7062 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7063 		struct mbuf *m;
   7064 #ifdef WM_EVENT_COUNTERS
   7065 		int j;
   7066 
   7067 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7068 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7069 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7070 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7071 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7072 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7073 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7074 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7075 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7076 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7077 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7078 
   7079 		for (j = 0; j < WM_NTXSEGS; j++)
   7080 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7081 
   7082 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7083 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7084 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7085 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7086 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7087 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7088 #endif /* WM_EVENT_COUNTERS */
   7089 
   7090 		/* Drain txq_interq */
   7091 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7092 			m_freem(m);
   7093 		pcq_destroy(txq->txq_interq);
   7094 
   7095 		wm_free_tx_buffer(sc, txq);
   7096 		wm_free_tx_descs(sc, txq);
   7097 		if (txq->txq_lock)
   7098 			mutex_obj_free(txq->txq_lock);
   7099 	}
   7100 
   7101 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7102 }
   7103 
   7104 static void
   7105 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7106 {
   7107 
   7108 	KASSERT(mutex_owned(txq->txq_lock));
   7109 
   7110 	/* Initialize the transmit descriptor ring. */
   7111 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7112 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7113 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7114 	txq->txq_free = WM_NTXDESC(txq);
   7115 	txq->txq_next = 0;
   7116 }
   7117 
   7118 static void
   7119 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7120     struct wm_txqueue *txq)
   7121 {
   7122 
   7123 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7124 		device_xname(sc->sc_dev), __func__));
   7125 	KASSERT(mutex_owned(txq->txq_lock));
   7126 
   7127 	if (sc->sc_type < WM_T_82543) {
   7128 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7129 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7130 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7131 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7132 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7133 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7134 	} else {
   7135 		int qid = wmq->wmq_id;
   7136 
   7137 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7138 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7139 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7140 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7141 
   7142 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7143 			/*
   7144 			 * Don't write TDT before TCTL.EN is set.
   7145 			 * See the document.
    7146 			 * See the datasheet.
   7147 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7148 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7149 			    | TXDCTL_WTHRESH(0));
   7150 		else {
   7151 			/* XXX should update with AIM? */
   7152 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7153 			if (sc->sc_type >= WM_T_82540) {
   7154 				/* Should be the same */
   7155 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7156 			}
   7157 
   7158 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7159 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7160 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7161 		}
   7162 	}
   7163 }
   7164 
   7165 static void
   7166 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7167 {
   7168 	int i;
   7169 
   7170 	KASSERT(mutex_owned(txq->txq_lock));
   7171 
   7172 	/* Initialize the transmit job descriptors. */
   7173 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7174 		txq->txq_soft[i].txs_mbuf = NULL;
   7175 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7176 	txq->txq_snext = 0;
   7177 	txq->txq_sdirty = 0;
   7178 }
   7179 
   7180 static void
   7181 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7182     struct wm_txqueue *txq)
   7183 {
   7184 
   7185 	KASSERT(mutex_owned(txq->txq_lock));
   7186 
   7187 	/*
   7188 	 * Set up some register offsets that are different between
   7189 	 * the i82542 and the i82543 and later chips.
   7190 	 */
   7191 	if (sc->sc_type < WM_T_82543)
   7192 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7193 	else
   7194 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7195 
   7196 	wm_init_tx_descs(sc, txq);
   7197 	wm_init_tx_regs(sc, wmq, txq);
   7198 	wm_init_tx_buffer(sc, txq);
   7199 
   7200 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7201 	txq->txq_sending = false;
   7202 }
   7203 
   7204 static void
   7205 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7206     struct wm_rxqueue *rxq)
   7207 {
   7208 
   7209 	KASSERT(mutex_owned(rxq->rxq_lock));
   7210 
   7211 	/*
   7212 	 * Initialize the receive descriptor and receive job
   7213 	 * descriptor rings.
   7214 	 */
   7215 	if (sc->sc_type < WM_T_82543) {
   7216 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7217 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7218 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7219 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7220 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7221 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7222 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7223 
   7224 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7225 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7226 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7227 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7228 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7229 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7230 	} else {
   7231 		int qid = wmq->wmq_id;
   7232 
   7233 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7234 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7235 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7236 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7237 
   7238 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7239 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7240 				panic("%s: MCLBYTES %d unsupported for 82575"
         				    " or higher\n", __func__, MCLBYTES);
   7241 
    7242 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported for now. */
    7243 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
         			    SRRCTL_DESCTYPE_ADV_ONEBUF
    7244 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7245 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7246 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7247 			    | RXDCTL_WTHRESH(1));
   7248 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7249 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7250 		} else {
   7251 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7252 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7253 			/* XXX should update with AIM? */
   7254 			CSR_WRITE(sc, WMREG_RDTR,
   7255 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7256 			/* MUST be same */
   7257 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7258 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7259 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7260 		}
   7261 	}
   7262 }
   7263 
   7264 static int
   7265 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7266 {
   7267 	struct wm_rxsoft *rxs;
   7268 	int error, i;
   7269 
   7270 	KASSERT(mutex_owned(rxq->rxq_lock));
   7271 
   7272 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7273 		rxs = &rxq->rxq_soft[i];
   7274 		if (rxs->rxs_mbuf == NULL) {
   7275 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7276 				log(LOG_ERR, "%s: unable to allocate or map "
   7277 				    "rx buffer %d, error = %d\n",
   7278 				    device_xname(sc->sc_dev), i, error);
   7279 				/*
   7280 				 * XXX Should attempt to run with fewer receive
   7281 				 * XXX buffers instead of just failing.
   7282 				 */
   7283 				wm_rxdrain(rxq);
   7284 				return ENOMEM;
   7285 			}
   7286 		} else {
   7287 			/*
   7288 			 * For 82575 and 82576, the RX descriptors must be
   7289 			 * initialized after the setting of RCTL.EN in
   7290 			 * wm_set_filter()
   7291 			 */
   7292 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7293 				wm_init_rxdesc(rxq, i);
   7294 		}
   7295 	}
   7296 	rxq->rxq_ptr = 0;
   7297 	rxq->rxq_discard = 0;
   7298 	WM_RXCHAIN_RESET(rxq);
   7299 
   7300 	return 0;
   7301 }
   7302 
   7303 static int
   7304 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7305     struct wm_rxqueue *rxq)
   7306 {
   7307 
   7308 	KASSERT(mutex_owned(rxq->rxq_lock));
   7309 
   7310 	/*
   7311 	 * Set up some register offsets that are different between
   7312 	 * the i82542 and the i82543 and later chips.
   7313 	 */
   7314 	if (sc->sc_type < WM_T_82543)
   7315 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7316 	else
   7317 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7318 
   7319 	wm_init_rx_regs(sc, wmq, rxq);
   7320 	return wm_init_rx_buffer(sc, rxq);
   7321 }
   7322 
   7323 /*
    7324  * wm_init_txrx_queues:
    7325  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7326  */
   7327 static int
   7328 wm_init_txrx_queues(struct wm_softc *sc)
   7329 {
   7330 	int i, error = 0;
   7331 
   7332 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7333 		device_xname(sc->sc_dev), __func__));
   7334 
   7335 	for (i = 0; i < sc->sc_nqueues; i++) {
   7336 		struct wm_queue *wmq = &sc->sc_queue[i];
   7337 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7338 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7339 
   7340 		/*
   7341 		 * TODO
    7342 		 * Currently we use a constant value instead of AIM
    7343 		 * (adaptive interrupt moderation). Furthermore, the interrupt
    7344 		 * interval of the multiqueue (polling mode) path is less than
    7345 		 * the default value. More tuning, and AIM, are required.
   7346 		 */
   7347 		if (wm_is_using_multiqueue(sc))
   7348 			wmq->wmq_itr = 50;
   7349 		else
   7350 			wmq->wmq_itr = sc->sc_itr_init;
   7351 		wmq->wmq_set_itr = true;
   7352 
   7353 		mutex_enter(txq->txq_lock);
   7354 		wm_init_tx_queue(sc, wmq, txq);
   7355 		mutex_exit(txq->txq_lock);
   7356 
   7357 		mutex_enter(rxq->rxq_lock);
   7358 		error = wm_init_rx_queue(sc, wmq, rxq);
   7359 		mutex_exit(rxq->rxq_lock);
   7360 		if (error)
   7361 			break;
   7362 	}
   7363 
   7364 	return error;
   7365 }
   7366 
   7367 /*
   7368  * wm_tx_offload:
   7369  *
   7370  *	Set up TCP/IP checksumming parameters for the
   7371  *	specified packet.
   7372  */
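         /*
          * Worked example (illustrative only): for a plain IPv4/TCP frame
          * with a 14-byte Ethernet header and a 20-byte IP header,
          * offset = 14 and iphl = 20, so the context descriptor is built
          * with IPCSS = 14, IPCSO = 14 + offsetof(struct ip, ip_sum) = 24,
          * IPCSE = 14 + 20 - 1 = 33, TUCSS = 34 and, for M_CSUM_TCPv4,
          * TUCSO = 34 + offsetof(struct tcphdr, th_sum) = 50.
          */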
   7373 static void
   7374 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7375     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7376 {
   7377 	struct mbuf *m0 = txs->txs_mbuf;
   7378 	struct livengood_tcpip_ctxdesc *t;
   7379 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7380 	uint32_t ipcse;
   7381 	struct ether_header *eh;
   7382 	int offset, iphl;
   7383 	uint8_t fields;
   7384 
   7385 	/*
   7386 	 * XXX It would be nice if the mbuf pkthdr had offset
   7387 	 * fields for the protocol headers.
   7388 	 */
   7389 
   7390 	eh = mtod(m0, struct ether_header *);
   7391 	switch (htons(eh->ether_type)) {
   7392 	case ETHERTYPE_IP:
   7393 	case ETHERTYPE_IPV6:
   7394 		offset = ETHER_HDR_LEN;
   7395 		break;
   7396 
   7397 	case ETHERTYPE_VLAN:
   7398 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7399 		break;
   7400 
   7401 	default:
   7402 		/* Don't support this protocol or encapsulation. */
    7403 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
    7404 		txq->txq_last_hw_ipcs = 0;
    7405 		txq->txq_last_hw_tucs = 0;
   7406 		*fieldsp = 0;
   7407 		*cmdp = 0;
   7408 		return;
   7409 	}
   7410 
   7411 	if ((m0->m_pkthdr.csum_flags &
   7412 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7413 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7414 	} else
   7415 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7416 
   7417 	ipcse = offset + iphl - 1;
   7418 
   7419 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7420 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7421 	seg = 0;
   7422 	fields = 0;
   7423 
   7424 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7425 		int hlen = offset + iphl;
   7426 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7427 
   7428 		if (__predict_false(m0->m_len <
   7429 				    (hlen + sizeof(struct tcphdr)))) {
   7430 			/*
   7431 			 * TCP/IP headers are not in the first mbuf; we need
   7432 			 * to do this the slow and painful way. Let's just
   7433 			 * hope this doesn't happen very often.
   7434 			 */
   7435 			struct tcphdr th;
   7436 
   7437 			WM_Q_EVCNT_INCR(txq, tsopain);
   7438 
   7439 			m_copydata(m0, hlen, sizeof(th), &th);
   7440 			if (v4) {
   7441 				struct ip ip;
   7442 
   7443 				m_copydata(m0, offset, sizeof(ip), &ip);
   7444 				ip.ip_len = 0;
   7445 				m_copyback(m0,
   7446 				    offset + offsetof(struct ip, ip_len),
   7447 				    sizeof(ip.ip_len), &ip.ip_len);
   7448 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7449 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7450 			} else {
   7451 				struct ip6_hdr ip6;
   7452 
   7453 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7454 				ip6.ip6_plen = 0;
   7455 				m_copyback(m0,
   7456 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7457 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7458 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7459 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7460 			}
   7461 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7462 			    sizeof(th.th_sum), &th.th_sum);
   7463 
   7464 			hlen += th.th_off << 2;
   7465 		} else {
   7466 			/*
   7467 			 * TCP/IP headers are in the first mbuf; we can do
   7468 			 * this the easy way.
   7469 			 */
   7470 			struct tcphdr *th;
   7471 
   7472 			if (v4) {
   7473 				struct ip *ip =
   7474 				    (void *)(mtod(m0, char *) + offset);
   7475 				th = (void *)(mtod(m0, char *) + hlen);
   7476 
   7477 				ip->ip_len = 0;
   7478 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7479 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7480 			} else {
   7481 				struct ip6_hdr *ip6 =
   7482 				    (void *)(mtod(m0, char *) + offset);
   7483 				th = (void *)(mtod(m0, char *) + hlen);
   7484 
   7485 				ip6->ip6_plen = 0;
   7486 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7487 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7488 			}
   7489 			hlen += th->th_off << 2;
   7490 		}
   7491 
   7492 		if (v4) {
   7493 			WM_Q_EVCNT_INCR(txq, tso);
   7494 			cmdlen |= WTX_TCPIP_CMD_IP;
   7495 		} else {
   7496 			WM_Q_EVCNT_INCR(txq, tso6);
   7497 			ipcse = 0;
   7498 		}
   7499 		cmd |= WTX_TCPIP_CMD_TSE;
   7500 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7501 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7502 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7503 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7504 	}
   7505 
   7506 	/*
   7507 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7508 	 * offload feature, if we load the context descriptor, we
   7509 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7510 	 */
   7511 
   7512 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7513 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7514 	    WTX_TCPIP_IPCSE(ipcse);
   7515 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7516 		WM_Q_EVCNT_INCR(txq, ipsum);
   7517 		fields |= WTX_IXSM;
   7518 	}
   7519 
   7520 	offset += iphl;
   7521 
   7522 	if (m0->m_pkthdr.csum_flags &
   7523 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7524 		WM_Q_EVCNT_INCR(txq, tusum);
   7525 		fields |= WTX_TXSM;
   7526 		tucs = WTX_TCPIP_TUCSS(offset) |
   7527 		    WTX_TCPIP_TUCSO(offset +
   7528 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7529 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7530 	} else if ((m0->m_pkthdr.csum_flags &
   7531 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7532 		WM_Q_EVCNT_INCR(txq, tusum6);
   7533 		fields |= WTX_TXSM;
   7534 		tucs = WTX_TCPIP_TUCSS(offset) |
   7535 		    WTX_TCPIP_TUCSO(offset +
   7536 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7537 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7538 	} else {
   7539 		/* Just initialize it to a valid TCP context. */
   7540 		tucs = WTX_TCPIP_TUCSS(offset) |
   7541 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7542 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7543 	}
   7544 
   7545 	*cmdp = cmd;
   7546 	*fieldsp = fields;
   7547 
   7548 	/*
    7549 	 * We don't have to write a context descriptor for every packet,
    7550 	 * except on the 82574. For the 82574, we must write a context
    7551 	 * descriptor for every packet when we use two descriptor queues.
   7552 	 *
   7553 	 * The 82574L can only remember the *last* context used
    7554 	 * regardless of the queue it was used for.  We cannot reuse
   7555 	 * contexts on this hardware platform and must generate a new
   7556 	 * context every time.  82574L hardware spec, section 7.2.6,
   7557 	 * second note.
   7558 	 */
   7559 	if (sc->sc_nqueues < 2) {
    7560 		/*
    7561 		 * Setting up a new checksum offload context for every
    7562 		 * frame takes a lot of processing time for the hardware.
    7563 		 * This also reduces performance a lot for small
    7564 		 * frames, so avoid it if the driver can use a
    7565 		 * previously configured checksum offload context.
    7566 		 * For TSO, in theory we could reuse the same TSO
    7567 		 * context only if the frame is of the same type
    7568 		 * (IP/TCP) and has the same MSS. However, checking
    7569 		 * whether a frame has the same IP/TCP structure is
    7570 		 * hard, so just ignore that and always establish a
    7571 		 * new TSO context.
    7572 		 */
   7573 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7574 		    == 0) {
   7575 			if (txq->txq_last_hw_cmd == cmd &&
   7576 			    txq->txq_last_hw_fields == fields &&
   7577 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7578 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7579 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7580 				return;
   7581 			}
   7582 		}
   7583 
    7584 		txq->txq_last_hw_cmd = cmd;
    7585 		txq->txq_last_hw_fields = fields;
    7586 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
    7587 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7588 	}
   7589 
   7590 	/* Fill in the context descriptor. */
   7591 	t = (struct livengood_tcpip_ctxdesc *)
   7592 	    &txq->txq_descs[txq->txq_next];
   7593 	t->tcpip_ipcs = htole32(ipcs);
   7594 	t->tcpip_tucs = htole32(tucs);
   7595 	t->tcpip_cmdlen = htole32(cmdlen);
   7596 	t->tcpip_seg = htole32(seg);
   7597 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7598 
   7599 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7600 	txs->txs_ndesc++;
   7601 }
   7602 
   7603 static inline int
   7604 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7605 {
   7606 	struct wm_softc *sc = ifp->if_softc;
   7607 	u_int cpuid = cpu_index(curcpu());
   7608 
   7609 	/*
    7610 	 * Currently, a simple distribution strategy.
    7611 	 * TODO:
    7612 	 * Distribute by flowid (the RSS hash value).
   7613 	 */
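         	/*
         	 * Example with assumed values: if ncpu = 8,
         	 * sc_affinity_offset = 2 and sc_nqueues = 4, a packet sent
         	 * from CPU 5 maps to ((5 + 8 - 2) % 8) % 4 = 3, i.e. queue 3.
         	 */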
   7614 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7615 }
   7616 
   7617 /*
   7618  * wm_start:		[ifnet interface function]
   7619  *
   7620  *	Start packet transmission on the interface.
   7621  */
   7622 static void
   7623 wm_start(struct ifnet *ifp)
   7624 {
   7625 	struct wm_softc *sc = ifp->if_softc;
   7626 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7627 
   7628 #ifdef WM_MPSAFE
   7629 	KASSERT(if_is_mpsafe(ifp));
   7630 #endif
   7631 	/*
   7632 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7633 	 */
   7634 
   7635 	mutex_enter(txq->txq_lock);
   7636 	if (!txq->txq_stopping)
   7637 		wm_start_locked(ifp);
   7638 	mutex_exit(txq->txq_lock);
   7639 }
   7640 
   7641 static void
   7642 wm_start_locked(struct ifnet *ifp)
   7643 {
   7644 	struct wm_softc *sc = ifp->if_softc;
   7645 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7646 
   7647 	wm_send_common_locked(ifp, txq, false);
   7648 }
   7649 
   7650 static int
   7651 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7652 {
   7653 	int qid;
   7654 	struct wm_softc *sc = ifp->if_softc;
   7655 	struct wm_txqueue *txq;
   7656 
   7657 	qid = wm_select_txqueue(ifp, m);
   7658 	txq = &sc->sc_queue[qid].wmq_txq;
   7659 
   7660 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7661 		m_freem(m);
   7662 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7663 		return ENOBUFS;
   7664 	}
   7665 
   7666 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7667 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7668 	if (m->m_flags & M_MCAST)
   7669 		if_statinc_ref(nsr, if_omcasts);
   7670 	IF_STAT_PUTREF(ifp);
   7671 
   7672 	if (mutex_tryenter(txq->txq_lock)) {
   7673 		if (!txq->txq_stopping)
   7674 			wm_transmit_locked(ifp, txq);
   7675 		mutex_exit(txq->txq_lock);
   7676 	}
   7677 
   7678 	return 0;
   7679 }
   7680 
   7681 static void
   7682 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7683 {
   7684 
   7685 	wm_send_common_locked(ifp, txq, true);
   7686 }
   7687 
   7688 static void
   7689 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7690     bool is_transmit)
   7691 {
   7692 	struct wm_softc *sc = ifp->if_softc;
   7693 	struct mbuf *m0;
   7694 	struct wm_txsoft *txs;
   7695 	bus_dmamap_t dmamap;
   7696 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7697 	bus_addr_t curaddr;
   7698 	bus_size_t seglen, curlen;
   7699 	uint32_t cksumcmd;
   7700 	uint8_t cksumfields;
   7701 	bool remap = true;
   7702 
   7703 	KASSERT(mutex_owned(txq->txq_lock));
   7704 
   7705 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7706 		return;
   7707 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7708 		return;
   7709 
   7710 	/* Remember the previous number of free descriptors. */
   7711 	ofree = txq->txq_free;
   7712 
   7713 	/*
   7714 	 * Loop through the send queue, setting up transmit descriptors
   7715 	 * until we drain the queue, or use up all available transmit
   7716 	 * descriptors.
   7717 	 */
   7718 	for (;;) {
   7719 		m0 = NULL;
   7720 
   7721 		/* Get a work queue entry. */
   7722 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7723 			wm_txeof(txq, UINT_MAX);
   7724 			if (txq->txq_sfree == 0) {
   7725 				DPRINTF(WM_DEBUG_TX,
   7726 				    ("%s: TX: no free job descriptors\n",
   7727 					device_xname(sc->sc_dev)));
   7728 				WM_Q_EVCNT_INCR(txq, txsstall);
   7729 				break;
   7730 			}
   7731 		}
   7732 
   7733 		/* Grab a packet off the queue. */
   7734 		if (is_transmit)
   7735 			m0 = pcq_get(txq->txq_interq);
   7736 		else
   7737 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7738 		if (m0 == NULL)
   7739 			break;
   7740 
   7741 		DPRINTF(WM_DEBUG_TX,
   7742 		    ("%s: TX: have packet to transmit: %p\n",
   7743 			device_xname(sc->sc_dev), m0));
   7744 
   7745 		txs = &txq->txq_soft[txq->txq_snext];
   7746 		dmamap = txs->txs_dmamap;
   7747 
   7748 		use_tso = (m0->m_pkthdr.csum_flags &
   7749 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7750 
   7751 		/*
   7752 		 * So says the Linux driver:
   7753 		 * The controller does a simple calculation to make sure
   7754 		 * there is enough room in the FIFO before initiating the
   7755 		 * DMA for each buffer. The calc is:
   7756 		 *	4 = ceil(buffer len / MSS)
   7757 		 * To make sure we don't overrun the FIFO, adjust the max
   7758 		 * buffer len if the MSS drops.
   7759 		 */
   7760 		dmamap->dm_maxsegsz =
   7761 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7762 		    ? m0->m_pkthdr.segsz << 2
   7763 		    : WTX_MAX_LEN;
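         		/*
         		 * Example (assumed MSS): with TSO and segsz = 1448,
         		 * dm_maxsegsz becomes 1448 << 2 = 5792, well under
         		 * WTX_MAX_LEN, so the controller's ceil(len / MSS)
         		 * estimate for a buffer never exceeds 4.
         		 */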
   7764 
   7765 		/*
   7766 		 * Load the DMA map.  If this fails, the packet either
   7767 		 * didn't fit in the allotted number of segments, or we
   7768 		 * were short on resources.  For the too-many-segments
   7769 		 * case, we simply report an error and drop the packet,
   7770 		 * since we can't sanely copy a jumbo packet to a single
   7771 		 * buffer.
   7772 		 */
   7773 retry:
   7774 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7775 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7776 		if (__predict_false(error)) {
   7777 			if (error == EFBIG) {
   7778 				if (remap == true) {
   7779 					struct mbuf *m;
   7780 
   7781 					remap = false;
   7782 					m = m_defrag(m0, M_NOWAIT);
   7783 					if (m != NULL) {
   7784 						WM_Q_EVCNT_INCR(txq, defrag);
   7785 						m0 = m;
   7786 						goto retry;
   7787 					}
   7788 				}
   7789 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7790 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7791 				    "DMA segments, dropping...\n",
   7792 				    device_xname(sc->sc_dev));
   7793 				wm_dump_mbuf_chain(sc, m0);
   7794 				m_freem(m0);
   7795 				continue;
   7796 			}
   7797 			/* Short on resources, just stop for now. */
   7798 			DPRINTF(WM_DEBUG_TX,
   7799 			    ("%s: TX: dmamap load failed: %d\n",
   7800 				device_xname(sc->sc_dev), error));
   7801 			break;
   7802 		}
   7803 
   7804 		segs_needed = dmamap->dm_nsegs;
   7805 		if (use_tso) {
   7806 			/* For sentinel descriptor; see below. */
   7807 			segs_needed++;
   7808 		}
   7809 
   7810 		/*
   7811 		 * Ensure we have enough descriptors free to describe
   7812 		 * the packet. Note, we always reserve one descriptor
   7813 		 * at the end of the ring due to the semantics of the
   7814 		 * TDT register, plus one more in the event we need
   7815 		 * to load offload context.
   7816 		 */
   7817 		if (segs_needed > txq->txq_free - 2) {
   7818 			/*
   7819 			 * Not enough free descriptors to transmit this
   7820 			 * packet.  We haven't committed anything yet,
   7821 			 * so just unload the DMA map, put the packet
    7822 			 * back on the queue, and punt. Notify the upper
   7823 			 * layer that there are no more slots left.
   7824 			 */
   7825 			DPRINTF(WM_DEBUG_TX,
   7826 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7827 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7828 				segs_needed, txq->txq_free - 1));
   7829 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7830 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7831 			WM_Q_EVCNT_INCR(txq, txdstall);
   7832 			break;
   7833 		}
   7834 
   7835 		/*
   7836 		 * Check for 82547 Tx FIFO bug. We need to do this
   7837 		 * once we know we can transmit the packet, since we
   7838 		 * do some internal FIFO space accounting here.
   7839 		 */
   7840 		if (sc->sc_type == WM_T_82547 &&
   7841 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7842 			DPRINTF(WM_DEBUG_TX,
   7843 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7844 				device_xname(sc->sc_dev)));
   7845 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7846 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7847 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7848 			break;
   7849 		}
   7850 
   7851 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7852 
   7853 		DPRINTF(WM_DEBUG_TX,
   7854 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7855 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7856 
   7857 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7858 
   7859 		/*
   7860 		 * Store a pointer to the packet so that we can free it
   7861 		 * later.
   7862 		 *
    7863 		 * Initially, we consider the number of descriptors the
    7864 		 * packet uses to be the number of DMA segments.  This may
    7865 		 * be incremented by 1 if we do checksum offload (a
    7866 		 * descriptor is used to set the checksum context).
   7867 		 */
   7868 		txs->txs_mbuf = m0;
   7869 		txs->txs_firstdesc = txq->txq_next;
   7870 		txs->txs_ndesc = segs_needed;
   7871 
   7872 		/* Set up offload parameters for this packet. */
   7873 		if (m0->m_pkthdr.csum_flags &
   7874 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7875 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7876 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7877 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   7878 		} else {
    7879 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
    7880 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   7881 			cksumcmd = 0;
   7882 			cksumfields = 0;
   7883 		}
   7884 
   7885 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7886 
   7887 		/* Sync the DMA map. */
   7888 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7889 		    BUS_DMASYNC_PREWRITE);
   7890 
   7891 		/* Initialize the transmit descriptor. */
   7892 		for (nexttx = txq->txq_next, seg = 0;
   7893 		     seg < dmamap->dm_nsegs; seg++) {
   7894 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7895 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7896 			     seglen != 0;
   7897 			     curaddr += curlen, seglen -= curlen,
   7898 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7899 				curlen = seglen;
   7900 
   7901 				/*
   7902 				 * So says the Linux driver:
   7903 				 * Work around for premature descriptor
   7904 				 * write-backs in TSO mode.  Append a
   7905 				 * 4-byte sentinel descriptor.
   7906 				 */
   7907 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7908 				    curlen > 8)
   7909 					curlen -= 4;
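         				/*
         				 * The 4 bytes shaved off here are
         				 * then emitted by this loop as a
         				 * separate final descriptor: the TSO
         				 * sentinel accounted for in
         				 * segs_needed above.
         				 */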
   7910 
   7911 				wm_set_dma_addr(
   7912 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7913 				txq->txq_descs[nexttx].wtx_cmdlen
   7914 				    = htole32(cksumcmd | curlen);
   7915 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7916 				    = 0;
   7917 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7918 				    = cksumfields;
    7919 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7920 				lasttx = nexttx;
   7921 
   7922 				DPRINTF(WM_DEBUG_TX,
   7923 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7924 					"len %#04zx\n",
   7925 					device_xname(sc->sc_dev), nexttx,
   7926 					(uint64_t)curaddr, curlen));
   7927 			}
   7928 		}
   7929 
   7930 		KASSERT(lasttx != -1);
   7931 
   7932 		/*
   7933 		 * Set up the command byte on the last descriptor of
   7934 		 * the packet. If we're in the interrupt delay window,
   7935 		 * delay the interrupt.
   7936 		 */
   7937 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7938 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7939 
   7940 		/*
   7941 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7942 		 * up the descriptor to encapsulate the packet for us.
   7943 		 *
   7944 		 * This is only valid on the last descriptor of the packet.
   7945 		 */
   7946 		if (vlan_has_tag(m0)) {
   7947 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7948 			    htole32(WTX_CMD_VLE);
   7949 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7950 			    = htole16(vlan_get_tag(m0));
   7951 		}
   7952 
   7953 		txs->txs_lastdesc = lasttx;
   7954 
   7955 		DPRINTF(WM_DEBUG_TX,
   7956 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7957 			device_xname(sc->sc_dev),
   7958 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7959 
   7960 		/* Sync the descriptors we're using. */
   7961 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7962 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7963 
   7964 		/* Give the packet to the chip. */
   7965 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7966 
   7967 		DPRINTF(WM_DEBUG_TX,
   7968 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7969 
   7970 		DPRINTF(WM_DEBUG_TX,
   7971 		    ("%s: TX: finished transmitting packet, job %d\n",
   7972 			device_xname(sc->sc_dev), txq->txq_snext));
   7973 
   7974 		/* Advance the tx pointer. */
   7975 		txq->txq_free -= txs->txs_ndesc;
   7976 		txq->txq_next = nexttx;
   7977 
   7978 		txq->txq_sfree--;
   7979 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7980 
   7981 		/* Pass the packet to any BPF listeners. */
   7982 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7983 	}
   7984 
   7985 	if (m0 != NULL) {
   7986 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7987 		WM_Q_EVCNT_INCR(txq, descdrop);
   7988 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7989 			__func__));
   7990 		m_freem(m0);
   7991 	}
   7992 
   7993 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7994 		/* No more slots; notify upper layer. */
   7995 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7996 	}
   7997 
   7998 	if (txq->txq_free != ofree) {
   7999 		/* Set a watchdog timer in case the chip flakes out. */
   8000 		txq->txq_lastsent = time_uptime;
   8001 		txq->txq_sending = true;
   8002 	}
   8003 }
   8004 
   8005 /*
   8006  * wm_nq_tx_offload:
   8007  *
   8008  *	Set up TCP/IP checksumming parameters for the
   8009  *	specified packet, for NEWQUEUE devices
   8010  */
   8011 static void
   8012 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8013     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8014 {
   8015 	struct mbuf *m0 = txs->txs_mbuf;
   8016 	uint32_t vl_len, mssidx, cmdc;
   8017 	struct ether_header *eh;
   8018 	int offset, iphl;
   8019 
   8020 	/*
   8021 	 * XXX It would be nice if the mbuf pkthdr had offset
   8022 	 * fields for the protocol headers.
   8023 	 */
   8024 	*cmdlenp = 0;
   8025 	*fieldsp = 0;
   8026 
   8027 	eh = mtod(m0, struct ether_header *);
   8028 	switch (htons(eh->ether_type)) {
   8029 	case ETHERTYPE_IP:
   8030 	case ETHERTYPE_IPV6:
   8031 		offset = ETHER_HDR_LEN;
   8032 		break;
   8033 
   8034 	case ETHERTYPE_VLAN:
   8035 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8036 		break;
   8037 
   8038 	default:
   8039 		/* Don't support this protocol or encapsulation. */
   8040 		*do_csum = false;
   8041 		return;
   8042 	}
   8043 	*do_csum = true;
   8044 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8045 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8046 
   8047 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8048 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8049 
   8050 	if ((m0->m_pkthdr.csum_flags &
   8051 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8052 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8053 	} else {
   8054 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8055 	}
   8056 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8057 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8058 
   8059 	if (vlan_has_tag(m0)) {
   8060 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8061 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8062 		*cmdlenp |= NQTX_CMD_VLE;
   8063 	}
   8064 
   8065 	mssidx = 0;
   8066 
   8067 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8068 		int hlen = offset + iphl;
   8069 		int tcp_hlen;
   8070 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8071 
   8072 		if (__predict_false(m0->m_len <
   8073 				    (hlen + sizeof(struct tcphdr)))) {
   8074 			/*
   8075 			 * TCP/IP headers are not in the first mbuf; we need
   8076 			 * to do this the slow and painful way. Let's just
   8077 			 * hope this doesn't happen very often.
   8078 			 */
   8079 			struct tcphdr th;
   8080 
   8081 			WM_Q_EVCNT_INCR(txq, tsopain);
   8082 
   8083 			m_copydata(m0, hlen, sizeof(th), &th);
   8084 			if (v4) {
   8085 				struct ip ip;
   8086 
   8087 				m_copydata(m0, offset, sizeof(ip), &ip);
   8088 				ip.ip_len = 0;
   8089 				m_copyback(m0,
   8090 				    offset + offsetof(struct ip, ip_len),
   8091 				    sizeof(ip.ip_len), &ip.ip_len);
   8092 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8093 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8094 			} else {
   8095 				struct ip6_hdr ip6;
   8096 
   8097 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8098 				ip6.ip6_plen = 0;
   8099 				m_copyback(m0,
   8100 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8101 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8102 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8103 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8104 			}
   8105 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8106 			    sizeof(th.th_sum), &th.th_sum);
   8107 
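         			/*
         			 * th_off counts 32-bit words, so shifting it
         			 * left by 2 yields the TCP header length in
         			 * bytes.
         			 */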
   8108 			tcp_hlen = th.th_off << 2;
   8109 		} else {
   8110 			/*
   8111 			 * TCP/IP headers are in the first mbuf; we can do
   8112 			 * this the easy way.
   8113 			 */
   8114 			struct tcphdr *th;
   8115 
   8116 			if (v4) {
   8117 				struct ip *ip =
   8118 				    (void *)(mtod(m0, char *) + offset);
   8119 				th = (void *)(mtod(m0, char *) + hlen);
   8120 
   8121 				ip->ip_len = 0;
   8122 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8123 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8124 			} else {
   8125 				struct ip6_hdr *ip6 =
   8126 				    (void *)(mtod(m0, char *) + offset);
   8127 				th = (void *)(mtod(m0, char *) + hlen);
   8128 
   8129 				ip6->ip6_plen = 0;
   8130 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8131 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8132 			}
   8133 			tcp_hlen = th->th_off << 2;
   8134 		}
   8135 		hlen += tcp_hlen;
   8136 		*cmdlenp |= NQTX_CMD_TSE;
   8137 
   8138 		if (v4) {
   8139 			WM_Q_EVCNT_INCR(txq, tso);
   8140 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8141 		} else {
   8142 			WM_Q_EVCNT_INCR(txq, tso6);
   8143 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8144 		}
   8145 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8146 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8147 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8148 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8149 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8150 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8151 	} else {
   8152 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8153 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8154 	}
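         	/*
         	 * Illustrative example (hypothetical values): for a TSO IPv4
         	 * packet with a 14-byte Ethernet header, a 20-byte IP header,
         	 * a 20-byte TCP header and an MSS of 1448, vl_len packs
         	 * MACLEN=14 and IPLEN=20, mssidx packs MSS=1448 and L4LEN=20,
         	 * and PAYLEN is the packet length minus the 54 header bytes.
         	 */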
   8155 
   8156 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8157 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8158 		cmdc |= NQTXC_CMD_IP4;
   8159 	}
   8160 
   8161 	if (m0->m_pkthdr.csum_flags &
   8162 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8163 		WM_Q_EVCNT_INCR(txq, tusum);
   8164 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8165 			cmdc |= NQTXC_CMD_TCP;
   8166 		else
   8167 			cmdc |= NQTXC_CMD_UDP;
   8168 
   8169 		cmdc |= NQTXC_CMD_IP4;
   8170 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8171 	}
   8172 	if (m0->m_pkthdr.csum_flags &
   8173 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8174 		WM_Q_EVCNT_INCR(txq, tusum6);
   8175 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8176 			cmdc |= NQTXC_CMD_TCP;
   8177 		else
   8178 			cmdc |= NQTXC_CMD_UDP;
   8179 
   8180 		cmdc |= NQTXC_CMD_IP6;
   8181 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8182 	}
   8183 
   8184 	/*
    8185 	 * On NEWQUEUE controllers (82575, 82576, 82580, I350, I354,
    8186 	 * I210 and I211) we don't have to write a context descriptor
    8187 	 * for every packet; writing one per Tx queue would be
    8188 	 * enough for these controllers. Writing a context descriptor
    8189 	 * for every packet, as we do here, adds a little overhead,
    8190 	 * but it does not cause problems.
   8191 	 */
   8192 	/* Fill in the context descriptor. */
    8193 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
    8194 	    htole32(vl_len);
    8195 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
    8196 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
    8197 	    htole32(cmdc);
    8198 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
    8199 	    htole32(mssidx);
   8200 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8201 	DPRINTF(WM_DEBUG_TX,
   8202 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8203 		txq->txq_next, 0, vl_len));
   8204 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8205 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8206 	txs->txs_ndesc++;
   8207 }
   8208 
   8209 /*
   8210  * wm_nq_start:		[ifnet interface function]
   8211  *
   8212  *	Start packet transmission on the interface for NEWQUEUE devices
   8213  */
   8214 static void
   8215 wm_nq_start(struct ifnet *ifp)
   8216 {
   8217 	struct wm_softc *sc = ifp->if_softc;
   8218 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8219 
   8220 #ifdef WM_MPSAFE
   8221 	KASSERT(if_is_mpsafe(ifp));
   8222 #endif
   8223 	/*
   8224 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8225 	 */
   8226 
   8227 	mutex_enter(txq->txq_lock);
   8228 	if (!txq->txq_stopping)
   8229 		wm_nq_start_locked(ifp);
   8230 	mutex_exit(txq->txq_lock);
   8231 }
   8232 
   8233 static void
   8234 wm_nq_start_locked(struct ifnet *ifp)
   8235 {
   8236 	struct wm_softc *sc = ifp->if_softc;
   8237 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8238 
   8239 	wm_nq_send_common_locked(ifp, txq, false);
   8240 }
   8241 
   8242 static int
   8243 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8244 {
   8245 	int qid;
   8246 	struct wm_softc *sc = ifp->if_softc;
   8247 	struct wm_txqueue *txq;
   8248 
   8249 	qid = wm_select_txqueue(ifp, m);
   8250 	txq = &sc->sc_queue[qid].wmq_txq;
   8251 
   8252 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8253 		m_freem(m);
   8254 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8255 		return ENOBUFS;
   8256 	}
   8257 
   8258 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8259 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8260 	if (m->m_flags & M_MCAST)
   8261 		if_statinc_ref(nsr, if_omcasts);
   8262 	IF_STAT_PUTREF(ifp);
   8263 
   8264 	/*
    8265 	 * There are two situations in which this mutex_tryenter() can
    8266 	 * fail at run time:
    8267 	 *     (1) contention with the interrupt handler
    8268 	 *         (wm_txrxintr_msix())
    8269 	 *     (2) contention with the deferred if_start softint
    8270 	 *         (wm_handle_queue())
    8271 	 * In either case, the last packet enqueued to txq->txq_interq
    8272 	 * is eventually dequeued by wm_deferred_start_locked(), so it
    8273 	 * does not get stuck.
    8274 	 */
   8275 	if (mutex_tryenter(txq->txq_lock)) {
   8276 		if (!txq->txq_stopping)
   8277 			wm_nq_transmit_locked(ifp, txq);
   8278 		mutex_exit(txq->txq_lock);
   8279 	}
   8280 
   8281 	return 0;
   8282 }
   8283 
   8284 static void
   8285 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8286 {
   8287 
   8288 	wm_nq_send_common_locked(ifp, txq, true);
   8289 }
   8290 
   8291 static void
   8292 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8293     bool is_transmit)
   8294 {
   8295 	struct wm_softc *sc = ifp->if_softc;
   8296 	struct mbuf *m0;
   8297 	struct wm_txsoft *txs;
   8298 	bus_dmamap_t dmamap;
   8299 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8300 	bool do_csum, sent;
   8301 	bool remap = true;
   8302 
   8303 	KASSERT(mutex_owned(txq->txq_lock));
   8304 
   8305 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8306 		return;
   8307 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8308 		return;
   8309 
   8310 	sent = false;
   8311 
   8312 	/*
   8313 	 * Loop through the send queue, setting up transmit descriptors
   8314 	 * until we drain the queue, or use up all available transmit
   8315 	 * descriptors.
   8316 	 */
   8317 	for (;;) {
   8318 		m0 = NULL;
   8319 
   8320 		/* Get a work queue entry. */
   8321 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8322 			wm_txeof(txq, UINT_MAX);
   8323 			if (txq->txq_sfree == 0) {
   8324 				DPRINTF(WM_DEBUG_TX,
   8325 				    ("%s: TX: no free job descriptors\n",
   8326 					device_xname(sc->sc_dev)));
   8327 				WM_Q_EVCNT_INCR(txq, txsstall);
   8328 				break;
   8329 			}
   8330 		}
   8331 
   8332 		/* Grab a packet off the queue. */
   8333 		if (is_transmit)
   8334 			m0 = pcq_get(txq->txq_interq);
   8335 		else
   8336 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8337 		if (m0 == NULL)
   8338 			break;
   8339 
   8340 		DPRINTF(WM_DEBUG_TX,
   8341 		    ("%s: TX: have packet to transmit: %p\n",
   8342 		    device_xname(sc->sc_dev), m0));
   8343 
   8344 		txs = &txq->txq_soft[txq->txq_snext];
   8345 		dmamap = txs->txs_dmamap;
   8346 
   8347 		/*
   8348 		 * Load the DMA map.  If this fails, the packet either
   8349 		 * didn't fit in the allotted number of segments, or we
   8350 		 * were short on resources.  For the too-many-segments
   8351 		 * case, we simply report an error and drop the packet,
   8352 		 * since we can't sanely copy a jumbo packet to a single
   8353 		 * buffer.
   8354 		 */
   8355 retry:
   8356 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8357 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8358 		if (__predict_false(error)) {
   8359 			if (error == EFBIG) {
   8360 				if (remap == true) {
   8361 					struct mbuf *m;
   8362 
   8363 					remap = false;
   8364 					m = m_defrag(m0, M_NOWAIT);
   8365 					if (m != NULL) {
   8366 						WM_Q_EVCNT_INCR(txq, defrag);
   8367 						m0 = m;
   8368 						goto retry;
   8369 					}
   8370 				}
   8371 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8372 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8373 				    "DMA segments, dropping...\n",
   8374 				    device_xname(sc->sc_dev));
   8375 				wm_dump_mbuf_chain(sc, m0);
   8376 				m_freem(m0);
   8377 				continue;
   8378 			}
   8379 			/* Short on resources, just stop for now. */
   8380 			DPRINTF(WM_DEBUG_TX,
   8381 			    ("%s: TX: dmamap load failed: %d\n",
   8382 				device_xname(sc->sc_dev), error));
   8383 			break;
   8384 		}
   8385 
   8386 		segs_needed = dmamap->dm_nsegs;
   8387 
   8388 		/*
   8389 		 * Ensure we have enough descriptors free to describe
   8390 		 * the packet. Note, we always reserve one descriptor
   8391 		 * at the end of the ring due to the semantics of the
   8392 		 * TDT register, plus one more in the event we need
   8393 		 * to load offload context.
   8394 		 */
   8395 		if (segs_needed > txq->txq_free - 2) {
   8396 			/*
   8397 			 * Not enough free descriptors to transmit this
   8398 			 * packet.  We haven't committed anything yet,
   8399 			 * so just unload the DMA map, put the packet
    8400 			 * back on the queue, and punt. Notify the upper
   8401 			 * layer that there are no more slots left.
   8402 			 */
   8403 			DPRINTF(WM_DEBUG_TX,
   8404 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8405 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8406 				segs_needed, txq->txq_free - 1));
   8407 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8408 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8409 			WM_Q_EVCNT_INCR(txq, txdstall);
   8410 			break;
   8411 		}
   8412 
   8413 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8414 
   8415 		DPRINTF(WM_DEBUG_TX,
   8416 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8417 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8418 
   8419 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8420 
   8421 		/*
   8422 		 * Store a pointer to the packet so that we can free it
   8423 		 * later.
   8424 		 *
    8425 		 * Initially, we consider the number of descriptors the
    8426 		 * packet uses to be the number of DMA segments.  This may
    8427 		 * be incremented by 1 if we do checksum offload (a
    8428 		 * descriptor is used to set the checksum context).
   8429 		 */
   8430 		txs->txs_mbuf = m0;
   8431 		txs->txs_firstdesc = txq->txq_next;
   8432 		txs->txs_ndesc = segs_needed;
   8433 
   8434 		/* Set up offload parameters for this packet. */
   8435 		uint32_t cmdlen, fields, dcmdlen;
   8436 		if (m0->m_pkthdr.csum_flags &
   8437 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8438 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8439 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8440 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8441 			    &do_csum);
   8442 		} else {
   8443 			do_csum = false;
   8444 			cmdlen = 0;
   8445 			fields = 0;
   8446 		}
   8447 
   8448 		/* Sync the DMA map. */
   8449 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8450 		    BUS_DMASYNC_PREWRITE);
   8451 
   8452 		/* Initialize the first transmit descriptor. */
   8453 		nexttx = txq->txq_next;
   8454 		if (!do_csum) {
   8455 			/* Setup a legacy descriptor */
   8456 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8457 			    dmamap->dm_segs[0].ds_addr);
   8458 			txq->txq_descs[nexttx].wtx_cmdlen =
   8459 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8460 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8461 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8462 			if (vlan_has_tag(m0)) {
   8463 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8464 				    htole32(WTX_CMD_VLE);
   8465 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8466 				    htole16(vlan_get_tag(m0));
   8467 			} else
    8468 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8469 
   8470 			dcmdlen = 0;
   8471 		} else {
   8472 			/* Setup an advanced data descriptor */
   8473 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8474 			    htole64(dmamap->dm_segs[0].ds_addr);
   8475 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8476 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8477 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8478 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8479 			    htole32(fields);
   8480 			DPRINTF(WM_DEBUG_TX,
   8481 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8482 				device_xname(sc->sc_dev), nexttx,
   8483 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8484 			DPRINTF(WM_DEBUG_TX,
   8485 			    ("\t 0x%08x%08x\n", fields,
   8486 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8487 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8488 		}
   8489 
   8490 		lasttx = nexttx;
   8491 		nexttx = WM_NEXTTX(txq, nexttx);
   8492 		/*
    8493 		 * Fill in the next descriptors; the legacy and advanced
    8494 		 * data formats share the same layout for these fields.
   8495 		 */
   8496 		for (seg = 1; seg < dmamap->dm_nsegs;
   8497 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8498 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8499 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8500 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8501 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8502 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8503 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8504 			lasttx = nexttx;
   8505 
   8506 			DPRINTF(WM_DEBUG_TX,
   8507 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8508 				device_xname(sc->sc_dev), nexttx,
   8509 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8510 				dmamap->dm_segs[seg].ds_len));
   8511 		}
   8512 
   8513 		KASSERT(lasttx != -1);
   8514 
   8515 		/*
   8516 		 * Set up the command byte on the last descriptor of
   8517 		 * the packet. If we're in the interrupt delay window,
   8518 		 * delay the interrupt.
   8519 		 */
   8520 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8521 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
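         		/*
         		 * The legacy and advanced descriptor formats encode
         		 * EOP and RS at the same bit positions (asserted
         		 * above), so the same OR works for either format.
         		 */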
   8522 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8523 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8524 
   8525 		txs->txs_lastdesc = lasttx;
   8526 
   8527 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8528 		    device_xname(sc->sc_dev),
   8529 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8530 
   8531 		/* Sync the descriptors we're using. */
   8532 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8533 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8534 
   8535 		/* Give the packet to the chip. */
   8536 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8537 		sent = true;
   8538 
   8539 		DPRINTF(WM_DEBUG_TX,
   8540 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8541 
   8542 		DPRINTF(WM_DEBUG_TX,
   8543 		    ("%s: TX: finished transmitting packet, job %d\n",
   8544 			device_xname(sc->sc_dev), txq->txq_snext));
   8545 
   8546 		/* Advance the tx pointer. */
   8547 		txq->txq_free -= txs->txs_ndesc;
   8548 		txq->txq_next = nexttx;
   8549 
   8550 		txq->txq_sfree--;
   8551 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8552 
   8553 		/* Pass the packet to any BPF listeners. */
   8554 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8555 	}
   8556 
   8557 	if (m0 != NULL) {
   8558 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8559 		WM_Q_EVCNT_INCR(txq, descdrop);
   8560 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8561 			__func__));
   8562 		m_freem(m0);
   8563 	}
   8564 
   8565 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8566 		/* No more slots; notify upper layer. */
   8567 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8568 	}
   8569 
   8570 	if (sent) {
   8571 		/* Set a watchdog timer in case the chip flakes out. */
   8572 		txq->txq_lastsent = time_uptime;
   8573 		txq->txq_sending = true;
   8574 	}
   8575 }
   8576 
   8577 static void
   8578 wm_deferred_start_locked(struct wm_txqueue *txq)
   8579 {
   8580 	struct wm_softc *sc = txq->txq_sc;
   8581 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8582 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8583 	int qid = wmq->wmq_id;
   8584 
   8585 	KASSERT(mutex_owned(txq->txq_lock));
   8586 
    8587 	if (txq->txq_stopping)
    8588 		return;
   8591 
   8592 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8593 		/* XXX needed for ALTQ or single-CPU systems */
   8594 		if (qid == 0)
   8595 			wm_nq_start_locked(ifp);
   8596 		wm_nq_transmit_locked(ifp, txq);
   8597 	} else {
    8598 		/* XXX needed for ALTQ or single-CPU systems */
   8599 		if (qid == 0)
   8600 			wm_start_locked(ifp);
   8601 		wm_transmit_locked(ifp, txq);
   8602 	}
   8603 }
   8604 
   8605 /* Interrupt */
   8606 
   8607 /*
   8608  * wm_txeof:
   8609  *
   8610  *	Helper; handle transmit interrupts.
   8611  */
   8612 static bool
   8613 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8614 {
   8615 	struct wm_softc *sc = txq->txq_sc;
   8616 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8617 	struct wm_txsoft *txs;
   8618 	int count = 0;
   8619 	int i;
   8620 	uint8_t status;
   8621 	bool more = false;
   8622 
   8623 	KASSERT(mutex_owned(txq->txq_lock));
   8624 
   8625 	if (txq->txq_stopping)
   8626 		return false;
   8627 
   8628 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8629 
   8630 	/*
   8631 	 * Go through the Tx list and free mbufs for those
   8632 	 * frames which have been transmitted.
   8633 	 */
   8634 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8635 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8636 		if (limit-- == 0) {
   8637 			more = true;
   8638 			DPRINTF(WM_DEBUG_TX,
   8639 			    ("%s: TX: loop limited, job %d is not processed\n",
   8640 				device_xname(sc->sc_dev), i));
   8641 			break;
   8642 		}
   8643 
   8644 		txs = &txq->txq_soft[i];
   8645 
   8646 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8647 			device_xname(sc->sc_dev), i));
   8648 
   8649 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8650 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8651 
   8652 		status =
   8653 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
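         		/*
         		 * If the Descriptor Done bit is not yet set, this job
         		 * is still in flight; resync the descriptor so a later
         		 * call re-reads it from memory, and stop scanning.
         		 */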
   8654 		if ((status & WTX_ST_DD) == 0) {
   8655 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8656 			    BUS_DMASYNC_PREREAD);
   8657 			break;
   8658 		}
   8659 
   8660 		count++;
   8661 		DPRINTF(WM_DEBUG_TX,
   8662 		    ("%s: TX: job %d done: descs %d..%d\n",
   8663 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8664 		    txs->txs_lastdesc));
   8665 
   8666 		/*
   8667 		 * XXX We should probably be using the statistics
   8668 		 * XXX registers, but I don't know if they exist
   8669 		 * XXX on chips before the i82544.
   8670 		 */
   8671 
   8672 #ifdef WM_EVENT_COUNTERS
   8673 		if (status & WTX_ST_TU)
   8674 			WM_Q_EVCNT_INCR(txq, underrun);
   8675 #endif /* WM_EVENT_COUNTERS */
   8676 
   8677 		/*
    8678 		 * The documentation for the 82574 and newer says the
    8679 		 * status field has neither an EC (Excessive Collision)
    8680 		 * bit nor an LC (Late Collision) bit; both are reserved.
    8681 		 * Refer to the "PCIe GbE Controller Open Source Software
    8682 		 * Developer's Manual", the 82574 datasheet and newer.
    8683 		 *
    8684 		 * XXX The LC bit has been seen set on an I218 even at full
    8685 		 * duplex, so it may have some other, undocumented meaning.
   8686 		 */
   8687 
   8688 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8689 		    && ((sc->sc_type < WM_T_82574)
   8690 			|| (sc->sc_type == WM_T_80003))) {
   8691 			if_statinc(ifp, if_oerrors);
   8692 			if (status & WTX_ST_LC)
   8693 				log(LOG_WARNING, "%s: late collision\n",
   8694 				    device_xname(sc->sc_dev));
   8695 			else if (status & WTX_ST_EC) {
   8696 				if_statadd(ifp, if_collisions,
   8697 				    TX_COLLISION_THRESHOLD + 1);
   8698 				log(LOG_WARNING, "%s: excessive collisions\n",
   8699 				    device_xname(sc->sc_dev));
   8700 			}
   8701 		} else
   8702 			if_statinc(ifp, if_opackets);
   8703 
   8704 		txq->txq_packets++;
   8705 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8706 
   8707 		txq->txq_free += txs->txs_ndesc;
   8708 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8709 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8710 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8711 		m_freem(txs->txs_mbuf);
   8712 		txs->txs_mbuf = NULL;
   8713 	}
   8714 
   8715 	/* Update the dirty transmit buffer pointer. */
   8716 	txq->txq_sdirty = i;
   8717 	DPRINTF(WM_DEBUG_TX,
   8718 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8719 
   8720 	if (count != 0)
   8721 		rnd_add_uint32(&sc->rnd_source, count);
   8722 
   8723 	/*
   8724 	 * If there are no more pending transmissions, cancel the watchdog
   8725 	 * timer.
   8726 	 */
   8727 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8728 		txq->txq_sending = false;
   8729 
   8730 	return more;
   8731 }
   8732 
   8733 static inline uint32_t
   8734 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8735 {
   8736 	struct wm_softc *sc = rxq->rxq_sc;
   8737 
   8738 	if (sc->sc_type == WM_T_82574)
   8739 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8740 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8741 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8742 	else
   8743 		return rxq->rxq_descs[idx].wrx_status;
   8744 }
   8745 
   8746 static inline uint32_t
   8747 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8748 {
   8749 	struct wm_softc *sc = rxq->rxq_sc;
   8750 
   8751 	if (sc->sc_type == WM_T_82574)
   8752 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8753 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8754 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8755 	else
   8756 		return rxq->rxq_descs[idx].wrx_errors;
   8757 }
   8758 
   8759 static inline uint16_t
   8760 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8761 {
   8762 	struct wm_softc *sc = rxq->rxq_sc;
   8763 
   8764 	if (sc->sc_type == WM_T_82574)
   8765 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8766 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8767 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8768 	else
   8769 		return rxq->rxq_descs[idx].wrx_special;
   8770 }
   8771 
   8772 static inline int
   8773 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8774 {
   8775 	struct wm_softc *sc = rxq->rxq_sc;
   8776 
   8777 	if (sc->sc_type == WM_T_82574)
   8778 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8779 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8780 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8781 	else
   8782 		return rxq->rxq_descs[idx].wrx_len;
   8783 }
   8784 
   8785 #ifdef WM_DEBUG
   8786 static inline uint32_t
   8787 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8788 {
   8789 	struct wm_softc *sc = rxq->rxq_sc;
   8790 
   8791 	if (sc->sc_type == WM_T_82574)
   8792 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8793 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8794 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8795 	else
   8796 		return 0;
   8797 }
   8798 
   8799 static inline uint8_t
   8800 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8801 {
   8802 	struct wm_softc *sc = rxq->rxq_sc;
   8803 
   8804 	if (sc->sc_type == WM_T_82574)
   8805 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8806 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8807 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8808 	else
   8809 		return 0;
   8810 }
   8811 #endif /* WM_DEBUG */
   8812 
   8813 static inline bool
   8814 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8815     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8816 {
   8817 
   8818 	if (sc->sc_type == WM_T_82574)
   8819 		return (status & ext_bit) != 0;
   8820 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8821 		return (status & nq_bit) != 0;
   8822 	else
   8823 		return (status & legacy_bit) != 0;
   8824 }
   8825 
   8826 static inline bool
   8827 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8828     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8829 {
   8830 
   8831 	if (sc->sc_type == WM_T_82574)
   8832 		return (error & ext_bit) != 0;
   8833 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8834 		return (error & nq_bit) != 0;
   8835 	else
   8836 		return (error & legacy_bit) != 0;
   8837 }
   8838 
   8839 static inline bool
   8840 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8841 {
   8842 
   8843 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8844 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8845 		return true;
   8846 	else
   8847 		return false;
   8848 }
   8849 
   8850 static inline bool
   8851 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8852 {
   8853 	struct wm_softc *sc = rxq->rxq_sc;
   8854 
   8855 	/* XXX missing error bit for newqueue? */
   8856 	if (wm_rxdesc_is_set_error(sc, errors,
   8857 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8858 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8859 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8860 		NQRXC_ERROR_RXE)) {
   8861 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8862 		    EXTRXC_ERROR_SE, 0))
   8863 			log(LOG_WARNING, "%s: symbol error\n",
   8864 			    device_xname(sc->sc_dev));
   8865 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8866 		    EXTRXC_ERROR_SEQ, 0))
   8867 			log(LOG_WARNING, "%s: receive sequence error\n",
   8868 			    device_xname(sc->sc_dev));
   8869 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8870 		    EXTRXC_ERROR_CE, 0))
   8871 			log(LOG_WARNING, "%s: CRC error\n",
   8872 			    device_xname(sc->sc_dev));
   8873 		return true;
   8874 	}
   8875 
   8876 	return false;
   8877 }
   8878 
   8879 static inline bool
   8880 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8881 {
   8882 	struct wm_softc *sc = rxq->rxq_sc;
   8883 
   8884 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8885 		NQRXC_STATUS_DD)) {
   8886 		/* We have processed all of the receive descriptors. */
   8887 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8888 		return false;
   8889 	}
   8890 
   8891 	return true;
   8892 }
   8893 
   8894 static inline bool
   8895 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8896     uint16_t vlantag, struct mbuf *m)
   8897 {
   8898 
   8899 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8900 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8901 		vlan_set_tag(m, le16toh(vlantag));
   8902 	}
   8903 
   8904 	return true;
   8905 }
   8906 
   8907 static inline void
   8908 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8909     uint32_t errors, struct mbuf *m)
   8910 {
   8911 	struct wm_softc *sc = rxq->rxq_sc;
   8912 
   8913 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8914 		if (wm_rxdesc_is_set_status(sc, status,
   8915 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8916 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8917 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8918 			if (wm_rxdesc_is_set_error(sc, errors,
   8919 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8920 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8921 		}
   8922 		if (wm_rxdesc_is_set_status(sc, status,
   8923 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8924 			/*
   8925 			 * Note: we don't know if this was TCP or UDP,
   8926 			 * so we just set both bits, and expect the
   8927 			 * upper layers to deal.
   8928 			 */
   8929 			WM_Q_EVCNT_INCR(rxq, tusum);
   8930 			m->m_pkthdr.csum_flags |=
   8931 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8932 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8933 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8934 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8935 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8936 		}
   8937 	}
   8938 }
   8939 
   8940 /*
   8941  * wm_rxeof:
   8942  *
   8943  *	Helper; handle receive interrupts.
   8944  */
   8945 static bool
   8946 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8947 {
   8948 	struct wm_softc *sc = rxq->rxq_sc;
   8949 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8950 	struct wm_rxsoft *rxs;
   8951 	struct mbuf *m;
   8952 	int i, len;
   8953 	int count = 0;
   8954 	uint32_t status, errors;
   8955 	uint16_t vlantag;
   8956 	bool more = false;
   8957 
   8958 	KASSERT(mutex_owned(rxq->rxq_lock));
   8959 
   8960 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8961 		if (limit-- == 0) {
   8962 			rxq->rxq_ptr = i;
   8963 			more = true;
   8964 			DPRINTF(WM_DEBUG_RX,
   8965 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8966 				device_xname(sc->sc_dev), i));
   8967 			break;
   8968 		}
   8969 
   8970 		rxs = &rxq->rxq_soft[i];
   8971 
   8972 		DPRINTF(WM_DEBUG_RX,
   8973 		    ("%s: RX: checking descriptor %d\n",
   8974 			device_xname(sc->sc_dev), i));
   8975 		wm_cdrxsync(rxq, i,
   8976 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8977 
   8978 		status = wm_rxdesc_get_status(rxq, i);
   8979 		errors = wm_rxdesc_get_errors(rxq, i);
   8980 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8981 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8982 #ifdef WM_DEBUG
   8983 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8984 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8985 #endif
   8986 
   8987 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8988 			/*
    8989 			 * Update the receive pointer while holding
    8990 			 * rxq_lock, consistent with the counter updates.
   8991 			 */
   8992 			rxq->rxq_ptr = i;
   8993 			break;
   8994 		}
   8995 
   8996 		count++;
   8997 		if (__predict_false(rxq->rxq_discard)) {
   8998 			DPRINTF(WM_DEBUG_RX,
   8999 			    ("%s: RX: discarding contents of descriptor %d\n",
   9000 				device_xname(sc->sc_dev), i));
   9001 			wm_init_rxdesc(rxq, i);
   9002 			if (wm_rxdesc_is_eop(rxq, status)) {
   9003 				/* Reset our state. */
   9004 				DPRINTF(WM_DEBUG_RX,
   9005 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9006 					device_xname(sc->sc_dev)));
   9007 				rxq->rxq_discard = 0;
   9008 			}
   9009 			continue;
   9010 		}
   9011 
   9012 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9013 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9014 
   9015 		m = rxs->rxs_mbuf;
   9016 
   9017 		/*
   9018 		 * Add a new receive buffer to the ring, unless of
   9019 		 * course the length is zero. Treat the latter as a
   9020 		 * failed mapping.
   9021 		 */
   9022 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9023 			/*
   9024 			 * Failed, throw away what we've done so
   9025 			 * far, and discard the rest of the packet.
   9026 			 */
   9027 			if_statinc(ifp, if_ierrors);
   9028 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9029 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9030 			wm_init_rxdesc(rxq, i);
   9031 			if (!wm_rxdesc_is_eop(rxq, status))
   9032 				rxq->rxq_discard = 1;
   9033 			if (rxq->rxq_head != NULL)
   9034 				m_freem(rxq->rxq_head);
   9035 			WM_RXCHAIN_RESET(rxq);
   9036 			DPRINTF(WM_DEBUG_RX,
   9037 			    ("%s: RX: Rx buffer allocation failed, "
   9038 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9039 				rxq->rxq_discard ? " (discard)" : ""));
   9040 			continue;
   9041 		}
   9042 
   9043 		m->m_len = len;
   9044 		rxq->rxq_len += len;
   9045 		DPRINTF(WM_DEBUG_RX,
   9046 		    ("%s: RX: buffer at %p len %d\n",
   9047 			device_xname(sc->sc_dev), m->m_data, len));
   9048 
   9049 		/* If this is not the end of the packet, keep looking. */
   9050 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9051 			WM_RXCHAIN_LINK(rxq, m);
   9052 			DPRINTF(WM_DEBUG_RX,
   9053 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9054 				device_xname(sc->sc_dev), rxq->rxq_len));
   9055 			continue;
   9056 		}
   9057 
   9058 		/*
    9059 		 * Okay, we have the entire packet now. The chip is
    9060 		 * configured to include the FCS except on the I350,
    9061 		 * I354 and I21[01] (not all chips can be configured to
    9062 		 * strip it), so we need to trim it. We may also need to
    9063 		 * shorten the previous mbuf in the chain if the current
    9064 		 * mbuf is too short to hold the whole FCS. Due to an
    9065 		 * erratum, RCTL_SECRC is always set on the I350, so the
    9066 		 * FCS is already stripped there and we don't trim it.
   9067 		 */
   9068 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   9069 		    && (sc->sc_type != WM_T_I210)
   9070 		    && (sc->sc_type != WM_T_I211)) {
   9071 			if (m->m_len < ETHER_CRC_LEN) {
   9072 				rxq->rxq_tail->m_len
   9073 				    -= (ETHER_CRC_LEN - m->m_len);
   9074 				m->m_len = 0;
   9075 			} else
   9076 				m->m_len -= ETHER_CRC_LEN;
   9077 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9078 		} else
   9079 			len = rxq->rxq_len;
   9080 
   9081 		WM_RXCHAIN_LINK(rxq, m);
   9082 
   9083 		*rxq->rxq_tailp = NULL;
   9084 		m = rxq->rxq_head;
   9085 
   9086 		WM_RXCHAIN_RESET(rxq);
   9087 
   9088 		DPRINTF(WM_DEBUG_RX,
   9089 		    ("%s: RX: have entire packet, len -> %d\n",
   9090 			device_xname(sc->sc_dev), len));
   9091 
   9092 		/* If an error occurred, update stats and drop the packet. */
   9093 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9094 			m_freem(m);
   9095 			continue;
   9096 		}
   9097 
   9098 		/* No errors.  Receive the packet. */
   9099 		m_set_rcvif(m, ifp);
   9100 		m->m_pkthdr.len = len;
   9101 		/*
    9102 		 * TODO:
    9103 		 * We should save the rsshash and rsstype in this mbuf.
   9104 		 */
   9105 		DPRINTF(WM_DEBUG_RX,
   9106 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9107 			device_xname(sc->sc_dev), rsstype, rsshash));
   9108 
   9109 		/*
   9110 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9111 		 * for us.  Associate the tag with the packet.
   9112 		 */
   9113 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9114 			continue;
   9115 
   9116 		/* Set up checksum info for this packet. */
   9117 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9118 		/*
    9119 		 * Update the receive pointer while holding rxq_lock,
    9120 		 * keeping it consistent with the counter updates.
   9121 		 */
   9122 		rxq->rxq_ptr = i;
   9123 		rxq->rxq_packets++;
   9124 		rxq->rxq_bytes += len;
   9125 		mutex_exit(rxq->rxq_lock);
   9126 
   9127 		/* Pass it on. */
   9128 		if_percpuq_enqueue(sc->sc_ipq, m);
   9129 
   9130 		mutex_enter(rxq->rxq_lock);
   9131 
   9132 		if (rxq->rxq_stopping)
   9133 			break;
   9134 	}
   9135 
   9136 	if (count != 0)
   9137 		rnd_add_uint32(&sc->rnd_source, count);
   9138 
   9139 	DPRINTF(WM_DEBUG_RX,
   9140 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9141 
   9142 	return more;
   9143 }
   9144 
   9145 /*
   9146  * wm_linkintr_gmii:
   9147  *
   9148  *	Helper; handle link interrupts for GMII.
   9149  */
   9150 static void
   9151 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9152 {
   9153 	device_t dev = sc->sc_dev;
   9154 	uint32_t status, reg;
   9155 	bool link;
   9156 	int rv;
   9157 
   9158 	KASSERT(WM_CORE_LOCKED(sc));
   9159 
   9160 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9161 		__func__));
   9162 
   9163 	if ((icr & ICR_LSC) == 0) {
   9164 		if (icr & ICR_RXSEQ)
   9165 			DPRINTF(WM_DEBUG_LINK,
   9166 			    ("%s: LINK Receive sequence error\n",
   9167 				device_xname(dev)));
   9168 		return;
   9169 	}
   9170 
   9171 	/* Link status changed */
   9172 	status = CSR_READ(sc, WMREG_STATUS);
   9173 	link = status & STATUS_LU;
   9174 	if (link) {
   9175 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9176 			device_xname(dev),
   9177 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9178 	} else {
   9179 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9180 			device_xname(dev)));
   9181 	}
   9182 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9183 		wm_gig_downshift_workaround_ich8lan(sc);
   9184 
   9185 	if ((sc->sc_type == WM_T_ICH8)
   9186 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9187 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9188 	}
   9189 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9190 		device_xname(dev)));
   9191 	mii_pollstat(&sc->sc_mii);
   9192 	if (sc->sc_type == WM_T_82543) {
   9193 		int miistatus, active;
   9194 
   9195 		/*
    9196 		 * With the 82543, we need to force the speed and
    9197 		 * duplex settings on the MAC to match the PHY's
    9198 		 * speed and duplex configuration.
   9199 		 */
   9200 		miistatus = sc->sc_mii.mii_media_status;
   9201 
   9202 		if (miistatus & IFM_ACTIVE) {
   9203 			active = sc->sc_mii.mii_media_active;
   9204 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9205 			switch (IFM_SUBTYPE(active)) {
   9206 			case IFM_10_T:
   9207 				sc->sc_ctrl |= CTRL_SPEED_10;
   9208 				break;
   9209 			case IFM_100_TX:
   9210 				sc->sc_ctrl |= CTRL_SPEED_100;
   9211 				break;
   9212 			case IFM_1000_T:
   9213 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9214 				break;
   9215 			default:
   9216 				/*
   9217 				 * Fiber?
    9218 				 * Should not enter here.
   9219 				 */
   9220 				device_printf(dev, "unknown media (%x)\n",
   9221 				    active);
   9222 				break;
   9223 			}
   9224 			if (active & IFM_FDX)
   9225 				sc->sc_ctrl |= CTRL_FD;
   9226 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9227 		}
   9228 	} else if (sc->sc_type == WM_T_PCH) {
   9229 		wm_k1_gig_workaround_hv(sc,
   9230 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9231 	}
   9232 
   9233 	/*
   9234 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9235 	 * aggressive resulting in many collisions. To avoid this, increase
   9236 	 * the IPG and reduce Rx latency in the PHY.
   9237 	 */
   9238 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9239 	    && link) {
   9240 		uint32_t tipg_reg;
   9241 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9242 		bool fdx;
   9243 		uint16_t emi_addr, emi_val;
   9244 
   9245 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9246 		tipg_reg &= ~TIPG_IPGT_MASK;
   9247 		fdx = status & STATUS_FD;
   9248 
   9249 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9250 			tipg_reg |= 0xff;
   9251 			/* Reduce Rx latency in analog PHY */
   9252 			emi_val = 0;
   9253 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9254 		    fdx && speed != STATUS_SPEED_1000) {
   9255 			tipg_reg |= 0xc;
   9256 			emi_val = 1;
   9257 		} else {
   9258 			/* Roll back the default values */
   9259 			tipg_reg |= 0x08;
   9260 			emi_val = 1;
   9261 		}
   9262 
   9263 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9264 
   9265 		rv = sc->phy.acquire(sc);
   9266 		if (rv)
   9267 			return;
   9268 
   9269 		if (sc->sc_type == WM_T_PCH2)
   9270 			emi_addr = I82579_RX_CONFIG;
   9271 		else
   9272 			emi_addr = I217_RX_CONFIG;
   9273 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9274 
   9275 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9276 			uint16_t phy_reg;
   9277 
   9278 			sc->phy.readreg_locked(dev, 2,
   9279 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9280 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9281 			if (speed == STATUS_SPEED_100
   9282 			    || speed == STATUS_SPEED_10)
   9283 				phy_reg |= 0x3e8;
   9284 			else
   9285 				phy_reg |= 0xfa;
   9286 			sc->phy.writereg_locked(dev, 2,
   9287 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9288 
   9289 			if (speed == STATUS_SPEED_1000) {
   9290 				sc->phy.readreg_locked(dev, 2,
   9291 				    HV_PM_CTRL, &phy_reg);
   9292 
   9293 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9294 
   9295 				sc->phy.writereg_locked(dev, 2,
   9296 				    HV_PM_CTRL, phy_reg);
   9297 			}
   9298 		}
   9299 		sc->phy.release(sc);
   9300 
   9301 		if (rv)
   9302 			return;
   9303 
   9304 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9305 			uint16_t data, ptr_gap;
   9306 
   9307 			if (speed == STATUS_SPEED_1000) {
   9308 				rv = sc->phy.acquire(sc);
   9309 				if (rv)
   9310 					return;
   9311 
   9312 				rv = sc->phy.readreg_locked(dev, 2,
   9313 				    I219_UNKNOWN1, &data);
   9314 				if (rv) {
   9315 					sc->phy.release(sc);
   9316 					return;
   9317 				}
   9318 
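         				/*
         				 * ptr_gap lives in bits 11:2 of
         				 * this undocumented I219 PHY
         				 * register; enforce a minimum gap
         				 * of 0x18 at gigabit speed.
         				 */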
   9319 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9320 				if (ptr_gap < 0x18) {
   9321 					data &= ~(0x3ff << 2);
   9322 					data |= (0x18 << 2);
   9323 					rv = sc->phy.writereg_locked(dev,
   9324 					    2, I219_UNKNOWN1, data);
   9325 				}
   9326 				sc->phy.release(sc);
   9327 				if (rv)
   9328 					return;
   9329 			} else {
   9330 				rv = sc->phy.acquire(sc);
   9331 				if (rv)
   9332 					return;
   9333 
   9334 				rv = sc->phy.writereg_locked(dev, 2,
   9335 				    I219_UNKNOWN1, 0xc023);
   9336 				sc->phy.release(sc);
   9337 				if (rv)
   9338 					return;
   9339 
   9340 			}
   9341 		}
   9342 	}
   9343 
   9344 	/*
    9345 	 * I217 packet loss issue:
    9346 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    9347 	 * on power up.
    9348 	 * Set the Beacon Duration for the I217 to 8 usec.
   9349 	 */
   9350 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9351 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9352 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9353 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9354 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9355 	}
   9356 
    9357 	/* Work around the I218 hang issue */
   9358 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9359 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9360 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9361 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9362 		wm_k1_workaround_lpt_lp(sc, link);
   9363 
   9364 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9365 		/*
   9366 		 * Set platform power management values for Latency
   9367 		 * Tolerance Reporting (LTR)
   9368 		 */
   9369 		wm_platform_pm_pch_lpt(sc,
   9370 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9371 	}
   9372 
   9373 	/* Clear link partner's EEE ability */
   9374 	sc->eee_lp_ability = 0;
   9375 
   9376 	/* FEXTNVM6 K1-off workaround */
   9377 	if (sc->sc_type == WM_T_PCH_SPT) {
   9378 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9379 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9380 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9381 		else
   9382 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9383 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9384 	}
   9385 
   9386 	if (!link)
   9387 		return;
   9388 
   9389 	switch (sc->sc_type) {
   9390 	case WM_T_PCH2:
   9391 		wm_k1_workaround_lv(sc);
   9392 		/* FALLTHROUGH */
   9393 	case WM_T_PCH:
   9394 		if (sc->sc_phytype == WMPHY_82578)
   9395 			wm_link_stall_workaround_hv(sc);
   9396 		break;
   9397 	default:
   9398 		break;
   9399 	}
   9400 
   9401 	/* Enable/Disable EEE after link up */
   9402 	if (sc->sc_phytype > WMPHY_82579)
   9403 		wm_set_eee_pchlan(sc);
   9404 }
   9405 
   9406 /*
   9407  * wm_linkintr_tbi:
   9408  *
   9409  *	Helper; handle link interrupts for TBI mode.
   9410  */
   9411 static void
   9412 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9413 {
   9414 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9415 	uint32_t status;
   9416 
   9417 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9418 		__func__));
   9419 
   9420 	status = CSR_READ(sc, WMREG_STATUS);
   9421 	if (icr & ICR_LSC) {
   9422 		wm_check_for_link(sc);
   9423 		if (status & STATUS_LU) {
   9424 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9425 				device_xname(sc->sc_dev),
   9426 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9427 			/*
   9428 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9429 			 * so we should update sc->sc_ctrl
   9430 			 */
   9431 
   9432 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9433 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9434 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9435 			if (status & STATUS_FD)
   9436 				sc->sc_tctl |=
   9437 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9438 			else
   9439 				sc->sc_tctl |=
   9440 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9441 			if (sc->sc_ctrl & CTRL_TFCE)
   9442 				sc->sc_fcrtl |= FCRTL_XONE;
   9443 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9444 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9445 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9446 			sc->sc_tbi_linkup = 1;
   9447 			if_link_state_change(ifp, LINK_STATE_UP);
   9448 		} else {
   9449 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9450 				device_xname(sc->sc_dev)));
   9451 			sc->sc_tbi_linkup = 0;
   9452 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9453 		}
   9454 		/* Update LED */
   9455 		wm_tbi_serdes_set_linkled(sc);
   9456 	} else if (icr & ICR_RXSEQ)
   9457 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9458 			device_xname(sc->sc_dev)));
   9459 }
   9460 
   9461 /*
   9462  * wm_linkintr_serdes:
   9463  *
    9464  *	Helper; handle link interrupts for SERDES mode.
   9465  */
   9466 static void
   9467 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9468 {
   9469 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9470 	struct mii_data *mii = &sc->sc_mii;
   9471 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9472 	uint32_t pcs_adv, pcs_lpab, reg;
   9473 
   9474 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9475 		__func__));
   9476 
   9477 	if (icr & ICR_LSC) {
   9478 		/* Check PCS */
   9479 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9480 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9481 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9482 				device_xname(sc->sc_dev)));
   9483 			mii->mii_media_status |= IFM_ACTIVE;
   9484 			sc->sc_tbi_linkup = 1;
   9485 			if_link_state_change(ifp, LINK_STATE_UP);
   9486 		} else {
   9487 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9488 				device_xname(sc->sc_dev)));
    9489 			mii->mii_media_active |= IFM_NONE;
   9490 			sc->sc_tbi_linkup = 0;
   9491 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9492 			wm_tbi_serdes_set_linkled(sc);
   9493 			return;
   9494 		}
   9495 		mii->mii_media_active |= IFM_1000_SX;
   9496 		if ((reg & PCS_LSTS_FDX) != 0)
   9497 			mii->mii_media_active |= IFM_FDX;
   9498 		else
   9499 			mii->mii_media_active |= IFM_HDX;
   9500 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9501 			/* Check flow */
   9502 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9503 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9504 				DPRINTF(WM_DEBUG_LINK,
   9505 				    ("XXX LINKOK but not ACOMP\n"));
   9506 				return;
   9507 			}
   9508 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9509 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9510 			DPRINTF(WM_DEBUG_LINK,
   9511 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9512 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9513 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9514 				mii->mii_media_active |= IFM_FLOW
   9515 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9516 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9517 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9518 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9519 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9520 				mii->mii_media_active |= IFM_FLOW
   9521 				    | IFM_ETH_TXPAUSE;
   9522 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9523 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9524 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9525 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9526 				mii->mii_media_active |= IFM_FLOW
   9527 				    | IFM_ETH_RXPAUSE;
   9528 		}
   9529 		/* Update LED */
   9530 		wm_tbi_serdes_set_linkled(sc);
   9531 	} else
   9532 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9533 		    device_xname(sc->sc_dev)));
   9534 }
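
/*
 * Illustrative sketch (not part of the driver): the pause-bit checks in
 * wm_linkintr_serdes() above implement the flow control resolution of
 * IEEE 802.3 Annex 28B.3. A minimal stand-alone version of the same
 * decision, assuming the TXCW_*_PAUSE definitions used above:
 */
#if 0	/* example only */
static int
pcs_pause_resolve(uint32_t adv, uint32_t lpab)
{
	/* Returns 0: no pause, 1: TX+RX pause, 2: TX only, 3: RX only. */
	if ((adv & TXCW_SYM_PAUSE) && (lpab & TXCW_SYM_PAUSE))
		return 1;
	if (((adv & TXCW_SYM_PAUSE) == 0) && (adv & TXCW_ASYM_PAUSE) &&
	    (lpab & TXCW_SYM_PAUSE) && (lpab & TXCW_ASYM_PAUSE))
		return 2;	/* We send pause frames; partner honors them */
	if ((adv & TXCW_SYM_PAUSE) && (adv & TXCW_ASYM_PAUSE) &&
	    ((lpab & TXCW_SYM_PAUSE) == 0) && (lpab & TXCW_ASYM_PAUSE))
		return 3;	/* Partner sends pause frames; we honor them */
	return 0;
}
#endif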
   9535 
   9536 /*
   9537  * wm_linkintr:
   9538  *
   9539  *	Helper; handle link interrupts.
   9540  */
   9541 static void
   9542 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9543 {
   9544 
   9545 	KASSERT(WM_CORE_LOCKED(sc));
   9546 
   9547 	if (sc->sc_flags & WM_F_HAS_MII)
   9548 		wm_linkintr_gmii(sc, icr);
   9549 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9550 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9551 		wm_linkintr_serdes(sc, icr);
   9552 	else
   9553 		wm_linkintr_tbi(sc, icr);
   9554 }
   9555 
   9556 
   9557 static inline void
   9558 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9559 {
   9560 
   9561 	if (wmq->wmq_txrx_use_workqueue)
   9562 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9563 	else
   9564 		softint_schedule(wmq->wmq_si);
   9565 }
   9566 
   9567 /*
   9568  * wm_intr_legacy:
   9569  *
   9570  *	Interrupt service routine for INTx and MSI.
   9571  */
   9572 static int
   9573 wm_intr_legacy(void *arg)
   9574 {
   9575 	struct wm_softc *sc = arg;
   9576 	struct wm_queue *wmq = &sc->sc_queue[0];
   9577 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9578 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9579 	uint32_t icr, rndval = 0;
   9580 	int handled = 0;
   9581 
   9582 	while (1 /* CONSTCOND */) {
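		/*
		 * Reading ICR also acknowledges (clears) the asserted
		 * causes, so we keep looping until no enabled cause
		 * remains set.
		 */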
   9583 		icr = CSR_READ(sc, WMREG_ICR);
   9584 		if ((icr & sc->sc_icr) == 0)
   9585 			break;
   9586 		if (handled == 0)
   9587 			DPRINTF(WM_DEBUG_TX,
   9588 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9589 		if (rndval == 0)
   9590 			rndval = icr;
   9591 
   9592 		mutex_enter(rxq->rxq_lock);
   9593 
   9594 		if (rxq->rxq_stopping) {
   9595 			mutex_exit(rxq->rxq_lock);
   9596 			break;
   9597 		}
   9598 
   9599 		handled = 1;
   9600 
   9601 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9602 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9603 			DPRINTF(WM_DEBUG_RX,
   9604 			    ("%s: RX: got Rx intr 0x%08x\n",
   9605 				device_xname(sc->sc_dev),
   9606 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9607 			WM_Q_EVCNT_INCR(rxq, intr);
   9608 		}
   9609 #endif
   9610 		/*
   9611 		 * wm_rxeof() does *not* call upper layer functions directly,
    9612 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9613 		 * So, we can call wm_rxeof() in interrupt context.
   9614 		 */
   9615 		wm_rxeof(rxq, UINT_MAX);
   9616 
   9617 		mutex_exit(rxq->rxq_lock);
   9618 		mutex_enter(txq->txq_lock);
   9619 
   9620 		if (txq->txq_stopping) {
   9621 			mutex_exit(txq->txq_lock);
   9622 			break;
   9623 		}
   9624 
   9625 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9626 		if (icr & ICR_TXDW) {
   9627 			DPRINTF(WM_DEBUG_TX,
   9628 			    ("%s: TX: got TXDW interrupt\n",
   9629 				device_xname(sc->sc_dev)));
   9630 			WM_Q_EVCNT_INCR(txq, txdw);
   9631 		}
   9632 #endif
   9633 		wm_txeof(txq, UINT_MAX);
   9634 
   9635 		mutex_exit(txq->txq_lock);
   9636 		WM_CORE_LOCK(sc);
   9637 
   9638 		if (sc->sc_core_stopping) {
   9639 			WM_CORE_UNLOCK(sc);
   9640 			break;
   9641 		}
   9642 
   9643 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9644 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9645 			wm_linkintr(sc, icr);
   9646 		}
   9647 		if ((icr & ICR_GPI(0)) != 0)
   9648 			device_printf(sc->sc_dev, "got module interrupt\n");
   9649 
   9650 		WM_CORE_UNLOCK(sc);
   9651 
   9652 		if (icr & ICR_RXO) {
   9653 #if defined(WM_DEBUG)
   9654 			log(LOG_WARNING, "%s: Receive overrun\n",
   9655 			    device_xname(sc->sc_dev));
   9656 #endif /* defined(WM_DEBUG) */
   9657 		}
   9658 	}
   9659 
   9660 	rnd_add_uint32(&sc->rnd_source, rndval);
   9661 
   9662 	if (handled) {
   9663 		/* Try to get more packets going. */
   9664 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9665 		wm_sched_handle_queue(sc, wmq);
   9666 	}
   9667 
   9668 	return handled;
   9669 }
   9670 
   9671 static inline void
   9672 wm_txrxintr_disable(struct wm_queue *wmq)
   9673 {
   9674 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9675 
   9676 	if (sc->sc_type == WM_T_82574)
   9677 		CSR_WRITE(sc, WMREG_IMC,
   9678 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9679 	else if (sc->sc_type == WM_T_82575)
   9680 		CSR_WRITE(sc, WMREG_EIMC,
   9681 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9682 	else
   9683 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9684 }
   9685 
   9686 static inline void
   9687 wm_txrxintr_enable(struct wm_queue *wmq)
   9688 {
   9689 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9690 
   9691 	wm_itrs_calculate(sc, wmq);
   9692 
   9693 	/*
    9694 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9695 	 * here. It does not matter whether RXQ(0) or RXQ(1) re-enables
    9696 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9697 	 * while its wm_handle_queue(wmq) is running.
   9698 	 */
   9699 	if (sc->sc_type == WM_T_82574)
   9700 		CSR_WRITE(sc, WMREG_IMS,
   9701 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9702 	else if (sc->sc_type == WM_T_82575)
   9703 		CSR_WRITE(sc, WMREG_EIMS,
   9704 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9705 	else
   9706 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9707 }
   9708 
   9709 static int
   9710 wm_txrxintr_msix(void *arg)
   9711 {
   9712 	struct wm_queue *wmq = arg;
   9713 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9714 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9715 	struct wm_softc *sc = txq->txq_sc;
   9716 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9717 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9718 	bool txmore;
   9719 	bool rxmore;
   9720 
   9721 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9722 
   9723 	DPRINTF(WM_DEBUG_TX,
   9724 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9725 
   9726 	wm_txrxintr_disable(wmq);
   9727 
   9728 	mutex_enter(txq->txq_lock);
   9729 
   9730 	if (txq->txq_stopping) {
   9731 		mutex_exit(txq->txq_lock);
   9732 		return 0;
   9733 	}
   9734 
   9735 	WM_Q_EVCNT_INCR(txq, txdw);
   9736 	txmore = wm_txeof(txq, txlimit);
    9737 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9738 	mutex_exit(txq->txq_lock);
   9739 
   9740 	DPRINTF(WM_DEBUG_RX,
   9741 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9742 	mutex_enter(rxq->rxq_lock);
   9743 
   9744 	if (rxq->rxq_stopping) {
   9745 		mutex_exit(rxq->rxq_lock);
   9746 		return 0;
   9747 	}
   9748 
   9749 	WM_Q_EVCNT_INCR(rxq, intr);
   9750 	rxmore = wm_rxeof(rxq, rxlimit);
   9751 	mutex_exit(rxq->rxq_lock);
   9752 
   9753 	wm_itrs_writereg(sc, wmq);
   9754 
   9755 	if (txmore || rxmore) {
   9756 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9757 		wm_sched_handle_queue(sc, wmq);
   9758 	} else
   9759 		wm_txrxintr_enable(wmq);
   9760 
   9761 	return 1;
   9762 }
   9763 
   9764 static void
   9765 wm_handle_queue(void *arg)
   9766 {
   9767 	struct wm_queue *wmq = arg;
   9768 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9769 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9770 	struct wm_softc *sc = txq->txq_sc;
   9771 	u_int txlimit = sc->sc_tx_process_limit;
   9772 	u_int rxlimit = sc->sc_rx_process_limit;
   9773 	bool txmore;
   9774 	bool rxmore;
   9775 
   9776 	mutex_enter(txq->txq_lock);
   9777 	if (txq->txq_stopping) {
   9778 		mutex_exit(txq->txq_lock);
   9779 		return;
   9780 	}
   9781 	txmore = wm_txeof(txq, txlimit);
   9782 	wm_deferred_start_locked(txq);
   9783 	mutex_exit(txq->txq_lock);
   9784 
   9785 	mutex_enter(rxq->rxq_lock);
   9786 	if (rxq->rxq_stopping) {
   9787 		mutex_exit(rxq->rxq_lock);
   9788 		return;
   9789 	}
   9790 	WM_Q_EVCNT_INCR(rxq, defer);
   9791 	rxmore = wm_rxeof(rxq, rxlimit);
   9792 	mutex_exit(rxq->rxq_lock);
   9793 
   9794 	if (txmore || rxmore) {
   9795 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9796 		wm_sched_handle_queue(sc, wmq);
   9797 	} else
   9798 		wm_txrxintr_enable(wmq);
   9799 }
   9800 
   9801 static void
   9802 wm_handle_queue_work(struct work *wk, void *context)
   9803 {
   9804 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9805 
    9806 	/* An "enqueued" flag is not required here. */
   9809 	wm_handle_queue(wmq);
   9810 }
   9811 
   9812 /*
   9813  * wm_linkintr_msix:
   9814  *
   9815  *	Interrupt service routine for link status change for MSI-X.
   9816  */
   9817 static int
   9818 wm_linkintr_msix(void *arg)
   9819 {
   9820 	struct wm_softc *sc = arg;
   9821 	uint32_t reg;
   9822 	bool has_rxo;
   9823 
   9824 	reg = CSR_READ(sc, WMREG_ICR);
   9825 	WM_CORE_LOCK(sc);
   9826 	DPRINTF(WM_DEBUG_LINK,
   9827 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9828 		device_xname(sc->sc_dev), reg));
   9829 
   9830 	if (sc->sc_core_stopping)
   9831 		goto out;
   9832 
   9833 	if ((reg & ICR_LSC) != 0) {
   9834 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9835 		wm_linkintr(sc, ICR_LSC);
   9836 	}
   9837 	if ((reg & ICR_GPI(0)) != 0)
   9838 		device_printf(sc->sc_dev, "got module interrupt\n");
   9839 
   9840 	/*
   9841 	 * XXX 82574 MSI-X mode workaround
   9842 	 *
    9843 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    9844 	 * MSI-X vector; furthermore, it raises neither the ICR_RXQ(0) nor
    9845 	 * the ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9846 	 * interrupts by writing WMREG_ICS to process the received packets.
   9847 	 */
   9848 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9849 #if defined(WM_DEBUG)
   9850 		log(LOG_WARNING, "%s: Receive overrun\n",
   9851 		    device_xname(sc->sc_dev));
   9852 #endif /* defined(WM_DEBUG) */
   9853 
   9854 		has_rxo = true;
   9855 		/*
    9856 		 * The RXO interrupt fires at a very high rate when receive
    9857 		 * traffic is heavy, so use polling mode for ICR_OTHER as is
    9858 		 * done for the Tx/Rx interrupts. ICR_OTHER is re-enabled at
    9859 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    9860 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9861 		 */
   9862 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9863 
   9864 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9865 	}
    9866 
   9869 out:
   9870 	WM_CORE_UNLOCK(sc);
   9871 
   9872 	if (sc->sc_type == WM_T_82574) {
   9873 		if (!has_rxo)
   9874 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9875 		else
   9876 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9877 	} else if (sc->sc_type == WM_T_82575)
   9878 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9879 	else
   9880 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9881 
   9882 	return 1;
   9883 }
   9884 
   9885 /*
   9886  * Media related.
   9887  * GMII, SGMII, TBI (and SERDES)
   9888  */
   9889 
   9890 /* Common */
   9891 
   9892 /*
   9893  * wm_tbi_serdes_set_linkled:
   9894  *
   9895  *	Update the link LED on TBI and SERDES devices.
   9896  */
   9897 static void
   9898 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9899 {
   9900 
   9901 	if (sc->sc_tbi_linkup)
   9902 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9903 	else
   9904 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9905 
   9906 	/* 82540 or newer devices are active low */
   9907 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9908 
   9909 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9910 }
   9911 
   9912 /* GMII related */
   9913 
   9914 /*
   9915  * wm_gmii_reset:
   9916  *
   9917  *	Reset the PHY.
   9918  */
   9919 static void
   9920 wm_gmii_reset(struct wm_softc *sc)
   9921 {
   9922 	uint32_t reg;
   9923 	int rv;
   9924 
   9925 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9926 		device_xname(sc->sc_dev), __func__));
   9927 
   9928 	rv = sc->phy.acquire(sc);
   9929 	if (rv != 0) {
   9930 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9931 		    __func__);
   9932 		return;
   9933 	}
   9934 
   9935 	switch (sc->sc_type) {
   9936 	case WM_T_82542_2_0:
   9937 	case WM_T_82542_2_1:
   9938 		/* null */
   9939 		break;
   9940 	case WM_T_82543:
   9941 		/*
   9942 		 * With 82543, we need to force speed and duplex on the MAC
   9943 		 * equal to what the PHY speed and duplex configuration is.
   9944 		 * In addition, we need to perform a hardware reset on the PHY
   9945 		 * to take it out of reset.
   9946 		 */
   9947 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9948 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9949 
   9950 		/* The PHY reset pin is active-low. */
   9951 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9952 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9953 		    CTRL_EXT_SWDPIN(4));
   9954 		reg |= CTRL_EXT_SWDPIO(4);
   9955 
   9956 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9957 		CSR_WRITE_FLUSH(sc);
   9958 		delay(10*1000);
   9959 
   9960 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9961 		CSR_WRITE_FLUSH(sc);
   9962 		delay(150);
   9963 #if 0
   9964 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9965 #endif
   9966 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9967 		break;
   9968 	case WM_T_82544:	/* Reset 10000us */
   9969 	case WM_T_82540:
   9970 	case WM_T_82545:
   9971 	case WM_T_82545_3:
   9972 	case WM_T_82546:
   9973 	case WM_T_82546_3:
   9974 	case WM_T_82541:
   9975 	case WM_T_82541_2:
   9976 	case WM_T_82547:
   9977 	case WM_T_82547_2:
   9978 	case WM_T_82571:	/* Reset 100us */
   9979 	case WM_T_82572:
   9980 	case WM_T_82573:
   9981 	case WM_T_82574:
   9982 	case WM_T_82575:
   9983 	case WM_T_82576:
   9984 	case WM_T_82580:
   9985 	case WM_T_I350:
   9986 	case WM_T_I354:
   9987 	case WM_T_I210:
   9988 	case WM_T_I211:
   9989 	case WM_T_82583:
   9990 	case WM_T_80003:
   9991 		/* Generic reset */
   9992 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9993 		CSR_WRITE_FLUSH(sc);
   9994 		delay(20000);
   9995 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9996 		CSR_WRITE_FLUSH(sc);
   9997 		delay(20000);
   9998 
   9999 		if ((sc->sc_type == WM_T_82541)
   10000 		    || (sc->sc_type == WM_T_82541_2)
   10001 		    || (sc->sc_type == WM_T_82547)
   10002 		    || (sc->sc_type == WM_T_82547_2)) {
    10003 			/* Workarounds for IGP are done in igp_reset() */
   10004 			/* XXX add code to set LED after phy reset */
   10005 		}
   10006 		break;
   10007 	case WM_T_ICH8:
   10008 	case WM_T_ICH9:
   10009 	case WM_T_ICH10:
   10010 	case WM_T_PCH:
   10011 	case WM_T_PCH2:
   10012 	case WM_T_PCH_LPT:
   10013 	case WM_T_PCH_SPT:
   10014 	case WM_T_PCH_CNP:
   10015 		/* Generic reset */
   10016 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10017 		CSR_WRITE_FLUSH(sc);
   10018 		delay(100);
   10019 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10020 		CSR_WRITE_FLUSH(sc);
   10021 		delay(150);
   10022 		break;
   10023 	default:
   10024 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10025 		    __func__);
   10026 		break;
   10027 	}
   10028 
   10029 	sc->phy.release(sc);
   10030 
   10031 	/* get_cfg_done */
   10032 	wm_get_cfg_done(sc);
   10033 
   10034 	/* Extra setup */
   10035 	switch (sc->sc_type) {
   10036 	case WM_T_82542_2_0:
   10037 	case WM_T_82542_2_1:
   10038 	case WM_T_82543:
   10039 	case WM_T_82544:
   10040 	case WM_T_82540:
   10041 	case WM_T_82545:
   10042 	case WM_T_82545_3:
   10043 	case WM_T_82546:
   10044 	case WM_T_82546_3:
   10045 	case WM_T_82541_2:
   10046 	case WM_T_82547_2:
   10047 	case WM_T_82571:
   10048 	case WM_T_82572:
   10049 	case WM_T_82573:
   10050 	case WM_T_82574:
   10051 	case WM_T_82583:
   10052 	case WM_T_82575:
   10053 	case WM_T_82576:
   10054 	case WM_T_82580:
   10055 	case WM_T_I350:
   10056 	case WM_T_I354:
   10057 	case WM_T_I210:
   10058 	case WM_T_I211:
   10059 	case WM_T_80003:
   10060 		/* Null */
   10061 		break;
   10062 	case WM_T_82541:
   10063 	case WM_T_82547:
    10064 		/* XXX Actively configure the LED after PHY reset */
   10065 		break;
   10066 	case WM_T_ICH8:
   10067 	case WM_T_ICH9:
   10068 	case WM_T_ICH10:
   10069 	case WM_T_PCH:
   10070 	case WM_T_PCH2:
   10071 	case WM_T_PCH_LPT:
   10072 	case WM_T_PCH_SPT:
   10073 	case WM_T_PCH_CNP:
   10074 		wm_phy_post_reset(sc);
   10075 		break;
   10076 	default:
   10077 		panic("%s: unknown type\n", __func__);
   10078 		break;
   10079 	}
   10080 }
   10081 
   10082 /*
    10083  * Set up sc_phytype and mii_{read|write}reg.
    10084  *
    10085  *  To identify the PHY type, the correct read/write functions must be
    10086  * selected, and to select them the PCI ID or MAC type is required,
    10087  * without accessing any PHY registers.
    10088  *
    10089  *  On the first call of this function, the PHY ID is not yet known, so
    10090  * check the PCI ID or the MAC type. The list of PCI IDs may not be
    10091  * complete, so the result might be incorrect.
    10092  *
    10093  *  On the second call, the PHY OUI and model are used to identify the
    10094  * PHY type. This might still not be perfect for lack of a matching
    10095  * table entry, but it is better than the first guess.
    10096  *
    10097  *  If the newly detected result differs from the previous assumption,
    10098  * a diagnostic message is printed.
   10099  */
   10100 static void
   10101 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10102     uint16_t phy_model)
   10103 {
   10104 	device_t dev = sc->sc_dev;
   10105 	struct mii_data *mii = &sc->sc_mii;
   10106 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10107 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10108 	mii_readreg_t new_readreg;
   10109 	mii_writereg_t new_writereg;
   10110 	bool dodiag = true;
   10111 
   10112 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10113 		device_xname(sc->sc_dev), __func__));
   10114 
   10115 	/*
    10116 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
    10117 	 * incorrect, so don't print diagnostic output on the second call.
   10118 	 */
   10119 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10120 		dodiag = false;
   10121 
   10122 	if (mii->mii_readreg == NULL) {
   10123 		/*
   10124 		 *  This is the first call of this function. For ICH and PCH
   10125 		 * variants, it's difficult to determine the PHY access method
   10126 		 * by sc_type, so use the PCI product ID for some devices.
   10127 		 */
   10128 
   10129 		switch (sc->sc_pcidevid) {
   10130 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10131 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10132 			/* 82577 */
   10133 			new_phytype = WMPHY_82577;
   10134 			break;
   10135 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10136 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10137 			/* 82578 */
   10138 			new_phytype = WMPHY_82578;
   10139 			break;
   10140 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10141 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10142 			/* 82579 */
   10143 			new_phytype = WMPHY_82579;
   10144 			break;
   10145 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10146 		case PCI_PRODUCT_INTEL_82801I_BM:
   10147 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10148 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10149 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10150 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10151 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10152 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10153 			/* ICH8, 9, 10 with 82567 */
   10154 			new_phytype = WMPHY_BM;
   10155 			break;
   10156 		default:
   10157 			break;
   10158 		}
   10159 	} else {
   10160 		/* It's not the first call. Use PHY OUI and model */
   10161 		switch (phy_oui) {
   10162 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10163 			switch (phy_model) {
   10164 			case 0x0004: /* XXX */
   10165 				new_phytype = WMPHY_82578;
   10166 				break;
   10167 			default:
   10168 				break;
   10169 			}
   10170 			break;
   10171 		case MII_OUI_xxMARVELL:
   10172 			switch (phy_model) {
   10173 			case MII_MODEL_xxMARVELL_I210:
   10174 				new_phytype = WMPHY_I210;
   10175 				break;
   10176 			case MII_MODEL_xxMARVELL_E1011:
   10177 			case MII_MODEL_xxMARVELL_E1000_3:
   10178 			case MII_MODEL_xxMARVELL_E1000_5:
   10179 			case MII_MODEL_xxMARVELL_E1112:
   10180 				new_phytype = WMPHY_M88;
   10181 				break;
   10182 			case MII_MODEL_xxMARVELL_E1149:
   10183 				new_phytype = WMPHY_BM;
   10184 				break;
   10185 			case MII_MODEL_xxMARVELL_E1111:
   10186 			case MII_MODEL_xxMARVELL_I347:
   10187 			case MII_MODEL_xxMARVELL_E1512:
   10188 			case MII_MODEL_xxMARVELL_E1340M:
   10189 			case MII_MODEL_xxMARVELL_E1543:
   10190 				new_phytype = WMPHY_M88;
   10191 				break;
   10192 			case MII_MODEL_xxMARVELL_I82563:
   10193 				new_phytype = WMPHY_GG82563;
   10194 				break;
   10195 			default:
   10196 				break;
   10197 			}
   10198 			break;
   10199 		case MII_OUI_INTEL:
   10200 			switch (phy_model) {
   10201 			case MII_MODEL_INTEL_I82577:
   10202 				new_phytype = WMPHY_82577;
   10203 				break;
   10204 			case MII_MODEL_INTEL_I82579:
   10205 				new_phytype = WMPHY_82579;
   10206 				break;
   10207 			case MII_MODEL_INTEL_I217:
   10208 				new_phytype = WMPHY_I217;
   10209 				break;
   10210 			case MII_MODEL_INTEL_I82580:
   10211 			case MII_MODEL_INTEL_I350:
   10212 				new_phytype = WMPHY_82580;
   10213 				break;
   10214 			default:
   10215 				break;
   10216 			}
   10217 			break;
   10218 		case MII_OUI_yyINTEL:
   10219 			switch (phy_model) {
   10220 			case MII_MODEL_yyINTEL_I82562G:
   10221 			case MII_MODEL_yyINTEL_I82562EM:
   10222 			case MII_MODEL_yyINTEL_I82562ET:
   10223 				new_phytype = WMPHY_IFE;
   10224 				break;
   10225 			case MII_MODEL_yyINTEL_IGP01E1000:
   10226 				new_phytype = WMPHY_IGP;
   10227 				break;
   10228 			case MII_MODEL_yyINTEL_I82566:
   10229 				new_phytype = WMPHY_IGP_3;
   10230 				break;
   10231 			default:
   10232 				break;
   10233 			}
   10234 			break;
   10235 		default:
   10236 			break;
   10237 		}
   10238 
   10239 		if (dodiag) {
   10240 			if (new_phytype == WMPHY_UNKNOWN)
   10241 				aprint_verbose_dev(dev,
   10242 				    "%s: Unknown PHY model. OUI=%06x, "
   10243 				    "model=%04x\n", __func__, phy_oui,
   10244 				    phy_model);
   10245 
   10246 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10247 			    && (sc->sc_phytype != new_phytype)) {
   10248 				aprint_error_dev(dev, "Previously assumed PHY "
    10249 				    "type(%u) was incorrect. PHY type from "
    10250 				    "PHY ID = %u\n", sc->sc_phytype, new_phytype);
   10251 			}
   10252 		}
   10253 	}
   10254 
   10255 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10256 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10257 		/* SGMII */
   10258 		new_readreg = wm_sgmii_readreg;
   10259 		new_writereg = wm_sgmii_writereg;
    10260 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   10261 		/* BM2 (phyaddr == 1) */
   10262 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10263 		    && (new_phytype != WMPHY_BM)
   10264 		    && (new_phytype != WMPHY_UNKNOWN))
   10265 			doubt_phytype = new_phytype;
   10266 		new_phytype = WMPHY_BM;
   10267 		new_readreg = wm_gmii_bm_readreg;
   10268 		new_writereg = wm_gmii_bm_writereg;
   10269 	} else if (sc->sc_type >= WM_T_PCH) {
   10270 		/* All PCH* use _hv_ */
   10271 		new_readreg = wm_gmii_hv_readreg;
   10272 		new_writereg = wm_gmii_hv_writereg;
   10273 	} else if (sc->sc_type >= WM_T_ICH8) {
   10274 		/* non-82567 ICH8, 9 and 10 */
   10275 		new_readreg = wm_gmii_i82544_readreg;
   10276 		new_writereg = wm_gmii_i82544_writereg;
   10277 	} else if (sc->sc_type >= WM_T_80003) {
   10278 		/* 80003 */
   10279 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10280 		    && (new_phytype != WMPHY_GG82563)
   10281 		    && (new_phytype != WMPHY_UNKNOWN))
   10282 			doubt_phytype = new_phytype;
   10283 		new_phytype = WMPHY_GG82563;
   10284 		new_readreg = wm_gmii_i80003_readreg;
   10285 		new_writereg = wm_gmii_i80003_writereg;
   10286 	} else if (sc->sc_type >= WM_T_I210) {
   10287 		/* I210 and I211 */
   10288 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10289 		    && (new_phytype != WMPHY_I210)
   10290 		    && (new_phytype != WMPHY_UNKNOWN))
   10291 			doubt_phytype = new_phytype;
   10292 		new_phytype = WMPHY_I210;
   10293 		new_readreg = wm_gmii_gs40g_readreg;
   10294 		new_writereg = wm_gmii_gs40g_writereg;
   10295 	} else if (sc->sc_type >= WM_T_82580) {
   10296 		/* 82580, I350 and I354 */
   10297 		new_readreg = wm_gmii_82580_readreg;
   10298 		new_writereg = wm_gmii_82580_writereg;
   10299 	} else if (sc->sc_type >= WM_T_82544) {
    10300 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10301 		new_readreg = wm_gmii_i82544_readreg;
   10302 		new_writereg = wm_gmii_i82544_writereg;
   10303 	} else {
   10304 		new_readreg = wm_gmii_i82543_readreg;
   10305 		new_writereg = wm_gmii_i82543_writereg;
   10306 	}
   10307 
   10308 	if (new_phytype == WMPHY_BM) {
   10309 		/* All BM use _bm_ */
   10310 		new_readreg = wm_gmii_bm_readreg;
   10311 		new_writereg = wm_gmii_bm_writereg;
   10312 	}
   10313 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10314 		/* All PCH* use _hv_ */
   10315 		new_readreg = wm_gmii_hv_readreg;
   10316 		new_writereg = wm_gmii_hv_writereg;
   10317 	}
   10318 
   10319 	/* Diag output */
   10320 	if (dodiag) {
   10321 		if (doubt_phytype != WMPHY_UNKNOWN)
   10322 			aprint_error_dev(dev, "Assumed new PHY type was "
   10323 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10324 			    new_phytype);
   10325 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10326 		    && (sc->sc_phytype != new_phytype))
   10327 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    10328 			    " was incorrect. New PHY type = %u\n",
   10329 			    sc->sc_phytype, new_phytype);
   10330 
   10331 		if ((mii->mii_readreg != NULL) &&
   10332 		    (new_phytype == WMPHY_UNKNOWN))
   10333 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10334 
   10335 		if ((mii->mii_readreg != NULL) &&
   10336 		    (mii->mii_readreg != new_readreg))
   10337 			aprint_error_dev(dev, "Previously assumed PHY "
   10338 			    "read/write function was incorrect.\n");
   10339 	}
   10340 
   10341 	/* Update now */
   10342 	sc->sc_phytype = new_phytype;
   10343 	mii->mii_readreg = new_readreg;
   10344 	mii->mii_writereg = new_writereg;
   10345 	if (new_readreg == wm_gmii_hv_readreg) {
   10346 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10347 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10348 	} else if (new_readreg == wm_sgmii_readreg) {
   10349 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10350 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10351 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10352 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10353 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10354 	}
   10355 }
   10356 
   10357 /*
   10358  * wm_get_phy_id_82575:
   10359  *
    10360  * Return the PHY ID, or -1 on failure.
   10361  */
   10362 static int
   10363 wm_get_phy_id_82575(struct wm_softc *sc)
   10364 {
   10365 	uint32_t reg;
   10366 	int phyid = -1;
   10367 
   10368 	/* XXX */
   10369 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10370 		return -1;
   10371 
   10372 	if (wm_sgmii_uses_mdio(sc)) {
   10373 		switch (sc->sc_type) {
   10374 		case WM_T_82575:
   10375 		case WM_T_82576:
   10376 			reg = CSR_READ(sc, WMREG_MDIC);
   10377 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10378 			break;
   10379 		case WM_T_82580:
   10380 		case WM_T_I350:
   10381 		case WM_T_I354:
   10382 		case WM_T_I210:
   10383 		case WM_T_I211:
   10384 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10385 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10386 			break;
   10387 		default:
   10388 			return -1;
   10389 		}
   10390 	}
   10391 
   10392 	return phyid;
   10393 }
   10394 
   10395 /*
   10396  * wm_gmii_mediainit:
   10397  *
   10398  *	Initialize media for use on 1000BASE-T devices.
   10399  */
   10400 static void
   10401 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10402 {
   10403 	device_t dev = sc->sc_dev;
   10404 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10405 	struct mii_data *mii = &sc->sc_mii;
   10406 
   10407 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10408 		device_xname(sc->sc_dev), __func__));
   10409 
   10410 	/* We have GMII. */
   10411 	sc->sc_flags |= WM_F_HAS_MII;
   10412 
   10413 	if (sc->sc_type == WM_T_80003)
    10414 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10415 	else
   10416 		sc->sc_tipg = TIPG_1000T_DFLT;
   10417 
   10418 	/*
   10419 	 * Let the chip set speed/duplex on its own based on
   10420 	 * signals from the PHY.
   10421 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10422 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10423 	 */
   10424 	sc->sc_ctrl |= CTRL_SLU;
   10425 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10426 
   10427 	/* Initialize our media structures and probe the GMII. */
   10428 	mii->mii_ifp = ifp;
   10429 
   10430 	mii->mii_statchg = wm_gmii_statchg;
   10431 
   10432 	/* get PHY control from SMBus to PCIe */
   10433 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10434 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10435 	    || (sc->sc_type == WM_T_PCH_CNP))
   10436 		wm_init_phy_workarounds_pchlan(sc);
   10437 
   10438 	wm_gmii_reset(sc);
   10439 
   10440 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10441 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10442 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10443 
   10444 	/* Setup internal SGMII PHY for SFP */
   10445 	wm_sgmii_sfp_preconfig(sc);
   10446 
   10447 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10448 	    || (sc->sc_type == WM_T_82580)
   10449 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10450 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10451 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10452 			/* Attach only one port */
   10453 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10454 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10455 		} else {
   10456 			int i, id;
   10457 			uint32_t ctrl_ext;
   10458 
   10459 			id = wm_get_phy_id_82575(sc);
   10460 			if (id != -1) {
   10461 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10462 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10463 			}
   10464 			if ((id == -1)
   10465 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10466 				/* Power on sgmii phy if it is disabled */
   10467 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10468 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    10469 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   10470 				CSR_WRITE_FLUSH(sc);
   10471 				delay(300*1000); /* XXX too long */
   10472 
   10473 				/*
    10474 				 * Scan PHY addresses 1 through 7.
    10475 				 *
    10476 				 * I2C access can fail with the I2C register's
    10477 				 * ERROR bit set, so suppress error messages
    10478 				 * while scanning.
   10479 				 */
   10480 				sc->phy.no_errprint = true;
   10481 				for (i = 1; i < 8; i++)
   10482 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10483 					    0xffffffff, i, MII_OFFSET_ANY,
   10484 					    MIIF_DOPAUSE);
   10485 				sc->phy.no_errprint = false;
   10486 
   10487 				/* Restore previous sfp cage power state */
   10488 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10489 			}
   10490 		}
   10491 	} else
   10492 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10493 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10494 
   10495 	/*
   10496 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10497 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10498 	 */
   10499 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10500 		|| (sc->sc_type == WM_T_PCH_SPT)
   10501 		|| (sc->sc_type == WM_T_PCH_CNP))
   10502 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10503 		wm_set_mdio_slow_mode_hv(sc);
   10504 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10505 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10506 	}
   10507 
   10508 	/*
   10509 	 * (For ICH8 variants)
   10510 	 * If PHY detection failed, use BM's r/w function and retry.
   10511 	 */
   10512 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10513 		/* if failed, retry with *_bm_* */
   10514 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10515 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10516 		    sc->sc_phytype);
   10517 		sc->sc_phytype = WMPHY_BM;
   10518 		mii->mii_readreg = wm_gmii_bm_readreg;
   10519 		mii->mii_writereg = wm_gmii_bm_writereg;
   10520 
   10521 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10522 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10523 	}
   10524 
   10525 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10526 		/* No PHY was found */
   10527 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10528 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10529 		sc->sc_phytype = WMPHY_NONE;
   10530 	} else {
   10531 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10532 
   10533 		/*
    10534 		 * PHY found! Check the PHY type again with the second call
    10535 		 * of wm_gmii_setup_phytype().
   10536 		 */
   10537 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10538 		    child->mii_mpd_model);
   10539 
   10540 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10541 	}
   10542 }
   10543 
   10544 /*
   10545  * wm_gmii_mediachange:	[ifmedia interface function]
   10546  *
   10547  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10548  */
   10549 static int
   10550 wm_gmii_mediachange(struct ifnet *ifp)
   10551 {
   10552 	struct wm_softc *sc = ifp->if_softc;
   10553 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10554 	uint32_t reg;
   10555 	int rc;
   10556 
   10557 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10558 		device_xname(sc->sc_dev), __func__));
   10559 	if ((ifp->if_flags & IFF_UP) == 0)
   10560 		return 0;
   10561 
   10562 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10563 	if ((sc->sc_type == WM_T_82580)
   10564 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10565 	    || (sc->sc_type == WM_T_I211)) {
   10566 		reg = CSR_READ(sc, WMREG_PHPM);
   10567 		reg &= ~PHPM_GO_LINK_D;
   10568 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10569 	}
   10570 
   10571 	/* Disable D0 LPLU. */
   10572 	wm_lplu_d0_disable(sc);
   10573 
   10574 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10575 	sc->sc_ctrl |= CTRL_SLU;
   10576 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10577 	    || (sc->sc_type > WM_T_82543)) {
   10578 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10579 	} else {
   10580 		sc->sc_ctrl &= ~CTRL_ASDE;
   10581 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10582 		if (ife->ifm_media & IFM_FDX)
   10583 			sc->sc_ctrl |= CTRL_FD;
   10584 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10585 		case IFM_10_T:
   10586 			sc->sc_ctrl |= CTRL_SPEED_10;
   10587 			break;
   10588 		case IFM_100_TX:
   10589 			sc->sc_ctrl |= CTRL_SPEED_100;
   10590 			break;
   10591 		case IFM_1000_T:
   10592 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10593 			break;
   10594 		case IFM_NONE:
   10595 			/* There is no specific setting for IFM_NONE */
   10596 			break;
   10597 		default:
   10598 			panic("wm_gmii_mediachange: bad media 0x%x",
   10599 			    ife->ifm_media);
   10600 		}
   10601 	}
   10602 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10603 	CSR_WRITE_FLUSH(sc);
   10604 
   10605 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10606 		wm_serdes_mediachange(ifp);
   10607 
   10608 	if (sc->sc_type <= WM_T_82543)
   10609 		wm_gmii_reset(sc);
   10610 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10611 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10612 		/* Allow time for the SFP cage to power up the PHY */
   10613 		delay(300 * 1000);
   10614 		wm_gmii_reset(sc);
   10615 	}
   10616 
   10617 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10618 		return 0;
   10619 	return rc;
   10620 }
   10621 
   10622 /*
   10623  * wm_gmii_mediastatus:	[ifmedia interface function]
   10624  *
   10625  *	Get the current interface media status on a 1000BASE-T device.
   10626  */
   10627 static void
   10628 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10629 {
   10630 	struct wm_softc *sc = ifp->if_softc;
   10631 
   10632 	ether_mediastatus(ifp, ifmr);
   10633 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10634 	    | sc->sc_flowflags;
   10635 }
   10636 
   10637 #define	MDI_IO		CTRL_SWDPIN(2)
   10638 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10639 #define	MDI_CLK		CTRL_SWDPIN(3)
   10640 
   10641 static void
   10642 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10643 {
   10644 	uint32_t i, v;
   10645 
   10646 	v = CSR_READ(sc, WMREG_CTRL);
   10647 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10648 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10649 
   10650 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10651 		if (data & i)
   10652 			v |= MDI_IO;
   10653 		else
   10654 			v &= ~MDI_IO;
   10655 		CSR_WRITE(sc, WMREG_CTRL, v);
   10656 		CSR_WRITE_FLUSH(sc);
   10657 		delay(10);
   10658 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10659 		CSR_WRITE_FLUSH(sc);
   10660 		delay(10);
   10661 		CSR_WRITE(sc, WMREG_CTRL, v);
   10662 		CSR_WRITE_FLUSH(sc);
   10663 		delay(10);
   10664 	}
   10665 }
   10666 
   10667 static uint16_t
   10668 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10669 {
   10670 	uint32_t v, i;
   10671 	uint16_t data = 0;
   10672 
   10673 	v = CSR_READ(sc, WMREG_CTRL);
   10674 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10675 	v |= CTRL_SWDPIO(3);
   10676 
   10677 	CSR_WRITE(sc, WMREG_CTRL, v);
   10678 	CSR_WRITE_FLUSH(sc);
   10679 	delay(10);
   10680 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10681 	CSR_WRITE_FLUSH(sc);
   10682 	delay(10);
   10683 	CSR_WRITE(sc, WMREG_CTRL, v);
   10684 	CSR_WRITE_FLUSH(sc);
   10685 	delay(10);
   10686 
   10687 	for (i = 0; i < 16; i++) {
   10688 		data <<= 1;
   10689 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10690 		CSR_WRITE_FLUSH(sc);
   10691 		delay(10);
   10692 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10693 			data |= 1;
   10694 		CSR_WRITE(sc, WMREG_CTRL, v);
   10695 		CSR_WRITE_FLUSH(sc);
   10696 		delay(10);
   10697 	}
   10698 
   10699 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10700 	CSR_WRITE_FLUSH(sc);
   10701 	delay(10);
   10702 	CSR_WRITE(sc, WMREG_CTRL, v);
   10703 	CSR_WRITE_FLUSH(sc);
   10704 	delay(10);
   10705 
   10706 	return data;
   10707 }
   10708 
   10709 #undef MDI_IO
   10710 #undef MDI_DIR
   10711 #undef MDI_CLK
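
/*
 * Illustrative sketch (not part of the driver): the 14-bit value passed to
 * wm_i82543_mii_sendbits() by wm_gmii_i82543_readreg() below is the leading
 * half of an IEEE 802.3 clause 22 management frame, shifted out MSB first
 * after the 32-bit preamble of ones:
 *
 *	bits 13-12: ST (start of frame, MII_COMMAND_START)
 *	bits 11-10: OP (opcode, MII_COMMAND_READ or MII_COMMAND_WRITE)
 *	bits  9-5:  PHY address
 *	bits  4-0:  register address
 *
 * A hypothetical helper that builds the same read command:
 */
#if 0	/* example only */
static uint32_t
mii_read_cmd(int phy, int reg)
{
	return (reg & 0x1f) | ((phy & 0x1f) << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12);
}
#endif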
   10712 
   10713 /*
   10714  * wm_gmii_i82543_readreg:	[mii interface function]
   10715  *
   10716  *	Read a PHY register on the GMII (i82543 version).
   10717  */
   10718 static int
   10719 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10720 {
   10721 	struct wm_softc *sc = device_private(dev);
   10722 
   10723 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10724 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10725 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10726 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10727 
   10728 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10729 		device_xname(dev), phy, reg, *val));
   10730 
   10731 	return 0;
   10732 }
   10733 
   10734 /*
   10735  * wm_gmii_i82543_writereg:	[mii interface function]
   10736  *
   10737  *	Write a PHY register on the GMII (i82543 version).
   10738  */
   10739 static int
   10740 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10741 {
   10742 	struct wm_softc *sc = device_private(dev);
   10743 
   10744 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10745 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10746 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10747 	    (MII_COMMAND_START << 30), 32);
   10748 
   10749 	return 0;
   10750 }
   10751 
   10752 /*
   10753  * wm_gmii_mdic_readreg:	[mii interface function]
   10754  *
   10755  *	Read a PHY register on the GMII.
   10756  */
   10757 static int
   10758 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10759 {
   10760 	struct wm_softc *sc = device_private(dev);
   10761 	uint32_t mdic = 0;
   10762 	int i;
   10763 
   10764 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10765 	    && (reg > MII_ADDRMASK)) {
   10766 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10767 		    __func__, sc->sc_phytype, reg);
   10768 		reg &= MII_ADDRMASK;
   10769 	}
   10770 
   10771 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10772 	    MDIC_REGADD(reg));
   10773 
   10774 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10775 		delay(50);
   10776 		mdic = CSR_READ(sc, WMREG_MDIC);
   10777 		if (mdic & MDIC_READY)
   10778 			break;
   10779 	}
   10780 
   10781 	if ((mdic & MDIC_READY) == 0) {
   10782 		DPRINTF(WM_DEBUG_GMII,
   10783 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10784 			device_xname(dev), phy, reg));
   10785 		return ETIMEDOUT;
   10786 	} else if (mdic & MDIC_E) {
   10787 		/* This is normal if no PHY is present. */
   10788 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10789 			device_xname(sc->sc_dev), phy, reg));
   10790 		return -1;
   10791 	} else
   10792 		*val = MDIC_DATA(mdic);
   10793 
   10794 	/*
   10795 	 * Allow some time after each MDIC transaction to avoid
   10796 	 * reading duplicate data in the next MDIC transaction.
   10797 	 */
   10798 	if (sc->sc_type == WM_T_PCH2)
   10799 		delay(100);
   10800 
   10801 	return 0;
   10802 }
   10803 
   10804 /*
   10805  * wm_gmii_mdic_writereg:	[mii interface function]
   10806  *
   10807  *	Write a PHY register on the GMII.
   10808  */
   10809 static int
   10810 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10811 {
   10812 	struct wm_softc *sc = device_private(dev);
   10813 	uint32_t mdic = 0;
   10814 	int i;
   10815 
   10816 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10817 	    && (reg > MII_ADDRMASK)) {
   10818 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10819 		    __func__, sc->sc_phytype, reg);
   10820 		reg &= MII_ADDRMASK;
   10821 	}
   10822 
   10823 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10824 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10825 
   10826 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10827 		delay(50);
   10828 		mdic = CSR_READ(sc, WMREG_MDIC);
   10829 		if (mdic & MDIC_READY)
   10830 			break;
   10831 	}
   10832 
   10833 	if ((mdic & MDIC_READY) == 0) {
   10834 		DPRINTF(WM_DEBUG_GMII,
   10835 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10836 			device_xname(dev), phy, reg));
   10837 		return ETIMEDOUT;
   10838 	} else if (mdic & MDIC_E) {
   10839 		DPRINTF(WM_DEBUG_GMII,
   10840 		    ("%s: MDIC write error: phy %d reg %d\n",
   10841 			device_xname(dev), phy, reg));
   10842 		return -1;
   10843 	}
   10844 
   10845 	/*
   10846 	 * Allow some time after each MDIC transaction to avoid
   10847 	 * reading duplicate data in the next MDIC transaction.
   10848 	 */
   10849 	if (sc->sc_type == WM_T_PCH2)
   10850 		delay(100);
   10851 
   10852 	return 0;
   10853 }
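
/*
 * Illustrative sketch (not part of the driver): both MDIC accessors above
 * poll the ready bit with the same pattern. A generic version of that
 * pattern, with the retry count and delay as parameters instead of the
 * driver's WM_GEN_POLL_TIMEOUT-based constants; wm_poll_ready() is a
 * hypothetical name:
 */
#if 0	/* example only */
static int
wm_poll_ready(struct wm_softc *sc, int regoff, uint32_t readybit,
    int tries, int usec)
{
	while (tries-- > 0) {
		delay(usec);
		if (CSR_READ(sc, regoff) & readybit)
			return 0;
	}
	return ETIMEDOUT;
}
#endif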
   10854 
   10855 /*
   10856  * wm_gmii_i82544_readreg:	[mii interface function]
   10857  *
   10858  *	Read a PHY register on the GMII.
   10859  */
   10860 static int
   10861 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10862 {
   10863 	struct wm_softc *sc = device_private(dev);
   10864 	int rv;
   10865 
   10866 	if (sc->phy.acquire(sc)) {
   10867 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10868 		return -1;
   10869 	}
   10870 
   10871 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10872 
   10873 	sc->phy.release(sc);
   10874 
   10875 	return rv;
   10876 }
   10877 
   10878 static int
   10879 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10880 {
   10881 	struct wm_softc *sc = device_private(dev);
   10882 	int rv;
   10883 
   10884 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10885 		switch (sc->sc_phytype) {
   10886 		case WMPHY_IGP:
   10887 		case WMPHY_IGP_2:
   10888 		case WMPHY_IGP_3:
   10889 			rv = wm_gmii_mdic_writereg(dev, phy,
   10890 			    IGPHY_PAGE_SELECT, reg);
   10891 			if (rv != 0)
   10892 				return rv;
   10893 			break;
   10894 		default:
   10895 #ifdef WM_DEBUG
   10896 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10897 			    __func__, sc->sc_phytype, reg);
   10898 #endif
   10899 			break;
   10900 		}
   10901 	}
   10902 
   10903 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10904 }
   10905 
   10906 /*
   10907  * wm_gmii_i82544_writereg:	[mii interface function]
   10908  *
   10909  *	Write a PHY register on the GMII.
   10910  */
   10911 static int
   10912 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10913 {
   10914 	struct wm_softc *sc = device_private(dev);
   10915 	int rv;
   10916 
   10917 	if (sc->phy.acquire(sc)) {
   10918 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10919 		return -1;
   10920 	}
   10921 
   10922 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10923 	sc->phy.release(sc);
   10924 
   10925 	return rv;
   10926 }
   10927 
   10928 static int
   10929 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10930 {
   10931 	struct wm_softc *sc = device_private(dev);
   10932 	int rv;
   10933 
   10934 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10935 		switch (sc->sc_phytype) {
   10936 		case WMPHY_IGP:
   10937 		case WMPHY_IGP_2:
   10938 		case WMPHY_IGP_3:
   10939 			rv = wm_gmii_mdic_writereg(dev, phy,
   10940 			    IGPHY_PAGE_SELECT, reg);
   10941 			if (rv != 0)
   10942 				return rv;
   10943 			break;
   10944 		default:
   10945 #ifdef WM_DEBUG
    10946 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x\n",
   10947 			    __func__, sc->sc_phytype, reg);
   10948 #endif
   10949 			break;
   10950 		}
   10951 	}
   10952 
   10953 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10954 }
   10955 
   10956 /*
   10957  * wm_gmii_i80003_readreg:	[mii interface function]
   10958  *
    10959  *	Read a PHY register on the Kumeran bus.
    10960  * This could be handled by the PHY layer if we didn't have to lock the
    10961  * resource ...
   10962  */
   10963 static int
   10964 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10965 {
   10966 	struct wm_softc *sc = device_private(dev);
   10967 	int page_select;
   10968 	uint16_t temp, temp2;
   10969 	int rv = 0;
   10970 
   10971 	if (phy != 1) /* Only one PHY on kumeran bus */
   10972 		return -1;
   10973 
   10974 	if (sc->phy.acquire(sc)) {
   10975 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10976 		return -1;
   10977 	}
   10978 
   10979 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10980 		page_select = GG82563_PHY_PAGE_SELECT;
   10981 	else {
   10982 		/*
   10983 		 * Use Alternative Page Select register to access registers
   10984 		 * 30 and 31.
   10985 		 */
   10986 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10987 	}
   10988 	temp = reg >> GG82563_PAGE_SHIFT;
   10989 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10990 		goto out;
   10991 
   10992 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10993 		/*
    10994 		 * Wait an extra 200us to work around a bug with the ready
    10995 		 * bit in the MDIC register.
   10996 		 */
   10997 		delay(200);
   10998 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10999 		if ((rv != 0) || (temp2 != temp)) {
   11000 			device_printf(dev, "%s failed\n", __func__);
   11001 			rv = -1;
   11002 			goto out;
   11003 		}
   11004 		delay(200);
   11005 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11006 		delay(200);
   11007 	} else
   11008 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11009 
   11010 out:
   11011 	sc->phy.release(sc);
   11012 	return rv;
   11013 }
   11014 
   11015 /*
   11016  * wm_gmii_i80003_writereg:	[mii interface function]
   11017  *
    11018  *	Write a PHY register on the Kumeran bus.
    11019  * This could be handled by the PHY layer if we didn't have to lock the
    11020  * resource ...
   11021  */
   11022 static int
   11023 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11024 {
   11025 	struct wm_softc *sc = device_private(dev);
   11026 	int page_select, rv;
   11027 	uint16_t temp, temp2;
   11028 
   11029 	if (phy != 1) /* Only one PHY on kumeran bus */
   11030 		return -1;
   11031 
   11032 	if (sc->phy.acquire(sc)) {
   11033 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11034 		return -1;
   11035 	}
   11036 
   11037 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11038 		page_select = GG82563_PHY_PAGE_SELECT;
   11039 	else {
   11040 		/*
   11041 		 * Use Alternative Page Select register to access registers
   11042 		 * 30 and 31.
   11043 		 */
   11044 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11045 	}
   11046 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11047 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11048 		goto out;
   11049 
   11050 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11051 		/*
    11052 		 * Wait an extra 200us to work around a bug with the ready
    11053 		 * bit in the MDIC register.
   11054 		 */
   11055 		delay(200);
   11056 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11057 		if ((rv != 0) || (temp2 != temp)) {
   11058 			device_printf(dev, "%s failed\n", __func__);
   11059 			rv = -1;
   11060 			goto out;
   11061 		}
   11062 		delay(200);
   11063 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11064 		delay(200);
   11065 	} else
   11066 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11067 
   11068 out:
   11069 	sc->phy.release(sc);
   11070 	return rv;
   11071 }
   11072 
   11073 /*
   11074  * wm_gmii_bm_readreg:	[mii interface function]
   11075  *
    11076  *	Read a PHY register on the BM PHY.
    11077  * This could be handled by the PHY layer if we didn't have to lock the
    11078  * resource ...
   11079  */
   11080 static int
   11081 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11082 {
   11083 	struct wm_softc *sc = device_private(dev);
   11084 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11085 	int rv;
   11086 
   11087 	if (sc->phy.acquire(sc)) {
   11088 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11089 		return -1;
   11090 	}
   11091 
   11092 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11093 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11094 		    || (reg == 31)) ? 1 : phy;
   11095 	/* Page 800 works differently than the rest so it has its own func */
   11096 	if (page == BM_WUC_PAGE) {
   11097 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11098 		goto release;
   11099 	}
   11100 
   11101 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11102 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11103 		    && (sc->sc_type != WM_T_82583))
   11104 			rv = wm_gmii_mdic_writereg(dev, phy,
   11105 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11106 		else
   11107 			rv = wm_gmii_mdic_writereg(dev, phy,
   11108 			    BME1000_PHY_PAGE_SELECT, page);
   11109 		if (rv != 0)
   11110 			goto release;
   11111 	}
   11112 
   11113 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11114 
   11115 release:
   11116 	sc->phy.release(sc);
   11117 	return rv;
   11118 }
   11119 
   11120 /*
   11121  * wm_gmii_bm_writereg:	[mii interface function]
   11122  *
    11123  *	Write a PHY register on the BM PHY.
    11124  * This could be handled by the PHY layer if we didn't have to lock the
    11125  * resource ...
   11126  */
   11127 static int
   11128 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11129 {
   11130 	struct wm_softc *sc = device_private(dev);
   11131 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11132 	int rv;
   11133 
   11134 	if (sc->phy.acquire(sc)) {
   11135 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11136 		return -1;
   11137 	}
   11138 
   11139 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11140 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11141 		    || (reg == 31)) ? 1 : phy;
   11142 	/* Page 800 works differently than the rest so it has its own func */
   11143 	if (page == BM_WUC_PAGE) {
   11144 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11145 		goto release;
   11146 	}
   11147 
   11148 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11149 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11150 		    && (sc->sc_type != WM_T_82583))
   11151 			rv = wm_gmii_mdic_writereg(dev, phy,
   11152 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11153 		else
   11154 			rv = wm_gmii_mdic_writereg(dev, phy,
   11155 			    BME1000_PHY_PAGE_SELECT, page);
   11156 		if (rv != 0)
   11157 			goto release;
   11158 	}
   11159 
   11160 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11161 
   11162 release:
   11163 	sc->phy.release(sc);
   11164 	return rv;
   11165 }
   11166 
   11167 /*
   11168  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11169  *  @dev: pointer to the HW structure
   11170  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11171  *
   11172  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11173  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11174  */
   11175 static int
   11176 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11177 {
   11178 	uint16_t temp;
   11179 	int rv;
   11180 
   11181 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11182 		device_xname(dev), __func__));
   11183 
   11184 	if (!phy_regp)
   11185 		return -1;
   11186 
   11187 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11188 
   11189 	/* Select Port Control Registers page */
   11190 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11191 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11192 	if (rv != 0)
   11193 		return rv;
   11194 
   11195 	/* Read WUCE and save it */
   11196 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11197 	if (rv != 0)
   11198 		return rv;
   11199 
   11200 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11201 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11202 	 */
   11203 	temp = *phy_regp;
   11204 	temp |= BM_WUC_ENABLE_BIT;
   11205 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11206 
   11207 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11208 		return rv;
   11209 
   11210 	/* Select Host Wakeup Registers page - caller now able to write
   11211 	 * registers on the Wakeup registers page
   11212 	 */
   11213 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11214 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11215 }
   11216 
   11217 /*
   11218  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11219  *  @dev: pointer to the HW structure
   11220  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11221  *
   11222  *  Restore BM_WUC_ENABLE_REG to its original value.
   11223  *
   11224  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11225  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11226  *  caller.
   11227  */
   11228 static int
   11229 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11230 {
   11231 
   11232 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11233 		device_xname(dev), __func__));
   11234 
   11235 	if (!phy_regp)
   11236 		return -1;
   11237 
   11238 	/* Select Port Control Registers page */
   11239 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11240 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11241 
   11242 	/* Restore 769.17 to its original value */
   11243 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11244 
   11245 	return 0;
   11246 }
   11247 
   11248 /*
   11249  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    11250  *  @dev: pointer to the device
   11251  *  @offset: register offset to be read or written
   11252  *  @val: pointer to the data to read or write
   11253  *  @rd: determines if operation is read or write
   11254  *  @page_set: BM_WUC_PAGE already set and access enabled
   11255  *
   11256  *  Read the PHY register at offset and store the retrieved information in
   11257  *  data, or write data to PHY register at offset.  Note the procedure to
    11258  *  access the PHY wakeup registers is different from that for other PHY
    11259  *  registers. It works as follows:
   11260  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11261  *  2) Set page to 800 for host access (801 for the manageability unit)
   11262  *  3) Write the address using the address opcode (0x11)
   11263  *  4) Read or write the data using the data opcode (0x12)
   11264  *  5) Restore 769.17.2 to its original value
   11265  *
   11266  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11267  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11268  *
   11269  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11270  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11271  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11272  */
   11273 static int
    11274 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11275 	bool page_set)
   11276 {
   11277 	struct wm_softc *sc = device_private(dev);
   11278 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11279 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11280 	uint16_t wuce;
   11281 	int rv = 0;
   11282 
   11283 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11284 		device_xname(dev), __func__));
   11285 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11286 	if ((sc->sc_type == WM_T_PCH)
   11287 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11288 		device_printf(dev,
   11289 		    "Attempting to access page %d while gig enabled.\n", page);
   11290 	}
   11291 
   11292 	if (!page_set) {
   11293 		/* Enable access to PHY wakeup registers */
   11294 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11295 		if (rv != 0) {
   11296 			device_printf(dev,
   11297 			    "%s: Could not enable PHY wakeup reg access\n",
   11298 			    __func__);
   11299 			return rv;
   11300 		}
   11301 	}
   11302 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11303 		device_xname(sc->sc_dev), __func__, page, regnum));
   11304 
   11305 	/*
    11306 	 * Steps 3 and 4: access the PHY wakeup register (write the
    11307 	 * address, then read or write the data); see the header above.
   11308 	 */
   11309 
   11310 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11311 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11312 	if (rv != 0)
   11313 		return rv;
   11314 
   11315 	if (rd) {
   11316 		/* Read the Wakeup register page value using opcode 0x12 */
   11317 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11318 	} else {
   11319 		/* Write the Wakeup register page value using opcode 0x12 */
   11320 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11321 	}
   11322 	if (rv != 0)
   11323 		return rv;
   11324 
   11325 	if (!page_set)
   11326 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11327 
   11328 	return rv;
   11329 }
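
/*
 * Illustrative sketch (kept under #if 0, never compiled): how a caller
 * that already holds the PHY semaphore could read a single wakeup-page
 * register through wm_access_phy_wakeup_reg_bm() above.  The function
 * name is hypothetical and not part of this driver; encoded_off is
 * assumed to carry the (page 800, regnum) encoding that is decoded by
 * BM_PHY_REG_PAGE()/BM_PHY_REG_NUM().
 */
#if 0
static int
wm_read_wakeup_reg_example(device_t dev, int encoded_off, uint16_t *val)
{

	/* Hypothetical example; steps 1-5 above happen inside the helper. */
	return wm_access_phy_wakeup_reg_bm(dev, encoded_off, val,
	    true /* read */, false /* page not pre-set */);
}
#endif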
   11330 
   11331 /*
   11332  * wm_gmii_hv_readreg:	[mii interface function]
   11333  *
    11334  *	Read a PHY register on the HV PHY (PCH and newer).
    11335  * This could be handled by the PHY layer if we didn't have to lock the
    11336  * resource ...
   11337  */
   11338 static int
   11339 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11340 {
   11341 	struct wm_softc *sc = device_private(dev);
   11342 	int rv;
   11343 
   11344 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11345 		device_xname(dev), __func__));
   11346 	if (sc->phy.acquire(sc)) {
   11347 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11348 		return -1;
   11349 	}
   11350 
   11351 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11352 	sc->phy.release(sc);
   11353 	return rv;
   11354 }
   11355 
   11356 static int
   11357 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11358 {
   11359 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11360 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11361 	int rv;
   11362 
   11363 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11364 
   11365 	/* Page 800 works differently than the rest so it has its own func */
   11366 	if (page == BM_WUC_PAGE)
   11367 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11368 
   11369 	/*
    11370 	 * Pages above 0 but below 768 would need their own access method,
    11371 	 * which is not implemented here.
   11372 	 */
   11373 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11374 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11375 		return -1;
   11376 	}
   11377 
   11378 	/*
   11379 	 * XXX I21[789] documents say that the SMBus Address register is at
   11380 	 * PHY address 01, Page 0 (not 768), Register 26.
   11381 	 */
   11382 	if (page == HV_INTC_FC_PAGE_START)
   11383 		page = 0;
   11384 
   11385 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11386 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11387 		    page << BME1000_PAGE_SHIFT);
   11388 		if (rv != 0)
   11389 			return rv;
   11390 	}
   11391 
   11392 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11393 }
   11394 
   11395 /*
   11396  * wm_gmii_hv_writereg:	[mii interface function]
   11397  *
    11398  *	Write a PHY register on the HV PHY (PCH and newer).
    11399  * This could be handled by the PHY layer if we didn't have to lock the
    11400  * resource ...
   11401  */
   11402 static int
   11403 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11404 {
   11405 	struct wm_softc *sc = device_private(dev);
   11406 	int rv;
   11407 
   11408 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11409 		device_xname(dev), __func__));
   11410 
   11411 	if (sc->phy.acquire(sc)) {
   11412 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11413 		return -1;
   11414 	}
   11415 
   11416 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11417 	sc->phy.release(sc);
   11418 
   11419 	return rv;
   11420 }
   11421 
   11422 static int
   11423 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11424 {
   11425 	struct wm_softc *sc = device_private(dev);
   11426 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11427 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11428 	int rv;
   11429 
   11430 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11431 
   11432 	/* Page 800 works differently than the rest so it has its own func */
   11433 	if (page == BM_WUC_PAGE)
   11434 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11435 		    false);
   11436 
   11437 	/*
    11438 	 * Pages above 0 but below 768 would need their own access method,
    11439 	 * which is not implemented here.
   11440 	 */
   11441 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11442 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11443 		return -1;
   11444 	}
   11445 
   11446 	{
   11447 		/*
   11448 		 * XXX I21[789] documents say that the SMBus Address register
   11449 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11450 		 */
   11451 		if (page == HV_INTC_FC_PAGE_START)
   11452 			page = 0;
   11453 
   11454 		/*
   11455 		 * XXX Workaround MDIO accesses being disabled after entering
   11456 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11457 		 * register is set)
   11458 		 */
   11459 		if (sc->sc_phytype == WMPHY_82578) {
   11460 			struct mii_softc *child;
   11461 
   11462 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11463 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11464 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11465 			    && ((val & (1 << 11)) != 0)) {
   11466 				device_printf(dev, "XXX need workaround\n");
   11467 			}
   11468 		}
   11469 
   11470 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11471 			rv = wm_gmii_mdic_writereg(dev, 1,
   11472 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11473 			if (rv != 0)
   11474 				return rv;
   11475 		}
   11476 	}
   11477 
   11478 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11479 }
   11480 
   11481 /*
   11482  * wm_gmii_82580_readreg:	[mii interface function]
   11483  *
   11484  *	Read a PHY register on the 82580 and I350.
   11485  * This could be handled by the PHY layer if we didn't have to lock the
    11486  * resource ...
   11487  */
   11488 static int
   11489 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11490 {
   11491 	struct wm_softc *sc = device_private(dev);
   11492 	int rv;
   11493 
   11494 	if (sc->phy.acquire(sc) != 0) {
   11495 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11496 		return -1;
   11497 	}
   11498 
   11499 #ifdef DIAGNOSTIC
   11500 	if (reg > MII_ADDRMASK) {
   11501 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11502 		    __func__, sc->sc_phytype, reg);
   11503 		reg &= MII_ADDRMASK;
   11504 	}
   11505 #endif
   11506 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11507 
   11508 	sc->phy.release(sc);
   11509 	return rv;
   11510 }
   11511 
   11512 /*
   11513  * wm_gmii_82580_writereg:	[mii interface function]
   11514  *
   11515  *	Write a PHY register on the 82580 and I350.
   11516  * This could be handled by the PHY layer if we didn't have to lock the
    11517  * resource ...
   11518  */
   11519 static int
   11520 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11521 {
   11522 	struct wm_softc *sc = device_private(dev);
   11523 	int rv;
   11524 
   11525 	if (sc->phy.acquire(sc) != 0) {
   11526 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11527 		return -1;
   11528 	}
   11529 
   11530 #ifdef DIAGNOSTIC
   11531 	if (reg > MII_ADDRMASK) {
   11532 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11533 		    __func__, sc->sc_phytype, reg);
   11534 		reg &= MII_ADDRMASK;
   11535 	}
   11536 #endif
   11537 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11538 
   11539 	sc->phy.release(sc);
   11540 	return rv;
   11541 }
   11542 
   11543 /*
   11544  * wm_gmii_gs40g_readreg:	[mii interface function]
   11545  *
    11546  *	Read a PHY register on the I210 and I211.
    11547  * This could be handled by the PHY layer if we didn't have to lock the
    11548  * resource ...
   11549  */
   11550 static int
   11551 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11552 {
   11553 	struct wm_softc *sc = device_private(dev);
   11554 	int page, offset;
   11555 	int rv;
   11556 
   11557 	/* Acquire semaphore */
   11558 	if (sc->phy.acquire(sc)) {
   11559 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11560 		return -1;
   11561 	}
   11562 
   11563 	/* Page select */
   11564 	page = reg >> GS40G_PAGE_SHIFT;
   11565 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11566 	if (rv != 0)
   11567 		goto release;
   11568 
   11569 	/* Read reg */
   11570 	offset = reg & GS40G_OFFSET_MASK;
   11571 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11572 
   11573 release:
   11574 	sc->phy.release(sc);
   11575 	return rv;
   11576 }
   11577 
   11578 /*
   11579  * wm_gmii_gs40g_writereg:	[mii interface function]
   11580  *
   11581  *	Write a PHY register on the I210 and I211.
   11582  * This could be handled by the PHY layer if we didn't have to lock the
    11583  * resource ...
   11584  */
   11585 static int
   11586 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11587 {
   11588 	struct wm_softc *sc = device_private(dev);
   11589 	uint16_t page;
   11590 	int offset, rv;
   11591 
   11592 	/* Acquire semaphore */
   11593 	if (sc->phy.acquire(sc)) {
   11594 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11595 		return -1;
   11596 	}
   11597 
   11598 	/* Page select */
   11599 	page = reg >> GS40G_PAGE_SHIFT;
   11600 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11601 	if (rv != 0)
   11602 		goto release;
   11603 
   11604 	/* Write reg */
   11605 	offset = reg & GS40G_OFFSET_MASK;
   11606 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11607 
   11608 release:
   11609 	/* Release semaphore */
   11610 	sc->phy.release(sc);
   11611 	return rv;
   11612 }
   11613 
   11614 /*
   11615  * wm_gmii_statchg:	[mii interface function]
   11616  *
   11617  *	Callback from MII layer when media changes.
   11618  */
   11619 static void
   11620 wm_gmii_statchg(struct ifnet *ifp)
   11621 {
   11622 	struct wm_softc *sc = ifp->if_softc;
   11623 	struct mii_data *mii = &sc->sc_mii;
   11624 
   11625 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11626 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11627 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11628 
   11629 	/* Get flow control negotiation result. */
   11630 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11631 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11632 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11633 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11634 	}
   11635 
   11636 	if (sc->sc_flowflags & IFM_FLOW) {
   11637 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11638 			sc->sc_ctrl |= CTRL_TFCE;
   11639 			sc->sc_fcrtl |= FCRTL_XONE;
   11640 		}
   11641 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11642 			sc->sc_ctrl |= CTRL_RFCE;
   11643 	}
   11644 
   11645 	if (mii->mii_media_active & IFM_FDX) {
   11646 		DPRINTF(WM_DEBUG_LINK,
   11647 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11648 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11649 	} else {
   11650 		DPRINTF(WM_DEBUG_LINK,
   11651 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11652 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11653 	}
   11654 
   11655 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11656 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11657 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11658 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11659 	if (sc->sc_type == WM_T_80003) {
   11660 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11661 		case IFM_1000_T:
   11662 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11663 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11664 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11665 			break;
   11666 		default:
   11667 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11668 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11669 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11670 			break;
   11671 		}
   11672 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11673 	}
   11674 }
   11675 
   11676 /* kumeran related (80003, ICH* and PCH*) */
   11677 
   11678 /*
   11679  * wm_kmrn_readreg:
   11680  *
   11681  *	Read a kumeran register
   11682  */
   11683 static int
   11684 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11685 {
   11686 	int rv;
   11687 
   11688 	if (sc->sc_type == WM_T_80003)
   11689 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11690 	else
   11691 		rv = sc->phy.acquire(sc);
   11692 	if (rv != 0) {
   11693 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11694 		    __func__);
   11695 		return rv;
   11696 	}
   11697 
   11698 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11699 
   11700 	if (sc->sc_type == WM_T_80003)
   11701 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11702 	else
   11703 		sc->phy.release(sc);
   11704 
   11705 	return rv;
   11706 }
   11707 
   11708 static int
   11709 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11710 {
   11711 
   11712 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11713 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11714 	    KUMCTRLSTA_REN);
   11715 	CSR_WRITE_FLUSH(sc);
   11716 	delay(2);
   11717 
   11718 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11719 
   11720 	return 0;
   11721 }
   11722 
   11723 /*
   11724  * wm_kmrn_writereg:
   11725  *
   11726  *	Write a kumeran register
   11727  */
   11728 static int
   11729 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11730 {
   11731 	int rv;
   11732 
   11733 	if (sc->sc_type == WM_T_80003)
   11734 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11735 	else
   11736 		rv = sc->phy.acquire(sc);
   11737 	if (rv != 0) {
   11738 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11739 		    __func__);
   11740 		return rv;
   11741 	}
   11742 
   11743 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11744 
   11745 	if (sc->sc_type == WM_T_80003)
   11746 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11747 	else
   11748 		sc->phy.release(sc);
   11749 
   11750 	return rv;
   11751 }
   11752 
   11753 static int
   11754 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11755 {
   11756 
   11757 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11758 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11759 
   11760 	return 0;
   11761 }
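
/*
 * Illustrative sketch (kept under #if 0, never compiled): a
 * check-then-write of a Kumeran register using the wrappers above.
 * The offset and value names come from wm_gmii_statchg() in this file;
 * the function itself is hypothetical, and each wrapper call acquires
 * and releases the semaphore on its own, so the sequence is not atomic.
 */
#if 0
static int
wm_kmrn_hd_ctrl_example(struct wm_softc *sc)
{
	uint16_t val;
	int rv;

	rv = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &val);
	if (rv != 0)
		return rv;
	if (val == KUMCTRLSTA_HD_CTRL_1000_DEFAULT)
		return 0;	/* Already configured; nothing to do. */
	return wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
}
#endif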
   11762 
   11763 /*
   11764  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11765  * This access method is different from IEEE MMD.
   11766  */
   11767 static int
   11768 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11769 {
   11770 	struct wm_softc *sc = device_private(dev);
   11771 	int rv;
   11772 
   11773 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11774 	if (rv != 0)
   11775 		return rv;
   11776 
   11777 	if (rd)
   11778 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11779 	else
   11780 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11781 	return rv;
   11782 }
   11783 
   11784 static int
   11785 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11786 {
   11787 
   11788 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11789 }
   11790 
   11791 static int
   11792 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11793 {
   11794 
   11795 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11796 }
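
/*
 * Illustrative sketch (kept under #if 0, never compiled): a
 * read-modify-write through the indirect EMI address/data pair above.
 * Both the function and the emi_off/set parameters are hypothetical;
 * as with every *_locked helper in this file, the caller must already
 * hold the PHY semaphore.
 */
#if 0
static int
wm_emi_rmw_example(device_t dev, int emi_off, uint16_t set)
{
	uint16_t val;
	int rv;

	if ((rv = wm_read_emi_reg_locked(dev, emi_off, &val)) != 0)
		return rv;
	return wm_write_emi_reg_locked(dev, emi_off, val | set);
}
#endif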
   11797 
   11798 /* SGMII related */
   11799 
   11800 /*
   11801  * wm_sgmii_uses_mdio
   11802  *
   11803  * Check whether the transaction is to the internal PHY or the external
   11804  * MDIO interface. Return true if it's MDIO.
   11805  */
   11806 static bool
   11807 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11808 {
   11809 	uint32_t reg;
   11810 	bool ismdio = false;
   11811 
   11812 	switch (sc->sc_type) {
   11813 	case WM_T_82575:
   11814 	case WM_T_82576:
   11815 		reg = CSR_READ(sc, WMREG_MDIC);
   11816 		ismdio = ((reg & MDIC_DEST) != 0);
   11817 		break;
   11818 	case WM_T_82580:
   11819 	case WM_T_I350:
   11820 	case WM_T_I354:
   11821 	case WM_T_I210:
   11822 	case WM_T_I211:
   11823 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11824 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11825 		break;
   11826 	default:
   11827 		break;
   11828 	}
   11829 
   11830 	return ismdio;
   11831 }
   11832 
   11833 /* Setup internal SGMII PHY for SFP */
   11834 static void
   11835 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   11836 {
   11837 	uint16_t id1, id2, phyreg;
   11838 	int i, rv;
   11839 
   11840 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   11841 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   11842 		return;
   11843 
   11844 	for (i = 0; i < MII_NPHY; i++) {
   11845 		sc->phy.no_errprint = true;
   11846 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   11847 		if (rv != 0)
   11848 			continue;
   11849 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   11850 		if (rv != 0)
   11851 			continue;
   11852 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   11853 			continue;
   11854 		sc->phy.no_errprint = false;
   11855 
   11856 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   11857 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   11858 		phyreg |= ESSR_SGMII_WOC_COPPER;
   11859 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   11860 		break;
   11861 	}
   11862 
   11863 }
   11864 
   11865 /*
   11866  * wm_sgmii_readreg:	[mii interface function]
   11867  *
   11868  *	Read a PHY register on the SGMII
   11869  * This could be handled by the PHY layer if we didn't have to lock the
    11870  * resource ...
   11871  */
   11872 static int
   11873 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11874 {
   11875 	struct wm_softc *sc = device_private(dev);
   11876 	int rv;
   11877 
   11878 	if (sc->phy.acquire(sc)) {
   11879 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11880 		return -1;
   11881 	}
   11882 
   11883 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11884 
   11885 	sc->phy.release(sc);
   11886 	return rv;
   11887 }
   11888 
   11889 static int
   11890 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11891 {
   11892 	struct wm_softc *sc = device_private(dev);
   11893 	uint32_t i2ccmd;
   11894 	int i, rv = 0;
   11895 
   11896 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11897 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11898 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11899 
   11900 	/* Poll the ready bit */
   11901 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11902 		delay(50);
   11903 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11904 		if (i2ccmd & I2CCMD_READY)
   11905 			break;
   11906 	}
   11907 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11908 		device_printf(dev, "I2CCMD Read did not complete\n");
   11909 		rv = ETIMEDOUT;
   11910 	}
   11911 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11912 		if (!sc->phy.no_errprint)
   11913 			device_printf(dev, "I2CCMD Error bit set\n");
   11914 		rv = EIO;
   11915 	}
   11916 
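	/* Data in I2CCMD is in I2C byte order; swap back to host order. */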
   11917 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11918 
   11919 	return rv;
   11920 }
   11921 
   11922 /*
   11923  * wm_sgmii_writereg:	[mii interface function]
   11924  *
   11925  *	Write a PHY register on the SGMII.
   11926  * This could be handled by the PHY layer if we didn't have to lock the
    11927  * resource ...
   11928  */
   11929 static int
   11930 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11931 {
   11932 	struct wm_softc *sc = device_private(dev);
   11933 	int rv;
   11934 
   11935 	if (sc->phy.acquire(sc) != 0) {
   11936 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11937 		return -1;
   11938 	}
   11939 
   11940 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11941 
   11942 	sc->phy.release(sc);
   11943 
   11944 	return rv;
   11945 }
   11946 
   11947 static int
   11948 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11949 {
   11950 	struct wm_softc *sc = device_private(dev);
   11951 	uint32_t i2ccmd;
   11952 	uint16_t swapdata;
   11953 	int rv = 0;
   11954 	int i;
   11955 
   11956 	/* Swap the data bytes for the I2C interface */
   11957 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11958 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11959 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11960 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11961 
   11962 	/* Poll the ready bit */
   11963 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11964 		delay(50);
   11965 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11966 		if (i2ccmd & I2CCMD_READY)
   11967 			break;
   11968 	}
   11969 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11970 		device_printf(dev, "I2CCMD Write did not complete\n");
   11971 		rv = ETIMEDOUT;
   11972 	}
   11973 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11974 		device_printf(dev, "I2CCMD Error bit set\n");
   11975 		rv = EIO;
   11976 	}
   11977 
   11978 	return rv;
   11979 }
   11980 
   11981 /* TBI related */
   11982 
   11983 static bool
   11984 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11985 {
   11986 	bool sig;
   11987 
   11988 	sig = ctrl & CTRL_SWDPIN(1);
   11989 
   11990 	/*
   11991 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11992 	 * detect a signal, 1 if they don't.
   11993 	 */
   11994 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11995 		sig = !sig;
   11996 
   11997 	return sig;
   11998 }
   11999 
   12000 /*
   12001  * wm_tbi_mediainit:
   12002  *
   12003  *	Initialize media for use on 1000BASE-X devices.
   12004  */
   12005 static void
   12006 wm_tbi_mediainit(struct wm_softc *sc)
   12007 {
   12008 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12009 	const char *sep = "";
   12010 
   12011 	if (sc->sc_type < WM_T_82543)
   12012 		sc->sc_tipg = TIPG_WM_DFLT;
   12013 	else
   12014 		sc->sc_tipg = TIPG_LG_DFLT;
   12015 
   12016 	sc->sc_tbi_serdes_anegticks = 5;
   12017 
   12018 	/* Initialize our media structures */
   12019 	sc->sc_mii.mii_ifp = ifp;
   12020 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12021 
   12022 	ifp->if_baudrate = IF_Gbps(1);
   12023 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12024 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12025 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12026 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12027 		    sc->sc_core_lock);
   12028 	} else {
   12029 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12030 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12031 	}
   12032 
   12033 	/*
   12034 	 * SWD Pins:
   12035 	 *
   12036 	 *	0 = Link LED (output)
   12037 	 *	1 = Loss Of Signal (input)
   12038 	 */
   12039 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12040 
   12041 	/* XXX Perhaps this is only for TBI */
   12042 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12043 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12044 
   12045 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12046 		sc->sc_ctrl &= ~CTRL_LRST;
   12047 
   12048 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12049 
   12050 #define	ADD(ss, mm, dd)							\
   12051 do {									\
   12052 	aprint_normal("%s%s", sep, ss);					\
   12053 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12054 	sep = ", ";							\
   12055 } while (/*CONSTCOND*/0)
   12056 
   12057 	aprint_normal_dev(sc->sc_dev, "");
   12058 
   12059 	if (sc->sc_type == WM_T_I354) {
   12060 		uint32_t status;
   12061 
   12062 		status = CSR_READ(sc, WMREG_STATUS);
   12063 		if (((status & STATUS_2P5_SKU) != 0)
   12064 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12065 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12066 		} else
   12067 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12068 	} else if (sc->sc_type == WM_T_82545) {
   12069 		/* Only 82545 is LX (XXX except SFP) */
   12070 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12071 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12072 	} else if (sc->sc_sfptype != 0) {
   12073 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12074 		switch (sc->sc_sfptype) {
   12075 		default:
   12076 		case SFF_SFP_ETH_FLAGS_1000SX:
   12077 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12078 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12079 			break;
   12080 		case SFF_SFP_ETH_FLAGS_1000LX:
   12081 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12082 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12083 			break;
   12084 		case SFF_SFP_ETH_FLAGS_1000CX:
   12085 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12086 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12087 			break;
   12088 		case SFF_SFP_ETH_FLAGS_1000T:
   12089 			ADD("1000baseT", IFM_1000_T, 0);
   12090 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12091 			break;
   12092 		case SFF_SFP_ETH_FLAGS_100FX:
   12093 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12094 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12095 			break;
   12096 		}
   12097 	} else {
   12098 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12099 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12100 	}
   12101 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12102 	aprint_normal("\n");
   12103 
   12104 #undef ADD
   12105 
   12106 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12107 }
   12108 
   12109 /*
   12110  * wm_tbi_mediachange:	[ifmedia interface function]
   12111  *
   12112  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12113  */
   12114 static int
   12115 wm_tbi_mediachange(struct ifnet *ifp)
   12116 {
   12117 	struct wm_softc *sc = ifp->if_softc;
   12118 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12119 	uint32_t status, ctrl;
   12120 	bool signal;
   12121 	int i;
   12122 
   12123 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12124 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12125 		/* XXX need some work for >= 82571 and < 82575 */
   12126 		if (sc->sc_type < WM_T_82575)
   12127 			return 0;
   12128 	}
   12129 
   12130 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12131 	    || (sc->sc_type >= WM_T_82575))
   12132 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12133 
   12134 	sc->sc_ctrl &= ~CTRL_LRST;
   12135 	sc->sc_txcw = TXCW_ANE;
   12136 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12137 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12138 	else if (ife->ifm_media & IFM_FDX)
   12139 		sc->sc_txcw |= TXCW_FD;
   12140 	else
   12141 		sc->sc_txcw |= TXCW_HD;
   12142 
   12143 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12144 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12145 
   12146 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12147 		device_xname(sc->sc_dev), sc->sc_txcw));
   12148 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12149 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12150 	CSR_WRITE_FLUSH(sc);
   12151 	delay(1000);
   12152 
   12153 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12154 	signal = wm_tbi_havesignal(sc, ctrl);
   12155 
   12156 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12157 		signal));
   12158 
   12159 	if (signal) {
   12160 		/* Have signal; wait for the link to come up. */
   12161 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12162 			delay(10000);
   12163 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12164 				break;
   12165 		}
   12166 
   12167 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12168 			device_xname(sc->sc_dev), i));
   12169 
   12170 		status = CSR_READ(sc, WMREG_STATUS);
   12171 		DPRINTF(WM_DEBUG_LINK,
   12172 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12173 			device_xname(sc->sc_dev), status, STATUS_LU));
   12174 		if (status & STATUS_LU) {
   12175 			/* Link is up. */
   12176 			DPRINTF(WM_DEBUG_LINK,
   12177 			    ("%s: LINK: set media -> link up %s\n",
   12178 				device_xname(sc->sc_dev),
   12179 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12180 
   12181 			/*
   12182 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12183 			 * so we should update sc->sc_ctrl
   12184 			 */
   12185 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12186 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12187 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12188 			if (status & STATUS_FD)
   12189 				sc->sc_tctl |=
   12190 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12191 			else
   12192 				sc->sc_tctl |=
   12193 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12194 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12195 				sc->sc_fcrtl |= FCRTL_XONE;
   12196 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12197 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12198 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12199 			sc->sc_tbi_linkup = 1;
   12200 		} else {
   12201 			if (i == WM_LINKUP_TIMEOUT)
   12202 				wm_check_for_link(sc);
   12203 			/* Link is down. */
   12204 			DPRINTF(WM_DEBUG_LINK,
   12205 			    ("%s: LINK: set media -> link down\n",
   12206 				device_xname(sc->sc_dev)));
   12207 			sc->sc_tbi_linkup = 0;
   12208 		}
   12209 	} else {
   12210 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12211 			device_xname(sc->sc_dev)));
   12212 		sc->sc_tbi_linkup = 0;
   12213 	}
   12214 
   12215 	wm_tbi_serdes_set_linkled(sc);
   12216 
   12217 	return 0;
   12218 }
   12219 
   12220 /*
   12221  * wm_tbi_mediastatus:	[ifmedia interface function]
   12222  *
   12223  *	Get the current interface media status on a 1000BASE-X device.
   12224  */
   12225 static void
   12226 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12227 {
   12228 	struct wm_softc *sc = ifp->if_softc;
   12229 	uint32_t ctrl, status;
   12230 
   12231 	ifmr->ifm_status = IFM_AVALID;
   12232 	ifmr->ifm_active = IFM_ETHER;
   12233 
   12234 	status = CSR_READ(sc, WMREG_STATUS);
   12235 	if ((status & STATUS_LU) == 0) {
   12236 		ifmr->ifm_active |= IFM_NONE;
   12237 		return;
   12238 	}
   12239 
   12240 	ifmr->ifm_status |= IFM_ACTIVE;
   12241 	/* Only 82545 is LX */
   12242 	if (sc->sc_type == WM_T_82545)
   12243 		ifmr->ifm_active |= IFM_1000_LX;
   12244 	else
   12245 		ifmr->ifm_active |= IFM_1000_SX;
   12246 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12247 		ifmr->ifm_active |= IFM_FDX;
   12248 	else
   12249 		ifmr->ifm_active |= IFM_HDX;
   12250 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12251 	if (ctrl & CTRL_RFCE)
   12252 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12253 	if (ctrl & CTRL_TFCE)
   12254 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12255 }
   12256 
   12257 /* XXX TBI only */
   12258 static int
   12259 wm_check_for_link(struct wm_softc *sc)
   12260 {
   12261 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12262 	uint32_t rxcw;
   12263 	uint32_t ctrl;
   12264 	uint32_t status;
   12265 	bool signal;
   12266 
   12267 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12268 		device_xname(sc->sc_dev), __func__));
   12269 
   12270 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12271 		/* XXX need some work for >= 82571 */
   12272 		if (sc->sc_type >= WM_T_82571) {
   12273 			sc->sc_tbi_linkup = 1;
   12274 			return 0;
   12275 		}
   12276 	}
   12277 
   12278 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12279 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12280 	status = CSR_READ(sc, WMREG_STATUS);
   12281 	signal = wm_tbi_havesignal(sc, ctrl);
   12282 
   12283 	DPRINTF(WM_DEBUG_LINK,
   12284 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12285 		device_xname(sc->sc_dev), __func__, signal,
   12286 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12287 
   12288 	/*
   12289 	 * SWDPIN   LU RXCW
   12290 	 *	0    0	  0
   12291 	 *	0    0	  1	(should not happen)
   12292 	 *	0    1	  0	(should not happen)
   12293 	 *	0    1	  1	(should not happen)
   12294 	 *	1    0	  0	Disable autonego and force linkup
   12295 	 *	1    0	  1	got /C/ but not linkup yet
   12296 	 *	1    1	  0	(linkup)
   12297 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12298 	 *
   12299 	 */
   12300 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12301 		DPRINTF(WM_DEBUG_LINK,
   12302 		    ("%s: %s: force linkup and fullduplex\n",
   12303 			device_xname(sc->sc_dev), __func__));
   12304 		sc->sc_tbi_linkup = 0;
   12305 		/* Disable auto-negotiation in the TXCW register */
   12306 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12307 
   12308 		/*
   12309 		 * Force link-up and also force full-duplex.
   12310 		 *
    12311 		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
   12312 		 * so we should update sc->sc_ctrl
   12313 		 */
   12314 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12315 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12316 	} else if (((status & STATUS_LU) != 0)
   12317 	    && ((rxcw & RXCW_C) != 0)
   12318 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12319 		sc->sc_tbi_linkup = 1;
   12320 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12321 			device_xname(sc->sc_dev),
   12322 			__func__));
   12323 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12324 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12325 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12326 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12327 			device_xname(sc->sc_dev), __func__));
   12328 	} else {
   12329 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12330 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12331 			status));
   12332 	}
   12333 
   12334 	return 0;
   12335 }
   12336 
   12337 /*
   12338  * wm_tbi_tick:
   12339  *
   12340  *	Check the link on TBI devices.
   12341  *	This function acts as mii_tick().
   12342  */
   12343 static void
   12344 wm_tbi_tick(struct wm_softc *sc)
   12345 {
   12346 	struct mii_data *mii = &sc->sc_mii;
   12347 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12348 	uint32_t status;
   12349 
   12350 	KASSERT(WM_CORE_LOCKED(sc));
   12351 
   12352 	status = CSR_READ(sc, WMREG_STATUS);
   12353 
   12354 	/* XXX is this needed? */
   12355 	(void)CSR_READ(sc, WMREG_RXCW);
   12356 	(void)CSR_READ(sc, WMREG_CTRL);
   12357 
   12358 	/* set link status */
   12359 	if ((status & STATUS_LU) == 0) {
   12360 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12361 			device_xname(sc->sc_dev)));
   12362 		sc->sc_tbi_linkup = 0;
   12363 	} else if (sc->sc_tbi_linkup == 0) {
   12364 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12365 			device_xname(sc->sc_dev),
   12366 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12367 		sc->sc_tbi_linkup = 1;
   12368 		sc->sc_tbi_serdes_ticks = 0;
   12369 	}
   12370 
   12371 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12372 		goto setled;
   12373 
   12374 	if ((status & STATUS_LU) == 0) {
   12375 		sc->sc_tbi_linkup = 0;
   12376 		/* If the timer expired, retry autonegotiation */
   12377 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12378 		    && (++sc->sc_tbi_serdes_ticks
   12379 			>= sc->sc_tbi_serdes_anegticks)) {
   12380 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12381 				device_xname(sc->sc_dev), __func__));
   12382 			sc->sc_tbi_serdes_ticks = 0;
   12383 			/*
   12384 			 * Reset the link, and let autonegotiation do
   12385 			 * its thing
   12386 			 */
   12387 			sc->sc_ctrl |= CTRL_LRST;
   12388 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12389 			CSR_WRITE_FLUSH(sc);
   12390 			delay(1000);
   12391 			sc->sc_ctrl &= ~CTRL_LRST;
   12392 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12393 			CSR_WRITE_FLUSH(sc);
   12394 			delay(1000);
   12395 			CSR_WRITE(sc, WMREG_TXCW,
   12396 			    sc->sc_txcw & ~TXCW_ANE);
   12397 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12398 		}
   12399 	}
   12400 
   12401 setled:
   12402 	wm_tbi_serdes_set_linkled(sc);
   12403 }
   12404 
   12405 /* SERDES related */
   12406 static void
   12407 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12408 {
   12409 	uint32_t reg;
   12410 
   12411 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12412 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12413 		return;
   12414 
   12415 	/* Enable PCS to turn on link */
   12416 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12417 	reg |= PCS_CFG_PCS_EN;
   12418 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12419 
   12420 	/* Power up the laser */
   12421 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12422 	reg &= ~CTRL_EXT_SWDPIN(3);
   12423 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12424 
   12425 	/* Flush the write to verify completion */
   12426 	CSR_WRITE_FLUSH(sc);
   12427 	delay(1000);
   12428 }
   12429 
   12430 static int
   12431 wm_serdes_mediachange(struct ifnet *ifp)
   12432 {
   12433 	struct wm_softc *sc = ifp->if_softc;
   12434 	bool pcs_autoneg = true; /* XXX */
   12435 	uint32_t ctrl_ext, pcs_lctl, reg;
   12436 
   12437 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12438 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12439 		return 0;
   12440 
   12441 	/* XXX Currently, this function is not called on 8257[12] */
   12442 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12443 	    || (sc->sc_type >= WM_T_82575))
   12444 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12445 
   12446 	/* Power on the sfp cage if present */
   12447 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12448 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12449 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12450 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12451 
   12452 	sc->sc_ctrl |= CTRL_SLU;
   12453 
   12454 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12455 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12456 
   12457 		reg = CSR_READ(sc, WMREG_CONNSW);
   12458 		reg |= CONNSW_ENRGSRC;
   12459 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12460 	}
   12461 
   12462 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12463 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12464 	case CTRL_EXT_LINK_MODE_SGMII:
   12465 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12466 		pcs_autoneg = true;
    12467 		/* The autoneg timeout should be disabled for SGMII mode */
   12468 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12469 		break;
   12470 	case CTRL_EXT_LINK_MODE_1000KX:
   12471 		pcs_autoneg = false;
   12472 		/* FALLTHROUGH */
   12473 	default:
   12474 		if ((sc->sc_type == WM_T_82575)
   12475 		    || (sc->sc_type == WM_T_82576)) {
   12476 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12477 				pcs_autoneg = false;
   12478 		}
   12479 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12480 		    | CTRL_FRCFDX;
   12481 
   12482 		/* Set speed of 1000/Full if speed/duplex is forced */
   12483 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12484 	}
   12485 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12486 
   12487 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12488 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12489 
   12490 	if (pcs_autoneg) {
   12491 		/* Set PCS register for autoneg */
   12492 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12493 
   12494 		/* Disable force flow control for autoneg */
   12495 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12496 
   12497 		/* Configure flow control advertisement for autoneg */
   12498 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12499 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12500 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12501 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12502 	} else
   12503 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12504 
   12505 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12506 
   12507 	return 0;
   12508 }
   12509 
   12510 static void
   12511 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12512 {
   12513 	struct wm_softc *sc = ifp->if_softc;
   12514 	struct mii_data *mii = &sc->sc_mii;
   12515 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12516 	uint32_t pcs_adv, pcs_lpab, reg;
   12517 
   12518 	ifmr->ifm_status = IFM_AVALID;
   12519 	ifmr->ifm_active = IFM_ETHER;
   12520 
   12521 	/* Check PCS */
   12522 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12523 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12524 		ifmr->ifm_active |= IFM_NONE;
   12525 		sc->sc_tbi_linkup = 0;
   12526 		goto setled;
   12527 	}
   12528 
   12529 	sc->sc_tbi_linkup = 1;
   12530 	ifmr->ifm_status |= IFM_ACTIVE;
   12531 	if (sc->sc_type == WM_T_I354) {
   12532 		uint32_t status;
   12533 
   12534 		status = CSR_READ(sc, WMREG_STATUS);
   12535 		if (((status & STATUS_2P5_SKU) != 0)
   12536 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12537 			ifmr->ifm_active |= IFM_2500_KX;
   12538 		} else
   12539 			ifmr->ifm_active |= IFM_1000_KX;
   12540 	} else {
   12541 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12542 		case PCS_LSTS_SPEED_10:
   12543 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12544 			break;
   12545 		case PCS_LSTS_SPEED_100:
   12546 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12547 			break;
   12548 		case PCS_LSTS_SPEED_1000:
   12549 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12550 			break;
   12551 		default:
   12552 			device_printf(sc->sc_dev, "Unknown speed\n");
   12553 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12554 			break;
   12555 		}
   12556 	}
   12557 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12558 	if ((reg & PCS_LSTS_FDX) != 0)
   12559 		ifmr->ifm_active |= IFM_FDX;
   12560 	else
   12561 		ifmr->ifm_active |= IFM_HDX;
   12562 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12563 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12564 		/* Check flow */
   12565 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12566 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12567 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12568 			goto setled;
   12569 		}
   12570 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12571 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12572 		DPRINTF(WM_DEBUG_LINK,
   12573 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
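		/*
		 * Resolve the advertised pause bits as in IEEE 802.3
		 * Annex 28B: symmetric pause when both sides advertise
		 * SYM; otherwise one-way pause when one side's ASYM
		 * complements the other side's SYM.
		 */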
   12574 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12575 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12576 			mii->mii_media_active |= IFM_FLOW
   12577 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12578 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12579 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12580 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12581 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12582 			mii->mii_media_active |= IFM_FLOW
   12583 			    | IFM_ETH_TXPAUSE;
   12584 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12585 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12586 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12587 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12588 			mii->mii_media_active |= IFM_FLOW
   12589 			    | IFM_ETH_RXPAUSE;
   12590 		}
   12591 	}
   12592 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12593 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12594 setled:
   12595 	wm_tbi_serdes_set_linkled(sc);
   12596 }
   12597 
   12598 /*
   12599  * wm_serdes_tick:
   12600  *
   12601  *	Check the link on serdes devices.
   12602  */
   12603 static void
   12604 wm_serdes_tick(struct wm_softc *sc)
   12605 {
   12606 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12607 	struct mii_data *mii = &sc->sc_mii;
   12608 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12609 	uint32_t reg;
   12610 
   12611 	KASSERT(WM_CORE_LOCKED(sc));
   12612 
   12613 	mii->mii_media_status = IFM_AVALID;
   12614 	mii->mii_media_active = IFM_ETHER;
   12615 
   12616 	/* Check PCS */
   12617 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12618 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12619 		mii->mii_media_status |= IFM_ACTIVE;
   12620 		sc->sc_tbi_linkup = 1;
   12621 		sc->sc_tbi_serdes_ticks = 0;
   12622 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12623 		if ((reg & PCS_LSTS_FDX) != 0)
   12624 			mii->mii_media_active |= IFM_FDX;
   12625 		else
   12626 			mii->mii_media_active |= IFM_HDX;
   12627 	} else {
    12628 		mii->mii_media_active |= IFM_NONE;
   12629 		sc->sc_tbi_linkup = 0;
   12630 		/* If the timer expired, retry autonegotiation */
   12631 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12632 		    && (++sc->sc_tbi_serdes_ticks
   12633 			>= sc->sc_tbi_serdes_anegticks)) {
   12634 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12635 				device_xname(sc->sc_dev), __func__));
   12636 			sc->sc_tbi_serdes_ticks = 0;
   12637 			/* XXX */
   12638 			wm_serdes_mediachange(ifp);
   12639 		}
   12640 	}
   12641 
   12642 	wm_tbi_serdes_set_linkled(sc);
   12643 }
   12644 
   12645 /* SFP related */
   12646 
   12647 static int
   12648 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12649 {
   12650 	uint32_t i2ccmd;
   12651 	int i;
   12652 
   12653 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12654 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12655 
   12656 	/* Poll the ready bit */
   12657 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12658 		delay(50);
   12659 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12660 		if (i2ccmd & I2CCMD_READY)
   12661 			break;
   12662 	}
   12663 	if ((i2ccmd & I2CCMD_READY) == 0)
   12664 		return -1;
   12665 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12666 		return -1;
   12667 
   12668 	*data = i2ccmd & 0x00ff;
   12669 
   12670 	return 0;
   12671 }
   12672 
   12673 static uint32_t
   12674 wm_sfp_get_media_type(struct wm_softc *sc)
   12675 {
   12676 	uint32_t ctrl_ext;
   12677 	uint8_t val = 0;
   12678 	int timeout = 3;
   12679 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12680 	int rv = -1;
   12681 
   12682 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12683 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12684 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12685 	CSR_WRITE_FLUSH(sc);
   12686 
   12687 	/* Read SFP module data */
   12688 	while (timeout) {
   12689 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12690 		if (rv == 0)
   12691 			break;
   12692 		delay(100*1000); /* XXX too big */
   12693 		timeout--;
   12694 	}
   12695 	if (rv != 0)
   12696 		goto out;
   12697 
   12698 	switch (val) {
   12699 	case SFF_SFP_ID_SFF:
   12700 		aprint_normal_dev(sc->sc_dev,
   12701 		    "Module/Connector soldered to board\n");
   12702 		break;
   12703 	case SFF_SFP_ID_SFP:
   12704 		sc->sc_flags |= WM_F_SFP;
   12705 		break;
   12706 	case SFF_SFP_ID_UNKNOWN:
   12707 		goto out;
   12708 	default:
   12709 		break;
   12710 	}
   12711 
   12712 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12713 	if (rv != 0)
   12714 		goto out;
   12715 
   12716 	sc->sc_sfptype = val;
   12717 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12718 		mediatype = WM_MEDIATYPE_SERDES;
   12719 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12720 		sc->sc_flags |= WM_F_SGMII;
   12721 		mediatype = WM_MEDIATYPE_COPPER;
   12722 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12723 		sc->sc_flags |= WM_F_SGMII;
   12724 		mediatype = WM_MEDIATYPE_SERDES;
   12725 	} else {
   12726 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12727 		    __func__, sc->sc_sfptype);
   12728 		sc->sc_sfptype = 0; /* XXX unknown */
   12729 	}
   12730 
   12731 out:
   12732 	/* Restore I2C interface setting */
   12733 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12734 
   12735 	return mediatype;
   12736 }
   12737 
   12738 /*
   12739  * NVM related.
   12740  * Microwire, SPI (w/wo EERD) and Flash.
   12741  */
   12742 
   12743 /* Both spi and uwire */
   12744 
   12745 /*
   12746  * wm_eeprom_sendbits:
   12747  *
   12748  *	Send a series of bits to the EEPROM.
   12749  */
   12750 static void
   12751 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12752 {
   12753 	uint32_t reg;
   12754 	int x;
   12755 
   12756 	reg = CSR_READ(sc, WMREG_EECD);
   12757 
   12758 	for (x = nbits; x > 0; x--) {
   12759 		if (bits & (1U << (x - 1)))
   12760 			reg |= EECD_DI;
   12761 		else
   12762 			reg &= ~EECD_DI;
   12763 		CSR_WRITE(sc, WMREG_EECD, reg);
   12764 		CSR_WRITE_FLUSH(sc);
   12765 		delay(2);
   12766 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12767 		CSR_WRITE_FLUSH(sc);
   12768 		delay(2);
   12769 		CSR_WRITE(sc, WMREG_EECD, reg);
   12770 		CSR_WRITE_FLUSH(sc);
   12771 		delay(2);
   12772 	}
   12773 }
   12774 
   12775 /*
   12776  * wm_eeprom_recvbits:
   12777  *
   12778  *	Receive a series of bits from the EEPROM.
   12779  */
   12780 static void
   12781 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12782 {
   12783 	uint32_t reg, val;
   12784 	int x;
   12785 
   12786 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12787 
   12788 	val = 0;
   12789 	for (x = nbits; x > 0; x--) {
   12790 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12791 		CSR_WRITE_FLUSH(sc);
   12792 		delay(2);
   12793 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12794 			val |= (1U << (x - 1));
   12795 		CSR_WRITE(sc, WMREG_EECD, reg);
   12796 		CSR_WRITE_FLUSH(sc);
   12797 		delay(2);
   12798 	}
   12799 	*valp = val;
   12800 }
   12801 
   12802 /* Microwire */
   12803 
   12804 /*
   12805  * wm_nvm_read_uwire:
   12806  *
   12807  *	Read a word from the EEPROM using the MicroWire protocol.
   12808  */
   12809 static int
   12810 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12811 {
   12812 	uint32_t reg, val;
   12813 	int i;
   12814 
   12815 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12816 		device_xname(sc->sc_dev), __func__));
   12817 
   12818 	if (sc->nvm.acquire(sc) != 0)
   12819 		return -1;
   12820 
   12821 	for (i = 0; i < wordcnt; i++) {
   12822 		/* Clear SK and DI. */
   12823 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12824 		CSR_WRITE(sc, WMREG_EECD, reg);
   12825 
   12826 		/*
   12827 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12828 		 * and Xen.
   12829 		 *
   12830 		 * We use this workaround only for the 82540 because
   12831 		 * qemu's e1000 acts as an 82540.
   12832 		 */
   12833 		if (sc->sc_type == WM_T_82540) {
   12834 			reg |= EECD_SK;
   12835 			CSR_WRITE(sc, WMREG_EECD, reg);
   12836 			reg &= ~EECD_SK;
   12837 			CSR_WRITE(sc, WMREG_EECD, reg);
   12838 			CSR_WRITE_FLUSH(sc);
   12839 			delay(2);
   12840 		}
   12841 		/* XXX: end of workaround */
   12842 
   12843 		/* Set CHIP SELECT. */
   12844 		reg |= EECD_CS;
   12845 		CSR_WRITE(sc, WMREG_EECD, reg);
   12846 		CSR_WRITE_FLUSH(sc);
   12847 		delay(2);
   12848 
   12849 		/* Shift in the READ command. */
   12850 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12851 
   12852 		/* Shift in address. */
   12853 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12854 
   12855 		/* Shift out the data. */
   12856 		wm_eeprom_recvbits(sc, &val, 16);
   12857 		data[i] = val & 0xffff;
   12858 
   12859 		/* Clear CHIP SELECT. */
   12860 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12861 		CSR_WRITE(sc, WMREG_EECD, reg);
   12862 		CSR_WRITE_FLUSH(sc);
   12863 		delay(2);
   12864 	}
   12865 
   12866 	sc->nvm.release(sc);
   12867 	return 0;
   12868 }
   12869 
   12870 /* SPI */
   12871 
   12872 /*
   12873  * Set SPI and FLASH related information from the EECD register.
   12874  * For 82541 and 82547, the word size is taken from EEPROM.
   12875  */
   12876 static int
   12877 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12878 {
   12879 	int size;
   12880 	uint32_t reg;
   12881 	uint16_t data;
   12882 
   12883 	reg = CSR_READ(sc, WMREG_EECD);
   12884 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12885 
   12886 	/* Read the size of NVM from EECD by default */
   12887 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12888 	switch (sc->sc_type) {
   12889 	case WM_T_82541:
   12890 	case WM_T_82541_2:
   12891 	case WM_T_82547:
   12892 	case WM_T_82547_2:
   12893 		/* Set dummy value to access EEPROM */
   12894 		sc->sc_nvm_wordsize = 64;
   12895 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12896 			aprint_error_dev(sc->sc_dev,
   12897 			    "%s: failed to read EEPROM size\n", __func__);
   12898 		}
   12899 		reg = data;
   12900 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12901 		if (size == 0)
   12902 			size = 6; /* 64 word size */
   12903 		else
   12904 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12905 		break;
   12906 	case WM_T_80003:
   12907 	case WM_T_82571:
   12908 	case WM_T_82572:
   12909 	case WM_T_82573: /* SPI case */
   12910 	case WM_T_82574: /* SPI case */
   12911 	case WM_T_82583: /* SPI case */
   12912 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12913 		if (size > 14)
   12914 			size = 14;
   12915 		break;
   12916 	case WM_T_82575:
   12917 	case WM_T_82576:
   12918 	case WM_T_82580:
   12919 	case WM_T_I350:
   12920 	case WM_T_I354:
   12921 	case WM_T_I210:
   12922 	case WM_T_I211:
   12923 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12924 		if (size > 15)
   12925 			size = 15;
   12926 		break;
   12927 	default:
   12928 		aprint_error_dev(sc->sc_dev,
   12929 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12930 		return -1;
   12932 	}
   12933 
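         	/* e.g. a size field of 6 gives 64 words; 8 gives 256 words. */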
   12934 	sc->sc_nvm_wordsize = 1 << size;
   12935 
   12936 	return 0;
   12937 }
   12938 
   12939 /*
   12940  * wm_nvm_ready_spi:
   12941  *
   12942  *	Wait for a SPI EEPROM to be ready for commands.
   12943  */
   12944 static int
   12945 wm_nvm_ready_spi(struct wm_softc *sc)
   12946 {
   12947 	uint32_t val;
   12948 	int usec;
   12949 
   12950 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12951 		device_xname(sc->sc_dev), __func__));
   12952 
   12953 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12954 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12955 		wm_eeprom_recvbits(sc, &val, 8);
   12956 		if ((val & SPI_SR_RDY) == 0)
   12957 			break;
   12958 	}
   12959 	if (usec >= SPI_MAX_RETRIES) {
   12960 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12961 		return -1;
   12962 	}
   12963 	return 0;
   12964 }
   12965 
   12966 /*
   12967  * wm_nvm_read_spi:
   12968  *
   12969  *	Read a word from the EEPROM using the SPI protocol.
   12970  */
   12971 static int
   12972 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12973 {
   12974 	uint32_t reg, val;
   12975 	int i;
   12976 	uint8_t opc;
   12977 	int rv = 0;
   12978 
   12979 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12980 		device_xname(sc->sc_dev), __func__));
   12981 
   12982 	if (sc->nvm.acquire(sc) != 0)
   12983 		return -1;
   12984 
   12985 	/* Clear SK and CS. */
   12986 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12987 	CSR_WRITE(sc, WMREG_EECD, reg);
   12988 	CSR_WRITE_FLUSH(sc);
   12989 	delay(2);
   12990 
   12991 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12992 		goto out;
   12993 
   12994 	/* Toggle CS to flush commands. */
   12995 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12996 	CSR_WRITE_FLUSH(sc);
   12997 	delay(2);
   12998 	CSR_WRITE(sc, WMREG_EECD, reg);
   12999 	CSR_WRITE_FLUSH(sc);
   13000 	delay(2);
   13001 
   13002 	opc = SPI_OPC_READ;
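         	/*
         	 * Parts with 8 address bits encode the ninth address bit (A8)
         	 * in the opcode itself.
         	 */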
   13003 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13004 		opc |= SPI_OPC_A8;
   13005 
   13006 	wm_eeprom_sendbits(sc, opc, 8);
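         	/* SPI EEPROMs are byte addressed; convert the word index to bytes. */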
   13007 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13008 
   13009 	for (i = 0; i < wordcnt; i++) {
   13010 		wm_eeprom_recvbits(sc, &val, 16);
   13011 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13012 	}
   13013 
   13014 	/* Raise CS and clear SK. */
   13015 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13016 	CSR_WRITE(sc, WMREG_EECD, reg);
   13017 	CSR_WRITE_FLUSH(sc);
   13018 	delay(2);
   13019 
   13020 out:
   13021 	sc->nvm.release(sc);
   13022 	return rv;
   13023 }
   13024 
   13025 /* Using EERD */
   13026 
   13027 static int
   13028 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13029 {
   13030 	uint32_t attempts = 100000;
   13031 	uint32_t i, reg = 0;
   13032 	int32_t done = -1;
   13033 
   13034 	for (i = 0; i < attempts; i++) {
   13035 		reg = CSR_READ(sc, rw);
   13036 
   13037 		if (reg & EERD_DONE) {
   13038 			done = 0;
   13039 			break;
   13040 		}
   13041 		delay(5);
   13042 	}
   13043 
   13044 	return done;
   13045 }
   13046 
   13047 static int
   13048 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13049 {
   13050 	int i, eerd = 0;
   13051 	int rv = 0;
   13052 
   13053 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13054 		device_xname(sc->sc_dev), __func__));
   13055 
   13056 	if (sc->nvm.acquire(sc) != 0)
   13057 		return -1;
   13058 
   13059 	for (i = 0; i < wordcnt; i++) {
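         		/*
         		 * Kick off a read by writing the word address together
         		 * with the START bit, poll until the DONE bit is set,
         		 * then pull the result from the data field of EERD.
         		 */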
   13060 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13061 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13062 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13063 		if (rv != 0) {
   13064 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13065 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13066 			break;
   13067 		}
   13068 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13069 	}
   13070 
   13071 	sc->nvm.release(sc);
   13072 	return rv;
   13073 }
   13074 
   13075 /* Flash */
   13076 
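         /*
          * wm_nvm_valid_bank_detect_ich8lan:
          *
          *	Determine which of the two flash banks holds valid NVM data
          *	by checking each bank's signature byte (masked with
          *	ICH_NVM_VALID_SIG_MASK) against ICH_NVM_SIG_VALUE.
          */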
   13077 static int
   13078 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13079 {
   13080 	uint32_t eecd;
   13081 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13082 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13083 	uint32_t nvm_dword = 0;
   13084 	uint8_t sig_byte = 0;
   13085 	int rv;
   13086 
   13087 	switch (sc->sc_type) {
   13088 	case WM_T_PCH_SPT:
   13089 	case WM_T_PCH_CNP:
   13090 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13091 		act_offset = ICH_NVM_SIG_WORD * 2;
   13092 
   13093 		/* Set bank to 0 in case flash read fails. */
   13094 		*bank = 0;
   13095 
   13096 		/* Check bank 0 */
   13097 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13098 		if (rv != 0)
   13099 			return rv;
   13100 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13101 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13102 			*bank = 0;
   13103 			return 0;
   13104 		}
   13105 
   13106 		/* Check bank 1 */
   13107 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   13108 		    &nvm_dword);
         		if (rv != 0)
         			return rv;
   13109 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13110 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13111 			*bank = 1;
   13112 			return 0;
   13113 		}
   13114 		aprint_error_dev(sc->sc_dev,
   13115 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13116 		return -1;
   13117 	case WM_T_ICH8:
   13118 	case WM_T_ICH9:
   13119 		eecd = CSR_READ(sc, WMREG_EECD);
   13120 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13121 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13122 			return 0;
   13123 		}
   13124 		/* FALLTHROUGH */
   13125 	default:
   13126 		/* Default to 0 */
   13127 		*bank = 0;
   13128 
   13129 		/* Check bank 0 */
   13130 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13131 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13132 			*bank = 0;
   13133 			return 0;
   13134 		}
   13135 
   13136 		/* Check bank 1 */
   13137 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13138 		    &sig_byte);
   13139 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13140 			*bank = 1;
   13141 			return 0;
   13142 		}
   13143 	}
   13144 
   13145 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13146 		device_xname(sc->sc_dev)));
   13147 	return -1;
   13148 }
   13149 
   13150 /******************************************************************************
   13151  * This function does initial flash setup so that a new read/write/erase cycle
   13152  * can be started.
   13153  *
   13154  * sc - The pointer to the hw structure
   13155  ****************************************************************************/
   13156 static int32_t
   13157 wm_ich8_cycle_init(struct wm_softc *sc)
   13158 {
   13159 	uint16_t hsfsts;
   13160 	int32_t error = 1;
   13161 	int32_t i     = 0;
   13162 
   13163 	if (sc->sc_type >= WM_T_PCH_SPT)
   13164 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13165 	else
   13166 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13167 
   13168 	/* Check the Flash Descriptor Valid bit in the HW status */
   13169 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13170 		return error;
   13171 
   13172 	/* Clear FCERR and DAEL in the HW status by writing 1s */
   13174 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13175 
   13176 	if (sc->sc_type >= WM_T_PCH_SPT)
   13177 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13178 	else
   13179 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13180 
   13181 	/*
   13182 	 * Either we should have a hardware SPI cycle-in-progress bit to
   13183 	 * check against before starting a new cycle, or the FDONE bit
   13184 	 * should be changed in the hardware so that it reads 1 after a
   13185 	 * hardware reset, which could then indicate whether a cycle is in
   13186 	 * progress or has completed.  We should also have some software
   13187 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit,
   13188 	 * so that accesses by two threads are serialized and two threads
   13189 	 * can't start a cycle at the same time.
   13190 	 */
   13191 
   13192 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13193 		/*
   13194 		 * There is no cycle running at present, so we can start a
   13195 		 * cycle
   13196 		 */
   13197 
   13198 		/* Begin by setting Flash Cycle Done. */
   13199 		hsfsts |= HSFSTS_DONE;
   13200 		if (sc->sc_type >= WM_T_PCH_SPT)
   13201 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13202 			    hsfsts & 0xffffUL);
   13203 		else
   13204 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13205 		error = 0;
   13206 	} else {
   13207 		/*
   13208 		 * Otherwise poll for some time so the current cycle has a
   13209 		 * chance to end before giving up.
   13210 		 */
   13211 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13212 			if (sc->sc_type >= WM_T_PCH_SPT)
   13213 				hsfsts = ICH8_FLASH_READ32(sc,
   13214 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13215 			else
   13216 				hsfsts = ICH8_FLASH_READ16(sc,
   13217 				    ICH_FLASH_HSFSTS);
   13218 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13219 				error = 0;
   13220 				break;
   13221 			}
   13222 			delay(1);
   13223 		}
   13224 		if (error == 0) {
   13225 			/*
   13226 			 * The previous cycle ended within the timeout, so
   13227 			 * now set the Flash Cycle Done.
   13228 			 */
   13229 			hsfsts |= HSFSTS_DONE;
   13230 			if (sc->sc_type >= WM_T_PCH_SPT)
   13231 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13232 				    hsfsts & 0xffffUL);
   13233 			else
   13234 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13235 				    hsfsts);
   13236 		}
   13237 	}
   13238 	return error;
   13239 }
   13240 
   13241 /******************************************************************************
   13242  * This function starts a flash cycle and waits for its completion
   13243  *
   13244  * sc - The pointer to the hw structure
   13245  ****************************************************************************/
   13246 static int32_t
   13247 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13248 {
   13249 	uint16_t hsflctl;
   13250 	uint16_t hsfsts;
   13251 	int32_t error = 1;
   13252 	uint32_t i = 0;
   13253 
   13254 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13255 	if (sc->sc_type >= WM_T_PCH_SPT)
   13256 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13257 	else
   13258 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13259 	hsflctl |= HSFCTL_GO;
   13260 	if (sc->sc_type >= WM_T_PCH_SPT)
   13261 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13262 		    (uint32_t)hsflctl << 16);
   13263 	else
   13264 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13265 
   13266 	/* Wait till FDONE bit is set to 1 */
   13267 	do {
   13268 		if (sc->sc_type >= WM_T_PCH_SPT)
   13269 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13270 			    & 0xffffUL;
   13271 		else
   13272 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13273 		if (hsfsts & HSFSTS_DONE)
   13274 			break;
   13275 		delay(1);
   13276 		i++;
   13277 	} while (i < timeout);
   13278 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   13279 		error = 0;
   13280 
   13281 	return error;
   13282 }
   13283 
   13284 /******************************************************************************
   13285  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13286  *
   13287  * sc - The pointer to the hw structure
   13288  * index - The index of the byte or word to read.
   13289  * size - Size of data to read: 1=byte, 2=word, 4=dword
   13290  * data - Pointer to the word to store the value read.
   13291  *****************************************************************************/
   13292 static int32_t
   13293 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13294     uint32_t size, uint32_t *data)
   13295 {
   13296 	uint16_t hsfsts;
   13297 	uint16_t hsflctl;
   13298 	uint32_t flash_linear_address;
   13299 	uint32_t flash_data = 0;
   13300 	int32_t error = 1;
   13301 	int32_t count = 0;
   13302 
   13303 	if (size < 1 || size > 4 || data == NULL ||
   13304 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13305 		return error;
   13306 
   13307 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13308 	    sc->sc_ich8_flash_base;
   13309 
   13310 	do {
   13311 		delay(1);
   13312 		/* Steps */
   13313 		error = wm_ich8_cycle_init(sc);
   13314 		if (error)
   13315 			break;
   13316 
   13317 		if (sc->sc_type >= WM_T_PCH_SPT)
   13318 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13319 			    >> 16;
   13320 		else
   13321 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13322 		/* The BCOUNT field holds the transfer size minus one. */
   13323 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13324 		    & HSFCTL_BCOUNT_MASK;
   13325 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13326 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13327 			/*
   13328 			 * In SPT, this register is in LAN memory space, not
   13329 			 * flash, so only 32-bit access is supported.
   13330 			 */
   13331 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13332 			    (uint32_t)hsflctl << 16);
   13333 		} else
   13334 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13335 
   13336 		/*
   13337 		 * Write the last 24 bits of index into Flash Linear address
   13338 		 * field in Flash Address
   13339 		 */
   13340 		/* TODO: TBD maybe check the index against the size of flash */
   13341 
   13342 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13343 
   13344 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13345 
   13346 		/*
   13347 		 * If FCERR is set, clear it and retry the whole sequence a
   13348 		 * few more times; otherwise read the result out of the Flash
   13349 		 * Data0 register, least significant byte first.
   13351 		 */
   13352 		if (error == 0) {
   13353 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13354 			if (size == 1)
   13355 				*data = (uint8_t)(flash_data & 0x000000FF);
   13356 			else if (size == 2)
   13357 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13358 			else if (size == 4)
   13359 				*data = (uint32_t)flash_data;
   13360 			break;
   13361 		} else {
   13362 			/*
   13363 			 * If we've gotten here, then things are probably
   13364 			 * completely hosed, but if the error condition is
   13365 			 * detected, it won't hurt to give it another try...
   13366 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13367 			 */
   13368 			if (sc->sc_type >= WM_T_PCH_SPT)
   13369 				hsfsts = ICH8_FLASH_READ32(sc,
   13370 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13371 			else
   13372 				hsfsts = ICH8_FLASH_READ16(sc,
   13373 				    ICH_FLASH_HSFSTS);
   13374 
   13375 			if (hsfsts & HSFSTS_ERR) {
   13376 				/* Repeat for some time before giving up. */
   13377 				continue;
   13378 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13379 				break;
   13380 		}
   13381 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13382 
   13383 	return error;
   13384 }
   13385 
   13386 /******************************************************************************
   13387  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13388  *
   13389  * sc - pointer to wm_hw structure
   13390  * index - The index of the byte to read.
   13391  * data - Pointer to a byte to store the value read.
   13392  *****************************************************************************/
   13393 static int32_t
   13394 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13395 {
   13396 	int32_t status;
   13397 	uint32_t word = 0;
   13398 
   13399 	status = wm_read_ich8_data(sc, index, 1, &word);
   13400 	if (status == 0)
   13401 		*data = (uint8_t)word;
   13402 	else
   13403 		*data = 0;
   13404 
   13405 	return status;
   13406 }
   13407 
   13408 /******************************************************************************
   13409  * Reads a word from the NVM using the ICH8 flash access registers.
   13410  *
   13411  * sc - pointer to wm_hw structure
   13412  * index - The starting byte index of the word to read.
   13413  * data - Pointer to a word to store the value read.
   13414  *****************************************************************************/
   13415 static int32_t
   13416 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13417 {
   13418 	int32_t status;
   13419 	uint32_t word = 0;
   13420 
   13421 	status = wm_read_ich8_data(sc, index, 2, &word);
   13422 	if (status == 0)
   13423 		*data = (uint16_t)word;
   13424 	else
   13425 		*data = 0;
   13426 
   13427 	return status;
   13428 }
   13429 
   13430 /******************************************************************************
   13431  * Reads a dword from the NVM using the ICH8 flash access registers.
   13432  *
   13433  * sc - pointer to wm_hw structure
   13434  * index - The starting byte index of the dword to read.
   13435  * data - Pointer to a dword to store the value read.
   13436  *****************************************************************************/
   13437 static int32_t
   13438 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13439 {
   13440 	int32_t status;
   13441 
   13442 	status = wm_read_ich8_data(sc, index, 4, data);
   13443 	return status;
   13444 }
   13445 
   13446 /******************************************************************************
   13447  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13448  * register.
   13449  *
   13450  * sc - Struct containing variables accessed by shared code
   13451  * offset - offset of word in the EEPROM to read
   13452  * data - word read from the EEPROM
   13453  * words - number of words to read
   13454  *****************************************************************************/
   13455 static int
   13456 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13457 {
   13458 	int32_t	 rv = 0;
   13459 	uint32_t flash_bank = 0;
   13460 	uint32_t act_offset = 0;
   13461 	uint32_t bank_offset = 0;
   13462 	uint16_t word = 0;
   13463 	uint16_t i = 0;
   13464 
   13465 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13466 		device_xname(sc->sc_dev), __func__));
   13467 
   13468 	if (sc->nvm.acquire(sc) != 0)
   13469 		return -1;
   13470 
   13471 	/*
   13472 	 * We need to know which is the valid flash bank.  In the event
   13473 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13474 	 * managing flash_bank. So it cannot be trusted and needs
   13475 	 * to be updated with each read.
   13476 	 */
   13477 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13478 	if (rv) {
   13479 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13480 			device_xname(sc->sc_dev)));
   13481 		flash_bank = 0;
   13482 	}
   13483 
   13484 	/*
   13485 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13486 	 * size
   13487 	 */
   13488 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
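         	/* e.g. bank 1 with a 2048-word bank starts at byte offset 4096. */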
   13489 
   13490 	for (i = 0; i < words; i++) {
   13491 		/* The NVM part needs a byte offset, hence * 2 */
   13492 		act_offset = bank_offset + ((offset + i) * 2);
   13493 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13494 		if (rv) {
   13495 			aprint_error_dev(sc->sc_dev,
   13496 			    "%s: failed to read NVM\n", __func__);
   13497 			break;
   13498 		}
   13499 		data[i] = word;
   13500 	}
   13501 
   13502 	sc->nvm.release(sc);
   13503 	return rv;
   13504 }
   13505 
   13506 /******************************************************************************
   13507  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13508  * register.
   13509  *
   13510  * sc - Struct containing variables accessed by shared code
   13511  * offset - offset of word in the EEPROM to read
   13512  * data - word read from the EEPROM
   13513  * words - number of words to read
   13514  *****************************************************************************/
   13515 static int
   13516 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13517 {
   13518 	int32_t	 rv = 0;
   13519 	uint32_t flash_bank = 0;
   13520 	uint32_t act_offset = 0;
   13521 	uint32_t bank_offset = 0;
   13522 	uint32_t dword = 0;
   13523 	uint16_t i = 0;
   13524 
   13525 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13526 		device_xname(sc->sc_dev), __func__));
   13527 
   13528 	if (sc->nvm.acquire(sc) != 0)
   13529 		return -1;
   13530 
   13531 	/*
   13532 	 * We need to know which is the valid flash bank.  In the event
   13533 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13534 	 * managing flash_bank. So it cannot be trusted and needs
   13535 	 * to be updated with each read.
   13536 	 */
   13537 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13538 	if (rv) {
   13539 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13540 			device_xname(sc->sc_dev)));
   13541 		flash_bank = 0;
   13542 	}
   13543 
   13544 	/*
   13545 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13546 	 * size
   13547 	 */
   13548 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13549 
   13550 	for (i = 0; i < words; i++) {
   13551 		/* The NVM part needs a byte offset, hence * 2 */
   13552 		act_offset = bank_offset + ((offset + i) * 2);
   13553 		/* but we must read dword aligned, so mask ... */
   13554 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13555 		if (rv) {
   13556 			aprint_error_dev(sc->sc_dev,
   13557 			    "%s: failed to read NVM\n", __func__);
   13558 			break;
   13559 		}
   13560 		/* ... and pick out low or high word */
   13561 		if ((act_offset & 0x2) == 0)
   13562 			data[i] = (uint16_t)(dword & 0xFFFF);
   13563 		else
   13564 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13565 	}
   13566 
   13567 	sc->nvm.release(sc);
   13568 	return rv;
   13569 }
   13570 
   13571 /* iNVM */
   13572 
   13573 static int
   13574 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13575 {
   13576 	int32_t	 rv = -1;
   13577 	uint32_t invm_dword;
   13578 	uint16_t i;
   13579 	uint8_t record_type, word_address;
   13580 
   13581 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13582 		device_xname(sc->sc_dev), __func__));
   13583 
   13584 	for (i = 0; i < INVM_SIZE; i++) {
   13585 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13586 		/* Get record type */
   13587 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13588 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13589 			break;
   13590 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13591 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13592 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13593 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13594 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13595 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13596 			if (word_address == address) {
   13597 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13598 				rv = 0;
   13599 				break;
   13600 			}
   13601 		}
   13602 	}
   13603 
   13604 	return rv;
   13605 }
   13606 
   13607 static int
   13608 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13609 {
   13610 	int rv = 0;
   13611 	int i;
   13612 
   13613 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13614 		device_xname(sc->sc_dev), __func__));
   13615 
   13616 	if (sc->nvm.acquire(sc) != 0)
   13617 		return -1;
   13618 
   13619 	for (i = 0; i < words; i++) {
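         		/*
         		 * For words the iNVM image doesn't provide, fall back
         		 * to the documented I211 default values so attach can
         		 * still proceed.
         		 */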
   13620 		switch (offset + i) {
   13621 		case NVM_OFF_MACADDR:
   13622 		case NVM_OFF_MACADDR1:
   13623 		case NVM_OFF_MACADDR2:
   13624 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13625 			if (rv != 0) {
   13626 				data[i] = 0xffff;
   13627 				rv = -1;
   13628 			}
   13629 			break;
   13630 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13631 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13632 			if (rv != 0) {
   13633 				*data = INVM_DEFAULT_AL;
   13634 				rv = 0;
   13635 			}
   13636 			break;
   13637 		case NVM_OFF_CFG2:
   13638 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13639 			if (rv != 0) {
   13640 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13641 				rv = 0;
   13642 			}
   13643 			break;
   13644 		case NVM_OFF_CFG4:
   13645 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13646 			if (rv != 0) {
   13647 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13648 				rv = 0;
   13649 			}
   13650 			break;
   13651 		case NVM_OFF_LED_1_CFG:
   13652 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13653 			if (rv != 0) {
   13654 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13655 				rv = 0;
   13656 			}
   13657 			break;
   13658 		case NVM_OFF_LED_0_2_CFG:
   13659 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13660 			if (rv != 0) {
   13661 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13662 				rv = 0;
   13663 			}
   13664 			break;
   13665 		case NVM_OFF_ID_LED_SETTINGS:
   13666 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13667 			if (rv != 0) {
   13668 				*data = ID_LED_RESERVED_FFFF;
   13669 				rv = 0;
   13670 			}
   13671 			break;
   13672 		default:
   13673 			DPRINTF(WM_DEBUG_NVM,
   13674 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13675 			*data = NVM_RESERVED_WORD;
   13676 			break;
   13677 		}
   13678 	}
   13679 
   13680 	sc->nvm.release(sc);
   13681 	return rv;
   13682 }
   13683 
   13684 /* Lock, detecting NVM type, validate checksum, version and read */
   13685 
   13686 static int
   13687 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13688 {
   13689 	uint32_t eecd = 0;
   13690 
   13691 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13692 	    || sc->sc_type == WM_T_82583) {
   13693 		eecd = CSR_READ(sc, WMREG_EECD);
   13694 
   13695 		/* Isolate bits 15 & 16 */
   13696 		eecd = ((eecd >> 15) & 0x03);
   13697 
   13698 		/* If both bits are set, device is Flash type */
   13699 		if (eecd == 0x03)
   13700 			return 0;
   13701 	}
   13702 	return 1;
   13703 }
   13704 
   13705 static int
   13706 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13707 {
   13708 	uint32_t eec;
   13709 
   13710 	eec = CSR_READ(sc, WMREG_EEC);
   13711 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13712 		return 1;
   13713 
   13714 	return 0;
   13715 }
   13716 
   13717 /*
   13718  * wm_nvm_validate_checksum
   13719  *
   13720  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13721  */
   13722 static int
   13723 wm_nvm_validate_checksum(struct wm_softc *sc)
   13724 {
   13725 	uint16_t checksum;
   13726 	uint16_t eeprom_data;
   13727 #ifdef WM_DEBUG
   13728 	uint16_t csum_wordaddr, valid_checksum;
   13729 #endif
   13730 	int i;
   13731 
   13732 	checksum = 0;
   13733 
   13734 	/* Don't check for I211 */
   13735 	if (sc->sc_type == WM_T_I211)
   13736 		return 0;
   13737 
   13738 #ifdef WM_DEBUG
   13739 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13740 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13741 		csum_wordaddr = NVM_OFF_COMPAT;
   13742 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13743 	} else {
   13744 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13745 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13746 	}
   13747 
   13748 	/* Dump EEPROM image for debug */
   13749 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13750 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13751 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13752 		/* XXX PCH_SPT? */
   13753 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13754 		if ((eeprom_data & valid_checksum) == 0)
   13755 			DPRINTF(WM_DEBUG_NVM,
   13756 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13757 				device_xname(sc->sc_dev), eeprom_data,
   13758 				    valid_checksum));
   13759 	}
   13760 
   13761 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13762 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13763 		for (i = 0; i < NVM_SIZE; i++) {
   13764 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13765 				printf("XXXX ");
   13766 			else
   13767 				printf("%04hx ", eeprom_data);
   13768 			if (i % 8 == 7)
   13769 				printf("\n");
   13770 		}
   13771 	}
   13772 
   13773 #endif /* WM_DEBUG */
   13774 
   13775 	for (i = 0; i < NVM_SIZE; i++) {
   13776 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13777 			return 1;
   13778 		checksum += eeprom_data;
   13779 	}
   13780 
   13781 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13782 #ifdef WM_DEBUG
   13783 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13784 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13785 #endif
   13786 	}
   13787 
   13788 	return 0;
   13789 }
   13790 
   13791 static void
   13792 wm_nvm_version_invm(struct wm_softc *sc)
   13793 {
   13794 	uint32_t dword;
   13795 
   13796 	/*
   13797 	 * Linux's code to decode the version is very strange, so we don't
   13798 	 * follow that algorithm and just use word 61 as the document
   13799 	 * describes.  Perhaps it's not perfect though...
   13800 	 *
   13801 	 * Example:
   13802 	 *
   13803 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13804 	 */
   13805 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13806 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13807 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13808 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13809 }
   13810 
   13811 static void
   13812 wm_nvm_version(struct wm_softc *sc)
   13813 {
   13814 	uint16_t major, minor, build, patch;
   13815 	uint16_t uid0, uid1;
   13816 	uint16_t nvm_data;
   13817 	uint16_t off;
   13818 	bool check_version = false;
   13819 	bool check_optionrom = false;
   13820 	bool have_build = false;
   13821 	bool have_uid = true;
   13822 
   13823 	/*
   13824 	 * Version format:
   13825 	 *
   13826 	 * XYYZ
   13827 	 * X0YZ
   13828 	 * X0YY
   13829 	 *
   13830 	 * Example:
   13831 	 *
   13832 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13833 	 *	82571	0x50a6	5.10.6?
   13834 	 *	82572	0x506a	5.6.10?
   13835 	 *	82572EI	0x5069	5.6.9?
   13836 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13837 	 *		0x2013	2.1.3?
   13838 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13839 	 * ICH8+82567	0x0040	0.4.0?
   13840 	 * ICH9+82566	0x1040	1.4.0?
   13841 	 *ICH10+82567	0x0043	0.4.3?
   13842 	 *  PCH+82577	0x00c1	0.12.1?
   13843 	 * PCH2+82579	0x00d3	0.13.3?
   13844 	 *		0x00d4	0.13.4?
   13845 	 *  LPT+I218	0x0023	0.2.3?
   13846 	 *  SPT+I219	0x0084	0.8.4?
   13847 	 *  CNP+I219	0x0054	0.5.4?
   13848 	 */
   13849 
   13850 	/*
   13851 	 * XXX
   13852 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   13853 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13854 	 */
   13855 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13856 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13857 		have_uid = false;
   13858 
   13859 	switch (sc->sc_type) {
   13860 	case WM_T_82571:
   13861 	case WM_T_82572:
   13862 	case WM_T_82574:
   13863 	case WM_T_82583:
   13864 		check_version = true;
   13865 		check_optionrom = true;
   13866 		have_build = true;
   13867 		break;
   13868 	case WM_T_ICH8:
   13869 	case WM_T_ICH9:
   13870 	case WM_T_ICH10:
   13871 	case WM_T_PCH:
   13872 	case WM_T_PCH2:
   13873 	case WM_T_PCH_LPT:
   13874 	case WM_T_PCH_SPT:
   13875 	case WM_T_PCH_CNP:
   13876 		check_version = true;
   13877 		have_build = true;
   13878 		have_uid = false;
   13879 		break;
   13880 	case WM_T_82575:
   13881 	case WM_T_82576:
   13882 	case WM_T_82580:
   13883 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13884 			check_version = true;
   13885 		break;
   13886 	case WM_T_I211:
   13887 		wm_nvm_version_invm(sc);
   13888 		have_uid = false;
   13889 		goto printver;
   13890 	case WM_T_I210:
   13891 		if (!wm_nvm_flash_presence_i210(sc)) {
   13892 			wm_nvm_version_invm(sc);
   13893 			have_uid = false;
   13894 			goto printver;
   13895 		}
   13896 		/* FALLTHROUGH */
   13897 	case WM_T_I350:
   13898 	case WM_T_I354:
   13899 		check_version = true;
   13900 		check_optionrom = true;
   13901 		break;
   13902 	default:
   13903 		return;
   13904 	}
   13905 	if (check_version
   13906 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13907 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13908 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13909 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13910 			build = nvm_data & NVM_BUILD_MASK;
   13911 			have_build = true;
   13912 		} else
   13913 			minor = nvm_data & 0x00ff;
   13914 
   13915 		/* Decimal */
   13916 		minor = (minor / 16) * 10 + (minor % 16);
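         		/* e.g. 0x50a2: major 5, minor 0x0a -> 10, build 2 (5.10.2). */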
   13917 		sc->sc_nvm_ver_major = major;
   13918 		sc->sc_nvm_ver_minor = minor;
   13919 
   13920 printver:
   13921 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13922 		    sc->sc_nvm_ver_minor);
   13923 		if (have_build) {
   13924 			sc->sc_nvm_ver_build = build;
   13925 			aprint_verbose(".%d", build);
   13926 		}
   13927 	}
   13928 
   13929 	/* Assume the Option ROM area is above NVM_SIZE */
   13930 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13931 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13932 		/* Option ROM Version */
   13933 		if ((off != 0x0000) && (off != 0xffff)) {
   13934 			int rv;
   13935 
   13936 			off += NVM_COMBO_VER_OFF;
   13937 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13938 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13939 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13940 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13941 				/* 16bits */
   13942 				major = uid0 >> 8;
   13943 				build = (uid0 << 8) | (uid1 >> 8);
   13944 				patch = uid1 & 0x00ff;
   13945 				aprint_verbose(", option ROM Version %d.%d.%d",
   13946 				    major, build, patch);
   13947 			}
   13948 		}
   13949 	}
   13950 
   13951 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13952 		aprint_verbose(", Image Unique ID %08x",
   13953 		    ((uint32_t)uid1 << 16) | uid0);
   13954 }
   13955 
   13956 /*
   13957  * wm_nvm_read:
   13958  *
   13959  *	Read data from the serial EEPROM.
   13960  */
   13961 static int
   13962 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13963 {
   13964 	int rv;
   13965 
   13966 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13967 		device_xname(sc->sc_dev), __func__));
   13968 
   13969 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13970 		return -1;
   13971 
   13972 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13973 
   13974 	return rv;
   13975 }
   13976 
   13977 /*
   13978  * Hardware semaphores.
   13979  * Very complex...
   13980  */
   13981 
   13982 static int
   13983 wm_get_null(struct wm_softc *sc)
   13984 {
   13985 
   13986 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13987 		device_xname(sc->sc_dev), __func__));
   13988 	return 0;
   13989 }
   13990 
   13991 static void
   13992 wm_put_null(struct wm_softc *sc)
   13993 {
   13994 
   13995 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13996 		device_xname(sc->sc_dev), __func__));
   13997 	return;
   13998 }
   13999 
   14000 static int
   14001 wm_get_eecd(struct wm_softc *sc)
   14002 {
   14003 	uint32_t reg;
   14004 	int x;
   14005 
   14006 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14007 		device_xname(sc->sc_dev), __func__));
   14008 
   14009 	reg = CSR_READ(sc, WMREG_EECD);
   14010 
   14011 	/* Request EEPROM access. */
   14012 	reg |= EECD_EE_REQ;
   14013 	CSR_WRITE(sc, WMREG_EECD, reg);
   14014 
   14015 	/* ... and wait for it to be granted. */
   14016 	for (x = 0; x < 1000; x++) {
   14017 		reg = CSR_READ(sc, WMREG_EECD);
   14018 		if (reg & EECD_EE_GNT)
   14019 			break;
   14020 		delay(5);
   14021 	}
   14022 	if ((reg & EECD_EE_GNT) == 0) {
   14023 		aprint_error_dev(sc->sc_dev,
   14024 		    "could not acquire EEPROM GNT\n");
   14025 		reg &= ~EECD_EE_REQ;
   14026 		CSR_WRITE(sc, WMREG_EECD, reg);
   14027 		return -1;
   14028 	}
   14029 
   14030 	return 0;
   14031 }
   14032 
   14033 static void
   14034 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14035 {
   14036 
   14037 	*eecd |= EECD_SK;
   14038 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14039 	CSR_WRITE_FLUSH(sc);
   14040 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14041 		delay(1);
   14042 	else
   14043 		delay(50);
   14044 }
   14045 
   14046 static void
   14047 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14048 {
   14049 
   14050 	*eecd &= ~EECD_SK;
   14051 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14052 	CSR_WRITE_FLUSH(sc);
   14053 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14054 		delay(1);
   14055 	else
   14056 		delay(50);
   14057 }
   14058 
   14059 static void
   14060 wm_put_eecd(struct wm_softc *sc)
   14061 {
   14062 	uint32_t reg;
   14063 
   14064 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14065 		device_xname(sc->sc_dev), __func__));
   14066 
   14067 	/* Stop nvm */
   14068 	reg = CSR_READ(sc, WMREG_EECD);
   14069 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14070 		/* Pull CS high */
   14071 		reg |= EECD_CS;
   14072 		wm_nvm_eec_clock_lower(sc, &reg);
   14073 	} else {
   14074 		/* CS on Microwire is active-high */
   14075 		reg &= ~(EECD_CS | EECD_DI);
   14076 		CSR_WRITE(sc, WMREG_EECD, reg);
   14077 		wm_nvm_eec_clock_raise(sc, &reg);
   14078 		wm_nvm_eec_clock_lower(sc, &reg);
   14079 	}
   14080 
   14081 	reg = CSR_READ(sc, WMREG_EECD);
   14082 	reg &= ~EECD_EE_REQ;
   14083 	CSR_WRITE(sc, WMREG_EECD, reg);
   14084 
   14085 	return;
   14086 }
   14087 
   14088 /*
   14089  * Get hardware semaphore.
   14090  * Same as e1000_get_hw_semaphore_generic()
   14091  */
   14092 static int
   14093 wm_get_swsm_semaphore(struct wm_softc *sc)
   14094 {
   14095 	int32_t timeout;
   14096 	uint32_t swsm;
   14097 
   14098 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14099 		device_xname(sc->sc_dev), __func__));
   14100 	KASSERT(sc->sc_nvm_wordsize > 0);
   14101 
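         	/*
         	 * Two-stage semaphore: SMBI arbitrates among software threads,
         	 * then SWESMBI arbitrates software against firmware.  Both must
         	 * be held before touching the shared resource.
         	 */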
   14102 retry:
   14103 	/* Get the SW semaphore. */
   14104 	timeout = sc->sc_nvm_wordsize + 1;
   14105 	while (timeout) {
   14106 		swsm = CSR_READ(sc, WMREG_SWSM);
   14107 
   14108 		if ((swsm & SWSM_SMBI) == 0)
   14109 			break;
   14110 
   14111 		delay(50);
   14112 		timeout--;
   14113 	}
   14114 
   14115 	if (timeout == 0) {
   14116 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14117 			/*
   14118 			 * In rare circumstances, the SW semaphore may already
   14119 			 * be held unintentionally. Clear the semaphore once
   14120 			 * before giving up.
   14121 			 */
   14122 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14123 			wm_put_swsm_semaphore(sc);
   14124 			goto retry;
   14125 		}
   14126 		aprint_error_dev(sc->sc_dev,
   14127 		    "could not acquire SWSM SMBI\n");
   14128 		return 1;
   14129 	}
   14130 
   14131 	/* Get the FW semaphore. */
   14132 	timeout = sc->sc_nvm_wordsize + 1;
   14133 	while (timeout) {
   14134 		swsm = CSR_READ(sc, WMREG_SWSM);
   14135 		swsm |= SWSM_SWESMBI;
   14136 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14137 		/* If we managed to set the bit we got the semaphore. */
   14138 		swsm = CSR_READ(sc, WMREG_SWSM);
   14139 		if (swsm & SWSM_SWESMBI)
   14140 			break;
   14141 
   14142 		delay(50);
   14143 		timeout--;
   14144 	}
   14145 
   14146 	if (timeout == 0) {
   14147 		aprint_error_dev(sc->sc_dev,
   14148 		    "could not acquire SWSM SWESMBI\n");
   14149 		/* Release semaphores */
   14150 		wm_put_swsm_semaphore(sc);
   14151 		return 1;
   14152 	}
   14153 	return 0;
   14154 }
   14155 
   14156 /*
   14157  * Put hardware semaphore.
   14158  * Same as e1000_put_hw_semaphore_generic()
   14159  */
   14160 static void
   14161 wm_put_swsm_semaphore(struct wm_softc *sc)
   14162 {
   14163 	uint32_t swsm;
   14164 
   14165 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14166 		device_xname(sc->sc_dev), __func__));
   14167 
   14168 	swsm = CSR_READ(sc, WMREG_SWSM);
   14169 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14170 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14171 }
   14172 
   14173 /*
   14174  * Get SW/FW semaphore.
   14175  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14176  */
   14177 static int
   14178 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14179 {
   14180 	uint32_t swfw_sync;
   14181 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14182 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14183 	int timeout;
   14184 
   14185 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14186 		device_xname(sc->sc_dev), __func__));
   14187 
   14188 	if (sc->sc_type == WM_T_80003)
   14189 		timeout = 50;
   14190 	else
   14191 		timeout = 200;
   14192 
   14193 	while (timeout) {
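         		/*
         		 * Under the SWSM semaphore, test-and-set our mask bit in
         		 * SW_FW_SYNC if neither software nor firmware currently
         		 * holds it.
         		 */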
   14194 		if (wm_get_swsm_semaphore(sc)) {
   14195 			aprint_error_dev(sc->sc_dev,
   14196 			    "%s: failed to get semaphore\n",
   14197 			    __func__);
   14198 			return 1;
   14199 		}
   14200 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14201 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14202 			swfw_sync |= swmask;
   14203 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14204 			wm_put_swsm_semaphore(sc);
   14205 			return 0;
   14206 		}
   14207 		wm_put_swsm_semaphore(sc);
   14208 		delay(5000);
   14209 		timeout--;
   14210 	}
   14211 	device_printf(sc->sc_dev,
   14212 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14213 	    mask, swfw_sync);
   14214 	return 1;
   14215 }
   14216 
   14217 static void
   14218 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14219 {
   14220 	uint32_t swfw_sync;
   14221 
   14222 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14223 		device_xname(sc->sc_dev), __func__));
   14224 
   14225 	while (wm_get_swsm_semaphore(sc) != 0)
   14226 		continue;
   14227 
   14228 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14229 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14230 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14231 
   14232 	wm_put_swsm_semaphore(sc);
   14233 }
   14234 
   14235 static int
   14236 wm_get_nvm_80003(struct wm_softc *sc)
   14237 {
   14238 	int rv;
   14239 
   14240 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14241 		device_xname(sc->sc_dev), __func__));
   14242 
   14243 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14244 		aprint_error_dev(sc->sc_dev,
   14245 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14246 		return rv;
   14247 	}
   14248 
   14249 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14250 	    && (rv = wm_get_eecd(sc)) != 0) {
   14251 		aprint_error_dev(sc->sc_dev,
   14252 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14253 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14254 		return rv;
   14255 	}
   14256 
   14257 	return 0;
   14258 }
   14259 
   14260 static void
   14261 wm_put_nvm_80003(struct wm_softc *sc)
   14262 {
   14263 
   14264 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14265 		device_xname(sc->sc_dev), __func__));
   14266 
   14267 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14268 		wm_put_eecd(sc);
   14269 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14270 }
   14271 
   14272 static int
   14273 wm_get_nvm_82571(struct wm_softc *sc)
   14274 {
   14275 	int rv;
   14276 
   14277 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14278 		device_xname(sc->sc_dev), __func__));
   14279 
   14280 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14281 		return rv;
   14282 
   14283 	switch (sc->sc_type) {
   14284 	case WM_T_82573:
   14285 		break;
   14286 	default:
   14287 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14288 			rv = wm_get_eecd(sc);
   14289 		break;
   14290 	}
   14291 
   14292 	if (rv != 0) {
   14293 		aprint_error_dev(sc->sc_dev,
   14294 		    "%s: failed to get semaphore\n",
   14295 		    __func__);
   14296 		wm_put_swsm_semaphore(sc);
   14297 	}
   14298 
   14299 	return rv;
   14300 }
   14301 
   14302 static void
   14303 wm_put_nvm_82571(struct wm_softc *sc)
   14304 {
   14305 
   14306 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14307 		device_xname(sc->sc_dev), __func__));
   14308 
   14309 	switch (sc->sc_type) {
   14310 	case WM_T_82573:
   14311 		break;
   14312 	default:
   14313 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14314 			wm_put_eecd(sc);
   14315 		break;
   14316 	}
   14317 
   14318 	wm_put_swsm_semaphore(sc);
   14319 }
   14320 
   14321 static int
   14322 wm_get_phy_82575(struct wm_softc *sc)
   14323 {
   14324 
   14325 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14326 		device_xname(sc->sc_dev), __func__));
   14327 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14328 }
   14329 
   14330 static void
   14331 wm_put_phy_82575(struct wm_softc *sc)
   14332 {
   14333 
   14334 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14335 		device_xname(sc->sc_dev), __func__));
   14336 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14337 }
   14338 
   14339 static int
   14340 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14341 {
   14342 	uint32_t ext_ctrl;
   14343 	int timeout;
   14344 
   14345 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14346 		device_xname(sc->sc_dev), __func__));
   14347 
   14348 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14349 	for (timeout = 0; timeout < 200; timeout++) {
   14350 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14351 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14352 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14353 
   14354 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14355 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14356 			return 0;
   14357 		delay(5000);
   14358 	}
   14359 	device_printf(sc->sc_dev,
   14360 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14361 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14362 	return 1;
   14363 }
   14364 
   14365 static void
   14366 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14367 {
   14368 	uint32_t ext_ctrl;
   14369 
   14370 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14371 		device_xname(sc->sc_dev), __func__));
   14372 
   14373 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14374 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14375 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14376 
   14377 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14378 }
   14379 
   14380 static int
   14381 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14382 {
   14383 	uint32_t ext_ctrl;
   14384 	int timeout;
   14385 
   14386 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14387 		device_xname(sc->sc_dev), __func__));
   14388 	mutex_enter(sc->sc_ich_phymtx);
   14389 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14390 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14391 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14392 			break;
   14393 		delay(1000);
   14394 	}
   14395 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14396 		device_printf(sc->sc_dev,
   14397 		    "SW has already locked the resource\n");
   14398 		goto out;
   14399 	}
   14400 
   14401 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14402 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14403 	for (timeout = 0; timeout < 1000; timeout++) {
   14404 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14405 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14406 			break;
   14407 		delay(1000);
   14408 	}
   14409 	if (timeout >= 1000) {
   14410 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14411 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14412 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14413 		goto out;
   14414 	}
   14415 	return 0;
   14416 
   14417 out:
   14418 	mutex_exit(sc->sc_ich_phymtx);
   14419 	return 1;
   14420 }
   14421 
   14422 static void
   14423 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14424 {
   14425 	uint32_t ext_ctrl;
   14426 
   14427 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14428 		device_xname(sc->sc_dev), __func__));
   14429 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14430 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14431 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14432 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14433 	} else {
   14434 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14435 	}
   14436 
   14437 	mutex_exit(sc->sc_ich_phymtx);
   14438 }
   14439 
   14440 static int
   14441 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14442 {
   14443 
   14444 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14445 		device_xname(sc->sc_dev), __func__));
   14446 	mutex_enter(sc->sc_ich_nvmmtx);
   14447 
   14448 	return 0;
   14449 }
   14450 
   14451 static void
   14452 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14453 {
   14454 
   14455 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14456 		device_xname(sc->sc_dev), __func__));
   14457 	mutex_exit(sc->sc_ich_nvmmtx);
   14458 }
   14459 
   14460 static int
   14461 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14462 {
   14463 	int i = 0;
   14464 	uint32_t reg;
   14465 
   14466 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14467 		device_xname(sc->sc_dev), __func__));
   14468 
   14469 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14470 	do {
   14471 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14472 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14473 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14474 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14475 			break;
   14476 		delay(2*1000);
   14477 		i++;
   14478 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14479 
   14480 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14481 		wm_put_hw_semaphore_82573(sc);
   14482 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14483 		    device_xname(sc->sc_dev));
   14484 		return -1;
   14485 	}
   14486 
   14487 	return 0;
   14488 }
   14489 
   14490 static void
   14491 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14492 {
   14493 	uint32_t reg;
   14494 
   14495 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14496 		device_xname(sc->sc_dev), __func__));
   14497 
   14498 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14499 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14500 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14501 }
   14502 
   14503 /*
   14504  * Management mode and power management related subroutines.
   14505  * BMC, AMT, suspend/resume and EEE.
   14506  */
   14507 
   14508 #ifdef WM_WOL
   14509 static int
   14510 wm_check_mng_mode(struct wm_softc *sc)
   14511 {
   14512 	int rv;
   14513 
   14514 	switch (sc->sc_type) {
   14515 	case WM_T_ICH8:
   14516 	case WM_T_ICH9:
   14517 	case WM_T_ICH10:
   14518 	case WM_T_PCH:
   14519 	case WM_T_PCH2:
   14520 	case WM_T_PCH_LPT:
   14521 	case WM_T_PCH_SPT:
   14522 	case WM_T_PCH_CNP:
   14523 		rv = wm_check_mng_mode_ich8lan(sc);
   14524 		break;
   14525 	case WM_T_82574:
   14526 	case WM_T_82583:
   14527 		rv = wm_check_mng_mode_82574(sc);
   14528 		break;
   14529 	case WM_T_82571:
   14530 	case WM_T_82572:
   14531 	case WM_T_82573:
   14532 	case WM_T_80003:
   14533 		rv = wm_check_mng_mode_generic(sc);
   14534 		break;
   14535 	default:
    14536 		/* Nothing to do */
   14537 		rv = 0;
   14538 		break;
   14539 	}
   14540 
   14541 	return rv;
   14542 }
   14543 
   14544 static int
   14545 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14546 {
   14547 	uint32_t fwsm;
   14548 
   14549 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14550 
   14551 	if (((fwsm & FWSM_FW_VALID) != 0)
   14552 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14553 		return 1;
   14554 
   14555 	return 0;
   14556 }
   14557 
   14558 static int
   14559 wm_check_mng_mode_82574(struct wm_softc *sc)
   14560 {
   14561 	uint16_t data;
   14562 
   14563 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14564 
   14565 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14566 		return 1;
   14567 
   14568 	return 0;
   14569 }
   14570 
   14571 static int
   14572 wm_check_mng_mode_generic(struct wm_softc *sc)
   14573 {
   14574 	uint32_t fwsm;
   14575 
   14576 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14577 
   14578 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14579 		return 1;
   14580 
   14581 	return 0;
   14582 }
   14583 #endif /* WM_WOL */
   14584 
   14585 static int
   14586 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14587 {
   14588 	uint32_t manc, fwsm, factps;
   14589 
   14590 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14591 		return 0;
   14592 
   14593 	manc = CSR_READ(sc, WMREG_MANC);
   14594 
   14595 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14596 		device_xname(sc->sc_dev), manc));
   14597 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14598 		return 0;
   14599 
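          	/*
          	 * Pass-through requires the manageability clock to be ungated
          	 * (FACTPS_MNGCG clear) and the firmware mode (from FWSM or the
          	 * NVM CFG2 word, depending on the chip) to be pass-through;
          	 * older parts instead require SMBus enabled with ASF disabled.
          	 */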
   14600 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14601 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14602 		factps = CSR_READ(sc, WMREG_FACTPS);
   14603 		if (((factps & FACTPS_MNGCG) == 0)
   14604 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14605 			return 1;
   14606 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14607 		uint16_t data;
   14608 
   14609 		factps = CSR_READ(sc, WMREG_FACTPS);
   14610 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14611 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14612 			device_xname(sc->sc_dev), factps, data));
   14613 		if (((factps & FACTPS_MNGCG) == 0)
   14614 		    && ((data & NVM_CFG2_MNGM_MASK)
   14615 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14616 			return 1;
   14617 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14618 	    && ((manc & MANC_ASF_EN) == 0))
   14619 		return 1;
   14620 
   14621 	return 0;
   14622 }
   14623 
   14624 static bool
   14625 wm_phy_resetisblocked(struct wm_softc *sc)
   14626 {
   14627 	bool blocked = false;
   14628 	uint32_t reg;
   14629 	int i = 0;
   14630 
   14631 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14632 		device_xname(sc->sc_dev), __func__));
   14633 
   14634 	switch (sc->sc_type) {
   14635 	case WM_T_ICH8:
   14636 	case WM_T_ICH9:
   14637 	case WM_T_ICH10:
   14638 	case WM_T_PCH:
   14639 	case WM_T_PCH2:
   14640 	case WM_T_PCH_LPT:
   14641 	case WM_T_PCH_SPT:
   14642 	case WM_T_PCH_CNP:
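          		/*
          		 * PHY reset is blocked while FWSM_RSPCIPHY is clear;
          		 * poll up to 30 times, 10ms apart, for the firmware
          		 * to allow the reset.
          		 */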
   14643 		do {
   14644 			reg = CSR_READ(sc, WMREG_FWSM);
   14645 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14646 				blocked = true;
   14647 				delay(10*1000);
   14648 				continue;
   14649 			}
   14650 			blocked = false;
   14651 		} while (blocked && (i++ < 30));
   14652 		return blocked;
   14654 	case WM_T_82571:
   14655 	case WM_T_82572:
   14656 	case WM_T_82573:
   14657 	case WM_T_82574:
   14658 	case WM_T_82583:
   14659 	case WM_T_80003:
   14660 		reg = CSR_READ(sc, WMREG_MANC);
   14661 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14662 			return true;
   14663 		else
   14664 			return false;
   14666 	default:
   14667 		/* No problem */
   14668 		break;
   14669 	}
   14670 
   14671 	return false;
   14672 }
   14673 
   14674 static void
   14675 wm_get_hw_control(struct wm_softc *sc)
   14676 {
   14677 	uint32_t reg;
   14678 
   14679 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14680 		device_xname(sc->sc_dev), __func__));
   14681 
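          	/*
          	 * Set the DRV_LOAD bit to tell the firmware that the driver
          	 * has taken over (the 82573 uses SWSM, later parts CTRL_EXT).
          	 */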
   14682 	if (sc->sc_type == WM_T_82573) {
   14683 		reg = CSR_READ(sc, WMREG_SWSM);
   14684 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14685 	} else if (sc->sc_type >= WM_T_82571) {
   14686 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14687 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14688 	}
   14689 }
   14690 
   14691 static void
   14692 wm_release_hw_control(struct wm_softc *sc)
   14693 {
   14694 	uint32_t reg;
   14695 
   14696 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14697 		device_xname(sc->sc_dev), __func__));
   14698 
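          	/* Clear DRV_LOAD so the firmware can take over the hardware */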
   14699 	if (sc->sc_type == WM_T_82573) {
   14700 		reg = CSR_READ(sc, WMREG_SWSM);
   14701 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14702 	} else if (sc->sc_type >= WM_T_82571) {
   14703 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14704 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14705 	}
   14706 }
   14707 
   14708 static void
   14709 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14710 {
   14711 	uint32_t reg;
   14712 
   14713 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14714 		device_xname(sc->sc_dev), __func__));
   14715 
   14716 	if (sc->sc_type < WM_T_PCH2)
   14717 		return;
   14718 
   14719 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14720 
   14721 	if (gate)
   14722 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14723 	else
   14724 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14725 
   14726 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14727 }
   14728 
   14729 static int
   14730 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14731 {
   14732 	uint32_t fwsm, reg;
   14733 	int rv = 0;
   14734 
   14735 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14736 		device_xname(sc->sc_dev), __func__));
   14737 
   14738 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14739 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14740 
   14741 	/* Disable ULP */
   14742 	wm_ulp_disable(sc);
   14743 
   14744 	/* Acquire PHY semaphore */
   14745 	rv = sc->phy.acquire(sc);
   14746 	if (rv != 0) {
   14747 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14748 		device_xname(sc->sc_dev), __func__));
   14749 		return -1;
   14750 	}
   14751 
   14752 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14753 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14754 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14755 	 */
   14756 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14757 	switch (sc->sc_type) {
   14758 	case WM_T_PCH_LPT:
   14759 	case WM_T_PCH_SPT:
   14760 	case WM_T_PCH_CNP:
   14761 		if (wm_phy_is_accessible_pchlan(sc))
   14762 			break;
   14763 
   14764 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14765 		 * forcing MAC to SMBus mode first.
   14766 		 */
   14767 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14768 		reg |= CTRL_EXT_FORCE_SMBUS;
   14769 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14770 #if 0
   14771 		/* XXX Isn't this required??? */
   14772 		CSR_WRITE_FLUSH(sc);
   14773 #endif
   14774 		/* Wait 50 milliseconds for MAC to finish any retries
   14775 		 * that it might be trying to perform from previous
   14776 		 * attempts to acknowledge any phy read requests.
   14777 		 */
   14778 		delay(50 * 1000);
   14779 		/* FALLTHROUGH */
   14780 	case WM_T_PCH2:
   14781 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14782 			break;
   14783 		/* FALLTHROUGH */
   14784 	case WM_T_PCH:
   14785 		if (sc->sc_type == WM_T_PCH)
   14786 			if ((fwsm & FWSM_FW_VALID) != 0)
   14787 				break;
   14788 
   14789 		if (wm_phy_resetisblocked(sc) == true) {
   14790 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14791 			break;
   14792 		}
   14793 
   14794 		/* Toggle LANPHYPC Value bit */
   14795 		wm_toggle_lanphypc_pch_lpt(sc);
   14796 
   14797 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14798 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14799 				break;
   14800 
   14801 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14802 			 * so ensure that the MAC is also out of SMBus mode
   14803 			 */
   14804 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14805 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14806 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14807 
   14808 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14809 				break;
   14810 			rv = -1;
   14811 		}
   14812 		break;
   14813 	default:
   14814 		break;
   14815 	}
   14816 
   14817 	/* Release semaphore */
   14818 	sc->phy.release(sc);
   14819 
   14820 	if (rv == 0) {
   14821 		/* Check to see if able to reset PHY.  Print error if not */
   14822 		if (wm_phy_resetisblocked(sc)) {
   14823 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14824 			goto out;
   14825 		}
   14826 
   14827 		/* Reset the PHY before any access to it.  Doing so, ensures
   14828 		 * that the PHY is in a known good state before we read/write
   14829 		 * PHY registers.  The generic reset is sufficient here,
   14830 		 * because we haven't determined the PHY type yet.
   14831 		 */
   14832 		if (wm_reset_phy(sc) != 0)
   14833 			goto out;
   14834 
   14835 		/* On a successful reset, possibly need to wait for the PHY
   14836 		 * to quiesce to an accessible state before returning control
   14837 		 * to the calling function.  If the PHY does not quiesce, then
   14838 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
   14839 		 *  the PHY is in.
   14840 		 */
   14841 		if (wm_phy_resetisblocked(sc))
   14842 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14843 	}
   14844 
   14845 out:
   14846 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14847 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14848 		delay(10*1000);
   14849 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14850 	}
   14851 
    14852 	return rv;
   14853 }
   14854 
   14855 static void
   14856 wm_init_manageability(struct wm_softc *sc)
   14857 {
   14858 
   14859 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14860 		device_xname(sc->sc_dev), __func__));
   14861 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14862 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14863 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14864 
   14865 		/* Disable hardware interception of ARP */
   14866 		manc &= ~MANC_ARP_EN;
   14867 
   14868 		/* Enable receiving management packets to the host */
   14869 		if (sc->sc_type >= WM_T_82571) {
   14870 			manc |= MANC_EN_MNG2HOST;
   14871 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14872 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14873 		}
   14874 
   14875 		CSR_WRITE(sc, WMREG_MANC, manc);
   14876 	}
   14877 }
   14878 
   14879 static void
   14880 wm_release_manageability(struct wm_softc *sc)
   14881 {
   14882 
   14883 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14884 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14885 
   14886 		manc |= MANC_ARP_EN;
   14887 		if (sc->sc_type >= WM_T_82571)
   14888 			manc &= ~MANC_EN_MNG2HOST;
   14889 
   14890 		CSR_WRITE(sc, WMREG_MANC, manc);
   14891 	}
   14892 }
   14893 
   14894 static void
   14895 wm_get_wakeup(struct wm_softc *sc)
   14896 {
   14897 
   14898 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14899 	switch (sc->sc_type) {
   14900 	case WM_T_82573:
   14901 	case WM_T_82583:
   14902 		sc->sc_flags |= WM_F_HAS_AMT;
   14903 		/* FALLTHROUGH */
   14904 	case WM_T_80003:
   14905 	case WM_T_82575:
   14906 	case WM_T_82576:
   14907 	case WM_T_82580:
   14908 	case WM_T_I350:
   14909 	case WM_T_I354:
   14910 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14911 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14912 		/* FALLTHROUGH */
   14913 	case WM_T_82541:
   14914 	case WM_T_82541_2:
   14915 	case WM_T_82547:
   14916 	case WM_T_82547_2:
   14917 	case WM_T_82571:
   14918 	case WM_T_82572:
   14919 	case WM_T_82574:
   14920 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14921 		break;
   14922 	case WM_T_ICH8:
   14923 	case WM_T_ICH9:
   14924 	case WM_T_ICH10:
   14925 	case WM_T_PCH:
   14926 	case WM_T_PCH2:
   14927 	case WM_T_PCH_LPT:
   14928 	case WM_T_PCH_SPT:
   14929 	case WM_T_PCH_CNP:
   14930 		sc->sc_flags |= WM_F_HAS_AMT;
   14931 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14932 		break;
   14933 	default:
   14934 		break;
   14935 	}
   14936 
   14937 	/* 1: HAS_MANAGE */
   14938 	if (wm_enable_mng_pass_thru(sc) != 0)
   14939 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14940 
   14941 	/*
    14942 	 * Note that the WOL flag is set after the resetting of the EEPROM
    14943 	 * stuff.
   14944 	 */
   14945 }
   14946 
   14947 /*
   14948  * Unconfigure Ultra Low Power mode.
   14949  * Only for I217 and newer (see below).
   14950  */
   14951 static int
   14952 wm_ulp_disable(struct wm_softc *sc)
   14953 {
   14954 	uint32_t reg;
   14955 	uint16_t phyreg;
   14956 	int i = 0, rv = 0;
   14957 
   14958 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14959 		device_xname(sc->sc_dev), __func__));
   14960 	/* Exclude old devices */
   14961 	if ((sc->sc_type < WM_T_PCH_LPT)
   14962 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14963 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14964 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14965 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14966 		return 0;
   14967 
   14968 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14969 		/* Request ME un-configure ULP mode in the PHY */
   14970 		reg = CSR_READ(sc, WMREG_H2ME);
   14971 		reg &= ~H2ME_ULP;
   14972 		reg |= H2ME_ENFORCE_SETTINGS;
   14973 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14974 
   14975 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14976 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14977 			if (i++ == 30) {
   14978 				device_printf(sc->sc_dev, "%s timed out\n",
   14979 				    __func__);
   14980 				return -1;
   14981 			}
   14982 			delay(10 * 1000);
   14983 		}
   14984 		reg = CSR_READ(sc, WMREG_H2ME);
   14985 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14986 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14987 
   14988 		return 0;
   14989 	}
   14990 
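          	/*
          	 * No manageability firmware, so un-configure ULP manually:
          	 * power-cycle the PHY via LANPHYPC, take the PHY and the MAC
          	 * out of SMBus mode and clear the ULP configuration bits.
          	 */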
   14991 	/* Acquire semaphore */
   14992 	rv = sc->phy.acquire(sc);
   14993 	if (rv != 0) {
   14994 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14995 		device_xname(sc->sc_dev), __func__));
   14996 		return -1;
   14997 	}
   14998 
   14999 	/* Toggle LANPHYPC */
   15000 	wm_toggle_lanphypc_pch_lpt(sc);
   15001 
   15002 	/* Unforce SMBus mode in PHY */
   15003 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15004 	if (rv != 0) {
   15005 		uint32_t reg2;
   15006 
   15007 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15008 			__func__);
   15009 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15010 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15011 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15012 		delay(50 * 1000);
   15013 
   15014 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15015 		    &phyreg);
   15016 		if (rv != 0)
   15017 			goto release;
   15018 	}
   15019 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15020 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15021 
   15022 	/* Unforce SMBus mode in MAC */
   15023 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15024 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15025 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15026 
   15027 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15028 	if (rv != 0)
   15029 		goto release;
   15030 	phyreg |= HV_PM_CTRL_K1_ENA;
   15031 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15032 
   15033 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15034 		&phyreg);
   15035 	if (rv != 0)
   15036 		goto release;
   15037 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15038 	    | I218_ULP_CONFIG1_STICKY_ULP
   15039 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15040 	    | I218_ULP_CONFIG1_WOL_HOST
   15041 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15042 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15043 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15044 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15045 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15046 	phyreg |= I218_ULP_CONFIG1_START;
   15047 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15048 
   15049 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15050 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15051 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15052 
   15053 release:
   15054 	/* Release semaphore */
   15055 	sc->phy.release(sc);
   15056 	wm_gmii_reset(sc);
   15057 	delay(50 * 1000);
   15058 
   15059 	return rv;
   15060 }
   15061 
   15062 /* WOL in the newer chipset interfaces (pchlan) */
   15063 static int
   15064 wm_enable_phy_wakeup(struct wm_softc *sc)
   15065 {
   15066 	device_t dev = sc->sc_dev;
   15067 	uint32_t mreg, moff;
   15068 	uint16_t wuce, wuc, wufc, preg;
   15069 	int i, rv;
   15070 
   15071 	KASSERT(sc->sc_type >= WM_T_PCH);
   15072 
   15073 	/* Copy MAC RARs to PHY RARs */
   15074 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15075 
   15076 	/* Activate PHY wakeup */
   15077 	rv = sc->phy.acquire(sc);
   15078 	if (rv != 0) {
   15079 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15080 		    __func__);
   15081 		return rv;
   15082 	}
   15083 
   15084 	/*
   15085 	 * Enable access to PHY wakeup registers.
   15086 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15087 	 */
   15088 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15089 	if (rv != 0) {
   15090 		device_printf(dev,
   15091 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15092 		goto release;
   15093 	}
   15094 
   15095 	/* Copy MAC MTA to PHY MTA */
   15096 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15097 		uint16_t lo, hi;
   15098 
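          		/* Each 32-bit MTA word maps to two 16-bit PHY registers */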
   15099 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15100 		lo = (uint16_t)(mreg & 0xffff);
   15101 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15102 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15103 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15104 	}
   15105 
   15106 	/* Configure PHY Rx Control register */
   15107 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15108 	mreg = CSR_READ(sc, WMREG_RCTL);
   15109 	if (mreg & RCTL_UPE)
   15110 		preg |= BM_RCTL_UPE;
   15111 	if (mreg & RCTL_MPE)
   15112 		preg |= BM_RCTL_MPE;
   15113 	preg &= ~(BM_RCTL_MO_MASK);
   15114 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15115 	if (moff != 0)
   15116 		preg |= moff << BM_RCTL_MO_SHIFT;
   15117 	if (mreg & RCTL_BAM)
   15118 		preg |= BM_RCTL_BAM;
   15119 	if (mreg & RCTL_PMCF)
   15120 		preg |= BM_RCTL_PMCF;
   15121 	mreg = CSR_READ(sc, WMREG_CTRL);
   15122 	if (mreg & CTRL_RFCE)
   15123 		preg |= BM_RCTL_RFCE;
   15124 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15125 
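          	/* Wake on magic packet, with APM wakeup and PME enabled */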
   15126 	wuc = WUC_APME | WUC_PME_EN;
   15127 	wufc = WUFC_MAG;
   15128 	/* Enable PHY wakeup in MAC register */
   15129 	CSR_WRITE(sc, WMREG_WUC,
   15130 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15131 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15132 
   15133 	/* Configure and enable PHY wakeup in PHY registers */
   15134 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15135 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15136 
   15137 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15138 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15139 
   15140 release:
   15141 	sc->phy.release(sc);
   15142 
    15143 	return rv;
   15144 }
   15145 
   15146 /* Power down workaround on D3 */
   15147 static void
   15148 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15149 {
   15150 	uint32_t reg;
   15151 	uint16_t phyreg;
   15152 	int i;
   15153 
   15154 	for (i = 0; i < 2; i++) {
   15155 		/* Disable link */
   15156 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15157 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15158 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15159 
   15160 		/*
   15161 		 * Call gig speed drop workaround on Gig disable before
   15162 		 * accessing any PHY registers
   15163 		 */
   15164 		if (sc->sc_type == WM_T_ICH8)
   15165 			wm_gig_downshift_workaround_ich8lan(sc);
   15166 
   15167 		/* Write VR power-down enable */
   15168 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15169 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15170 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15171 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15172 
   15173 		/* Read it back and test */
   15174 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15175 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15176 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15177 			break;
   15178 
   15179 		/* Issue PHY reset and repeat at most one more time */
   15180 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15181 	}
   15182 }
   15183 
   15184 /*
   15185  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15186  *  @sc: pointer to the HW structure
   15187  *
   15188  *  During S0 to Sx transition, it is possible the link remains at gig
   15189  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15190  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15191  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15192  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15193  *  needs to be written.
   15194  *  Parts that support (and are linked to a partner which support) EEE in
   15195  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15196  *  than 10Mbps w/o EEE.
   15197  */
   15198 static void
   15199 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15200 {
   15201 	device_t dev = sc->sc_dev;
   15202 	struct ethercom *ec = &sc->sc_ethercom;
   15203 	uint32_t phy_ctrl;
   15204 	int rv;
   15205 
   15206 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15207 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15208 
   15209 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15210 
   15211 	if (sc->sc_phytype == WMPHY_I217) {
   15212 		uint16_t devid = sc->sc_pcidevid;
   15213 
   15214 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15215 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15216 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15217 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15218 		    (sc->sc_type >= WM_T_PCH_SPT))
   15219 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15220 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15221 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15222 
   15223 		if (sc->phy.acquire(sc) != 0)
   15224 			goto out;
   15225 
   15226 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15227 			uint16_t eee_advert;
   15228 
   15229 			rv = wm_read_emi_reg_locked(dev,
   15230 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15231 			if (rv)
   15232 				goto release;
   15233 
   15234 			/*
   15235 			 * Disable LPLU if both link partners support 100BaseT
   15236 			 * EEE and 100Full is advertised on both ends of the
   15237 			 * link, and enable Auto Enable LPI since there will
   15238 			 * be no driver to enable LPI while in Sx.
   15239 			 */
   15240 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15241 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15242 				uint16_t anar, phy_reg;
   15243 
   15244 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15245 				    &anar);
   15246 				if (anar & ANAR_TX_FD) {
   15247 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15248 					    PHY_CTRL_NOND0A_LPLU);
   15249 
   15250 					/* Set Auto Enable LPI after link up */
   15251 					sc->phy.readreg_locked(dev, 2,
   15252 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15253 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15254 					sc->phy.writereg_locked(dev, 2,
   15255 					    I217_LPI_GPIO_CTRL, phy_reg);
   15256 				}
   15257 			}
   15258 		}
   15259 
   15260 		/*
   15261 		 * For i217 Intel Rapid Start Technology support,
   15262 		 * when the system is going into Sx and no manageability engine
   15263 		 * is present, the driver must configure proxy to reset only on
   15264 		 * power good.	LPI (Low Power Idle) state must also reset only
   15265 		 * on power good, as well as the MTA (Multicast table array).
   15266 		 * The SMBus release must also be disabled on LCD reset.
   15267 		 */
   15268 
   15269 		/*
   15270 		 * Enable MTA to reset for Intel Rapid Start Technology
   15271 		 * Support
   15272 		 */
   15273 
   15274 release:
   15275 		sc->phy.release(sc);
   15276 	}
   15277 out:
   15278 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15279 
   15280 	if (sc->sc_type == WM_T_ICH8)
   15281 		wm_gig_downshift_workaround_ich8lan(sc);
   15282 
   15283 	if (sc->sc_type >= WM_T_PCH) {
   15284 		wm_oem_bits_config_ich8lan(sc, false);
   15285 
   15286 		/* Reset PHY to activate OEM bits on 82577/8 */
   15287 		if (sc->sc_type == WM_T_PCH)
   15288 			wm_reset_phy(sc);
   15289 
   15290 		if (sc->phy.acquire(sc) != 0)
   15291 			return;
   15292 		wm_write_smbus_addr(sc);
   15293 		sc->phy.release(sc);
   15294 	}
   15295 }
   15296 
   15297 /*
   15298  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15299  *  @sc: pointer to the HW structure
   15300  *
   15301  *  During Sx to S0 transitions on non-managed devices or managed devices
   15302  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15303  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15304  *  the PHY.
   15305  *  On i217, setup Intel Rapid Start Technology.
   15306  */
   15307 static int
   15308 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15309 {
   15310 	device_t dev = sc->sc_dev;
   15311 	int rv;
   15312 
   15313 	if (sc->sc_type < WM_T_PCH2)
   15314 		return 0;
   15315 
   15316 	rv = wm_init_phy_workarounds_pchlan(sc);
   15317 	if (rv != 0)
   15318 		return -1;
   15319 
    15320 	/* For i217 Intel Rapid Start Technology support, when the system
    15321 	 * is transitioning from Sx and no manageability engine is present,
   15322 	 * configure SMBus to restore on reset, disable proxy, and enable
   15323 	 * the reset on MTA (Multicast table array).
   15324 	 */
   15325 	if (sc->sc_phytype == WMPHY_I217) {
   15326 		uint16_t phy_reg;
   15327 
   15328 		if (sc->phy.acquire(sc) != 0)
   15329 			return -1;
   15330 
   15331 		/* Clear Auto Enable LPI after link up */
   15332 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15333 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15334 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15335 
   15336 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15337 			/* Restore clear on SMB if no manageability engine
   15338 			 * is present
   15339 			 */
   15340 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15341 			    &phy_reg);
   15342 			if (rv != 0)
   15343 				goto release;
   15344 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15345 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15346 
   15347 			/* Disable Proxy */
   15348 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15349 		}
   15350 		/* Enable reset on MTA */
    15351 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15352 		if (rv != 0)
   15353 			goto release;
   15354 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15355 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15356 
   15357 release:
   15358 		sc->phy.release(sc);
   15359 		return rv;
   15360 	}
   15361 
   15362 	return 0;
   15363 }
   15364 
   15365 static void
   15366 wm_enable_wakeup(struct wm_softc *sc)
   15367 {
   15368 	uint32_t reg, pmreg;
   15369 	pcireg_t pmode;
   15370 	int rv = 0;
   15371 
   15372 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15373 		device_xname(sc->sc_dev), __func__));
   15374 
   15375 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15376 	    &pmreg, NULL) == 0)
   15377 		return;
   15378 
   15379 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15380 		goto pme;
   15381 
   15382 	/* Advertise the wakeup capability */
   15383 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15384 	    | CTRL_SWDPIN(3));
   15385 
   15386 	/* Keep the laser running on fiber adapters */
   15387 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15388 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15389 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15390 		reg |= CTRL_EXT_SWDPIN(3);
   15391 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15392 	}
   15393 
   15394 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15395 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15396 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15397 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15398 		wm_suspend_workarounds_ich8lan(sc);
   15399 
   15400 #if 0	/* For the multicast packet */
   15401 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15402 	reg |= WUFC_MC;
   15403 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15404 #endif
   15405 
   15406 	if (sc->sc_type >= WM_T_PCH) {
   15407 		rv = wm_enable_phy_wakeup(sc);
   15408 		if (rv != 0)
   15409 			goto pme;
   15410 	} else {
   15411 		/* Enable wakeup by the MAC */
   15412 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15413 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15414 	}
   15415 
   15416 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15417 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15418 		|| (sc->sc_type == WM_T_PCH2))
   15419 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15420 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15421 
   15422 pme:
   15423 	/* Request PME */
   15424 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15425 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15426 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15427 		/* For WOL */
   15428 		pmode |= PCI_PMCSR_PME_EN;
   15429 	} else {
   15430 		/* Disable WOL */
   15431 		pmode &= ~PCI_PMCSR_PME_EN;
   15432 	}
   15433 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15434 }
   15435 
   15436 /* Disable ASPM L0s and/or L1 for workaround */
   15437 static void
   15438 wm_disable_aspm(struct wm_softc *sc)
   15439 {
   15440 	pcireg_t reg, mask = 0;
    15441 	const char *str = "";
   15442 
   15443 	/*
   15444 	 *  Only for PCIe device which has PCIe capability in the PCI config
   15445 	 * space.
   15446 	 */
   15447 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15448 		return;
   15449 
   15450 	switch (sc->sc_type) {
   15451 	case WM_T_82571:
   15452 	case WM_T_82572:
   15453 		/*
   15454 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15455 		 * State Power management L1 State (ASPM L1).
   15456 		 */
   15457 		mask = PCIE_LCSR_ASPM_L1;
   15458 		str = "L1 is";
   15459 		break;
   15460 	case WM_T_82573:
   15461 	case WM_T_82574:
   15462 	case WM_T_82583:
   15463 		/*
   15464 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15465 		 *
    15466 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15467 		 * some chipsets.  The documents for the 82574 and 82583 say
    15468 		 * that disabling L0s with some specific chipsets is
    15469 		 * sufficient, but we follow what the Intel em driver does.
   15470 		 *
   15471 		 * References:
   15472 		 * Errata 8 of the Specification Update of i82573.
   15473 		 * Errata 20 of the Specification Update of i82574.
   15474 		 * Errata 9 of the Specification Update of i82583.
   15475 		 */
   15476 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15477 		str = "L0s and L1 are";
   15478 		break;
   15479 	default:
   15480 		return;
   15481 	}
   15482 
   15483 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15484 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15485 	reg &= ~mask;
   15486 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15487 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15488 
   15489 	/* Print only in wm_attach() */
   15490 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15491 		aprint_verbose_dev(sc->sc_dev,
   15492 		    "ASPM %s disabled to workaround the errata.\n", str);
   15493 }
   15494 
   15495 /* LPLU */
   15496 
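          /*
           * Disable D0 Low Power Link Up (LPLU).  The register which controls
           * LPLU differs between MAC/PHY generations, hence the switch below.
           */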
   15497 static void
   15498 wm_lplu_d0_disable(struct wm_softc *sc)
   15499 {
   15500 	struct mii_data *mii = &sc->sc_mii;
   15501 	uint32_t reg;
   15502 	uint16_t phyval;
   15503 
   15504 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15505 		device_xname(sc->sc_dev), __func__));
   15506 
   15507 	if (sc->sc_phytype == WMPHY_IFE)
   15508 		return;
   15509 
   15510 	switch (sc->sc_type) {
   15511 	case WM_T_82571:
   15512 	case WM_T_82572:
   15513 	case WM_T_82573:
   15514 	case WM_T_82575:
   15515 	case WM_T_82576:
   15516 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15517 		phyval &= ~PMR_D0_LPLU;
   15518 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15519 		break;
   15520 	case WM_T_82580:
   15521 	case WM_T_I350:
   15522 	case WM_T_I210:
   15523 	case WM_T_I211:
   15524 		reg = CSR_READ(sc, WMREG_PHPM);
   15525 		reg &= ~PHPM_D0A_LPLU;
   15526 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15527 		break;
   15528 	case WM_T_82574:
   15529 	case WM_T_82583:
   15530 	case WM_T_ICH8:
   15531 	case WM_T_ICH9:
   15532 	case WM_T_ICH10:
   15533 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15534 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15535 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15536 		CSR_WRITE_FLUSH(sc);
   15537 		break;
   15538 	case WM_T_PCH:
   15539 	case WM_T_PCH2:
   15540 	case WM_T_PCH_LPT:
   15541 	case WM_T_PCH_SPT:
   15542 	case WM_T_PCH_CNP:
   15543 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15544 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15545 		if (wm_phy_resetisblocked(sc) == false)
   15546 			phyval |= HV_OEM_BITS_ANEGNOW;
   15547 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15548 		break;
   15549 	default:
   15550 		break;
   15551 	}
   15552 }
   15553 
   15554 /* EEE */
   15555 
   15556 static int
   15557 wm_set_eee_i350(struct wm_softc *sc)
   15558 {
   15559 	struct ethercom *ec = &sc->sc_ethercom;
   15560 	uint32_t ipcnfg, eeer;
   15561 	uint32_t ipcnfg_mask
   15562 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15563 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15564 
   15565 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15566 
   15567 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15568 	eeer = CSR_READ(sc, WMREG_EEER);
   15569 
   15570 	/* Enable or disable per user setting */
   15571 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15572 		ipcnfg |= ipcnfg_mask;
   15573 		eeer |= eeer_mask;
   15574 	} else {
   15575 		ipcnfg &= ~ipcnfg_mask;
   15576 		eeer &= ~eeer_mask;
   15577 	}
   15578 
   15579 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15580 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15581 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15582 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15583 
   15584 	return 0;
   15585 }
   15586 
   15587 static int
   15588 wm_set_eee_pchlan(struct wm_softc *sc)
   15589 {
   15590 	device_t dev = sc->sc_dev;
   15591 	struct ethercom *ec = &sc->sc_ethercom;
   15592 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15593 	int rv = 0;
   15594 
   15595 	switch (sc->sc_phytype) {
   15596 	case WMPHY_82579:
   15597 		lpa = I82579_EEE_LP_ABILITY;
   15598 		pcs_status = I82579_EEE_PCS_STATUS;
   15599 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15600 		break;
   15601 	case WMPHY_I217:
   15602 		lpa = I217_EEE_LP_ABILITY;
   15603 		pcs_status = I217_EEE_PCS_STATUS;
   15604 		adv_addr = I217_EEE_ADVERTISEMENT;
   15605 		break;
   15606 	default:
   15607 		return 0;
   15608 	}
   15609 
   15610 	if (sc->phy.acquire(sc)) {
   15611 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15612 		return 0;
   15613 	}
   15614 
   15615 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15616 	if (rv != 0)
   15617 		goto release;
   15618 
   15619 	/* Clear bits that enable EEE in various speeds */
   15620 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15621 
   15622 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15623 		/* Save off link partner's EEE ability */
   15624 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15625 		if (rv != 0)
   15626 			goto release;
   15627 
   15628 		/* Read EEE advertisement */
   15629 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15630 			goto release;
   15631 
   15632 		/*
   15633 		 * Enable EEE only for speeds in which the link partner is
   15634 		 * EEE capable and for which we advertise EEE.
   15635 		 */
   15636 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15637 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15638 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15639 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15640 			if ((data & ANLPAR_TX_FD) != 0)
   15641 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15642 			else {
   15643 				/*
   15644 				 * EEE is not supported in 100Half, so ignore
   15645 				 * partner's EEE in 100 ability if full-duplex
   15646 				 * is not advertised.
   15647 				 */
   15648 				sc->eee_lp_ability
   15649 				    &= ~AN_EEEADVERT_100_TX;
   15650 			}
   15651 		}
   15652 	}
   15653 
   15654 	if (sc->sc_phytype == WMPHY_82579) {
   15655 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15656 		if (rv != 0)
   15657 			goto release;
   15658 
   15659 		data &= ~I82579_LPI_PLL_SHUT_100;
   15660 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15661 	}
   15662 
   15663 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15664 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15665 		goto release;
   15666 
   15667 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15668 release:
   15669 	sc->phy.release(sc);
   15670 
   15671 	return rv;
   15672 }
   15673 
   15674 static int
   15675 wm_set_eee(struct wm_softc *sc)
   15676 {
   15677 	struct ethercom *ec = &sc->sc_ethercom;
   15678 
   15679 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15680 		return 0;
   15681 
   15682 	if (sc->sc_type == WM_T_I354) {
   15683 		/* I354 uses an external PHY */
   15684 		return 0; /* not yet */
   15685 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15686 		return wm_set_eee_i350(sc);
   15687 	else if (sc->sc_type >= WM_T_PCH2)
   15688 		return wm_set_eee_pchlan(sc);
   15689 
   15690 	return 0;
   15691 }
   15692 
   15693 /*
   15694  * Workarounds (mainly PHY related).
   15695  * Basically, PHY's workarounds are in the PHY drivers.
   15696  */
   15697 
   15698 /* Work-around for 82566 Kumeran PCS lock loss */
   15699 static int
   15700 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15701 {
   15702 	struct mii_data *mii = &sc->sc_mii;
   15703 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15704 	int i, reg, rv;
   15705 	uint16_t phyreg;
   15706 
   15707 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15708 		device_xname(sc->sc_dev), __func__));
   15709 
   15710 	/* If the link is not up, do nothing */
   15711 	if ((status & STATUS_LU) == 0)
   15712 		return 0;
   15713 
   15714 	/* Nothing to do if the link is other than 1Gbps */
   15715 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15716 		return 0;
   15717 
   15718 	for (i = 0; i < 10; i++) {
    15719 		/* Read twice; the first read may return a stale latched value */
   15720 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15721 		if (rv != 0)
   15722 			return rv;
   15723 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15724 		if (rv != 0)
   15725 			return rv;
   15726 
   15727 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15728 			goto out;	/* GOOD! */
   15729 
   15730 		/* Reset the PHY */
   15731 		wm_reset_phy(sc);
   15732 		delay(5*1000);
   15733 	}
   15734 
   15735 	/* Disable GigE link negotiation */
   15736 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15737 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15738 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15739 
   15740 	/*
   15741 	 * Call gig speed drop workaround on Gig disable before accessing
   15742 	 * any PHY registers.
   15743 	 */
   15744 	wm_gig_downshift_workaround_ich8lan(sc);
   15745 
   15746 out:
   15747 	return 0;
   15748 }
   15749 
   15750 /*
   15751  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15752  *  @sc: pointer to the HW structure
   15753  *
   15754  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15755  *  LPLU, Gig disable, MDIC PHY reset):
   15756  *    1) Set Kumeran Near-end loopback
   15757  *    2) Clear Kumeran Near-end loopback
   15758  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15759  */
   15760 static void
   15761 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15762 {
   15763 	uint16_t kmreg;
   15764 
   15765 	/* Only for igp3 */
   15766 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15767 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15768 			return;
   15769 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15770 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15771 			return;
   15772 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15773 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15774 	}
   15775 }
   15776 
   15777 /*
   15778  * Workaround for pch's PHYs
   15779  * XXX should be moved to new PHY driver?
   15780  */
   15781 static int
   15782 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15783 {
   15784 	device_t dev = sc->sc_dev;
   15785 	struct mii_data *mii = &sc->sc_mii;
   15786 	struct mii_softc *child;
   15787 	uint16_t phy_data, phyrev = 0;
   15788 	int phytype = sc->sc_phytype;
   15789 	int rv;
   15790 
   15791 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15792 		device_xname(dev), __func__));
   15793 	KASSERT(sc->sc_type == WM_T_PCH);
   15794 
   15795 	/* Set MDIO slow mode before any other MDIO access */
   15796 	if (phytype == WMPHY_82577)
   15797 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15798 			return rv;
   15799 
   15800 	child = LIST_FIRST(&mii->mii_phys);
   15801 	if (child != NULL)
   15802 		phyrev = child->mii_mpd_rev;
   15803 
    15804 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15805 	if ((child != NULL) &&
   15806 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15807 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15808 		/* Disable generation of early preamble (0x4431) */
   15809 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15810 		    &phy_data);
   15811 		if (rv != 0)
   15812 			return rv;
   15813 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15814 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15815 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15816 		    phy_data);
   15817 		if (rv != 0)
   15818 			return rv;
   15819 
   15820 		/* Preamble tuning for SSC */
   15821 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15822 		if (rv != 0)
   15823 			return rv;
   15824 	}
   15825 
   15826 	/* 82578 */
   15827 	if (phytype == WMPHY_82578) {
   15828 		/*
   15829 		 * Return registers to default by doing a soft reset then
   15830 		 * writing 0x3140 to the control register
   15831 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15832 		 */
   15833 		if ((child != NULL) && (phyrev < 2)) {
   15834 			PHY_RESET(child);
   15835 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15836 			if (rv != 0)
   15837 				return rv;
   15838 		}
   15839 	}
   15840 
   15841 	/* Select page 0 */
   15842 	if ((rv = sc->phy.acquire(sc)) != 0)
   15843 		return rv;
   15844 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   15845 	sc->phy.release(sc);
   15846 	if (rv != 0)
   15847 		return rv;
   15848 
   15849 	/*
   15850 	 * Configure the K1 Si workaround during phy reset assuming there is
   15851 	 * link so that it disables K1 if link is in 1Gbps.
   15852 	 */
   15853 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15854 		return rv;
   15855 
   15856 	/* Workaround for link disconnects on a busy hub in half duplex */
   15857 	rv = sc->phy.acquire(sc);
   15858 	if (rv)
   15859 		return rv;
   15860 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15861 	if (rv)
   15862 		goto release;
   15863 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15864 	    phy_data & 0x00ff);
   15865 	if (rv)
   15866 		goto release;
   15867 
   15868 	/* Set MSE higher to enable link to stay up when noise is high */
   15869 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15870 release:
   15871 	sc->phy.release(sc);
   15872 
   15873 	return rv;
   15874 }
   15875 
   15876 /*
   15877  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15878  *  @sc:   pointer to the HW structure
   15879  */
   15880 static void
   15881 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15882 {
   15883 	device_t dev = sc->sc_dev;
   15884 	uint32_t mac_reg;
   15885 	uint16_t i, wuce;
   15886 	int count;
   15887 
   15888 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15889 		device_xname(sc->sc_dev), __func__));
   15890 
   15891 	if (sc->phy.acquire(sc) != 0)
   15892 		return;
   15893 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15894 		goto release;
   15895 
   15896 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15897 	count = wm_rar_count(sc);
   15898 	for (i = 0; i < count; i++) {
   15899 		uint16_t lo, hi;
   15900 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15901 		lo = (uint16_t)(mac_reg & 0xffff);
   15902 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15903 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15904 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15905 
   15906 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15907 		lo = (uint16_t)(mac_reg & 0xffff);
   15908 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15909 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15910 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15911 	}
   15912 
   15913 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15914 
   15915 release:
   15916 	sc->phy.release(sc);
   15917 }
   15918 
   15919 /*
   15920  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15921  *  done after every PHY reset.
   15922  */
   15923 static int
   15924 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15925 {
   15926 	device_t dev = sc->sc_dev;
   15927 	int rv;
   15928 
   15929 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15930 		device_xname(dev), __func__));
   15931 	KASSERT(sc->sc_type == WM_T_PCH2);
   15932 
   15933 	/* Set MDIO slow mode before any other MDIO access */
   15934 	rv = wm_set_mdio_slow_mode_hv(sc);
   15935 	if (rv != 0)
   15936 		return rv;
   15937 
   15938 	rv = sc->phy.acquire(sc);
   15939 	if (rv != 0)
   15940 		return rv;
   15941 	/* Set MSE higher to enable link to stay up when noise is high */
   15942 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15943 	if (rv != 0)
   15944 		goto release;
   15945 	/* Drop link after 5 times MSE threshold was reached */
   15946 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15947 release:
   15948 	sc->phy.release(sc);
   15949 
   15950 	return rv;
   15951 }
   15952 
   15953 /**
   15954  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15955  *  @link: link up bool flag
   15956  *
   15957  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15958  *  preventing further DMA write requests.  Workaround the issue by disabling
    15959  *  the de-assertion of the clock request when in 1Gbps mode.
   15960  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15961  *  speeds in order to avoid Tx hangs.
   15962  **/
   15963 static int
   15964 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15965 {
   15966 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15967 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15968 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15969 	uint16_t phyreg;
   15970 
   15971 	if (link && (speed == STATUS_SPEED_1000)) {
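          		/*
          		 * At 1Gbps, disable K1 in KMRN and assert the PLL
          		 * clock request (FEXTNVM6) so DMA completions are not
          		 * missed.
          		 */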
    15972 		int rv = sc->phy.acquire(sc);
          		if (rv != 0)
          			return rv;
    15973 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    15974 		    &phyreg);
   15975 		if (rv != 0)
   15976 			goto release;
   15977 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15978 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15979 		if (rv != 0)
   15980 			goto release;
   15981 		delay(20);
   15982 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15983 
   15984 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15985 		    &phyreg);
   15986 release:
   15987 		sc->phy.release(sc);
   15988 		return rv;
   15989 	}
   15990 
   15991 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15992 
   15993 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15994 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15995 	    || !link
   15996 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15997 		goto update_fextnvm6;
   15998 
   15999 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16000 
   16001 	/* Clear link status transmit timeout */
   16002 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16003 	if (speed == STATUS_SPEED_100) {
   16004 		/* Set inband Tx timeout to 5x10us for 100Half */
   16005 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16006 
   16007 		/* Do not extend the K1 entry latency for 100Half */
   16008 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16009 	} else {
   16010 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16011 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16012 
   16013 		/* Extend the K1 entry latency for 10 Mbps */
   16014 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16015 	}
   16016 
   16017 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16018 
   16019 update_fextnvm6:
   16020 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16021 	return 0;
   16022 }
   16023 
   16024 /*
   16025  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16026  *  @sc:   pointer to the HW structure
   16027  *  @link: link up bool flag
   16028  *
   16029  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    16030  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   16031  *  If link is down, the function will restore the default K1 setting located
   16032  *  in the NVM.
   16033  */
   16034 static int
   16035 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16036 {
   16037 	int k1_enable = sc->sc_nvm_k1_enabled;
   16038 
   16039 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16040 		device_xname(sc->sc_dev), __func__));
   16041 
   16042 	if (sc->phy.acquire(sc) != 0)
   16043 		return -1;
   16044 
   16045 	if (link) {
   16046 		k1_enable = 0;
   16047 
   16048 		/* Link stall fix for link up */
   16049 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16050 		    0x0100);
   16051 	} else {
   16052 		/* Link stall fix for link down */
   16053 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16054 		    0x4100);
   16055 	}
   16056 
   16057 	wm_configure_k1_ich8lan(sc, k1_enable);
   16058 	sc->phy.release(sc);
   16059 
   16060 	return 0;
   16061 }
   16062 
   16063 /*
   16064  *  wm_k1_workaround_lv - K1 Si workaround
   16065  *  @sc:   pointer to the HW structure
   16066  *
   16067  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   16068  *  Disable K1 for 1000 and 100 speeds
   16069  */
   16070 static int
   16071 wm_k1_workaround_lv(struct wm_softc *sc)
   16072 {
   16073 	uint32_t reg;
   16074 	uint16_t phyreg;
   16075 	int rv;
   16076 
   16077 	if (sc->sc_type != WM_T_PCH2)
   16078 		return 0;
   16079 
   16080 	/* Set K1 beacon duration based on 10Mbps speed */
   16081 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16082 	if (rv != 0)
   16083 		return rv;
   16084 
   16085 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16086 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16087 		if (phyreg &
   16088 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    16089 			/* LV 1G/100 packet drop issue workaround */
   16090 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16091 			    &phyreg);
   16092 			if (rv != 0)
   16093 				return rv;
   16094 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16095 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16096 			    phyreg);
   16097 			if (rv != 0)
   16098 				return rv;
   16099 		} else {
   16100 			/* For 10Mbps */
   16101 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16102 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16103 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16104 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16105 		}
   16106 	}
   16107 
   16108 	return 0;
   16109 }
   16110 
   16111 /*
   16112  *  wm_link_stall_workaround_hv - Si workaround
   16113  *  @sc: pointer to the HW structure
   16114  *
   16115  *  This function works around a Si bug where the link partner can get
   16116  *  a link up indication before the PHY does. If small packets are sent
   16117  *  by the link partner they can be placed in the packet buffer without
   16118  *  being properly accounted for by the PHY and will stall preventing
   16119  *  further packets from being received.  The workaround is to clear the
   16120  *  packet buffer after the PHY detects link up.
   16121  */
   16122 static int
   16123 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16124 {
   16125 	uint16_t phyreg;
   16126 
   16127 	if (sc->sc_phytype != WMPHY_82578)
   16128 		return 0;
   16129 
   16130 	/* Do not apply workaround if in PHY loopback bit 14 set */
   16131 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16132 	if ((phyreg & BMCR_LOOP) != 0)
   16133 		return 0;
   16134 
   16135 	/* Check if link is up and at 1Gbps */
   16136 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16137 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16138 	    | BM_CS_STATUS_SPEED_MASK;
   16139 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16140 		| BM_CS_STATUS_SPEED_1000))
   16141 		return 0;
   16142 
   16143 	delay(200 * 1000);	/* XXX too big */
   16144 
   16145 	/* Flush the packets in the fifo buffer */
   16146 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16147 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16148 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16149 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16150 
   16151 	return 0;
   16152 }
   16153 
   16154 static int
   16155 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16156 {
   16157 	int rv;
   16158 	uint16_t reg;
   16159 
   16160 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16161 	if (rv != 0)
   16162 		return rv;
   16163 
   16164 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16165 	    reg | HV_KMRN_MDIO_SLOW);
   16166 }
   16167 
/*
 *  wm_configure_k1_ich8lan - Configure K1 power state
 *  @sc: pointer to the HW structure
 *  @k1_enable: K1 state to configure
 *
 *  Configure the K1 power state based on the provided parameter.
 *  Assumes semaphore already acquired.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmreg;
	int rv;

	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
	if (rv != 0)
		return;

	if (k1_enable)
		kmreg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmreg &= ~KUMCTRLSTA_K1_ENABLE;

	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
	if (rv != 0)
		return;

	delay(20);

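	/*
	 * Briefly force the MAC speed configuration (with the speed
	 * bypass bit set), then restore the original CTRL/CTRL_EXT
	 * values; this cycle appears to be needed for the new K1
	 * setting to take effect.
	 */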
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case - the 82575 needs manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM; the setup is the same as the one mentioned in the
	 * FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

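/* A PHY ID of all zeros or all ones indicates a failed MDIO read. */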
#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
		sc->phy.acquire(sc);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, phyreg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

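/*
 *  wm_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @sc: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY without
 *  going through the MAC; it is used, for example, to bring the PHY
 *  out of a forced-SMBus state.
 */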
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

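/*
 *  wm_platform_pm_pch_lpt - set platform power management values
 *  @sc: pointer to the HW structure
 *  @link: whether the link is currently up
 *
 *  Set the Latency Tolerance Reporting (LTR) values and the OBFF high
 *  water mark, which tell the platform how long the device can buffer
 *  traffic and therefore how deeply it may power down while the link
 *  is up.
 */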
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

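		/*
		 * Worked example with illustrative numbers: for
		 * lat_ns = 2,000,000 the loop above gives
		 *   scale 1: value = howmany(2000000, 32) = 62500
		 *   scale 2: value = howmany(62500, 32)   = 1954
		 *   scale 3: value = howmany(1954, 32)    = 62
		 * so lat_enc encodes scale 3 and value 62, which decodes
		 * back to 62 * 2^(5*3) ns = 2,031,616 ns >= 2,000,000 ns.
		 */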
		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

		if (lat_ns) {
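			/*
			 * Convert the tolerated latency back into the
			 * amount of Rx buffer (KB, approximated as 1000
			 * bytes each) that fills at line rate in that
			 * time; the remainder of rxa becomes the OBFF
			 * high water mark.
			 */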
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 *
 * Note that on NetBSD this function is called in both the FLASH and
 * the iNVM case.
 */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
		/*
		 * The default value of the Initialization Control Word 1
		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
		 */
		nvmword = INVM_DEFAULT_AL;
	}
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

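	/*
	 * Check whether the PHY PLL came up configured; if not, reset
	 * the PHY, bounce the device through D3hot and back to D0 with
	 * the override autoload word in EEARBC, and check again.
	 */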
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

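/*
 *  wm_legacy_irq_quirk_spt - quirk for legacy (INTx) interrupts
 *  @sc: pointer to the HW structure
 *
 *  Keep the side clock ungated and disable IOSF-SB clock gating and
 *  clock requests, which appears to be required for legacy interrupts
 *  to be delivered reliably on PCH_SPT/PCH_CNP.
 */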
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}
