      1 /*	$NetBSD: if_wm.c,v 1.687 2020/09/15 08:39:04 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
      76  *	- Tx multi-queue improvement (refine queue selection logic)
      77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.687 2020/09/15 08:39:04 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 #include <sys/sysctl.h>
    109 #include <sys/workqueue.h>
    110 
    111 #include <sys/rndsource.h>
    112 
    113 #include <net/if.h>
    114 #include <net/if_dl.h>
    115 #include <net/if_media.h>
    116 #include <net/if_ether.h>
    117 
    118 #include <net/bpf.h>
    119 
    120 #include <net/rss_config.h>
    121 
    122 #include <netinet/in.h>			/* XXX for struct ip */
    123 #include <netinet/in_systm.h>		/* XXX for struct ip */
    124 #include <netinet/ip.h>			/* XXX for struct ip */
    125 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    126 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    127 
    128 #include <sys/bus.h>
    129 #include <sys/intr.h>
    130 #include <machine/endian.h>
    131 
    132 #include <dev/mii/mii.h>
    133 #include <dev/mii/mdio.h>
    134 #include <dev/mii/miivar.h>
    135 #include <dev/mii/miidevs.h>
    136 #include <dev/mii/mii_bitbang.h>
    137 #include <dev/mii/ikphyreg.h>
    138 #include <dev/mii/igphyreg.h>
    139 #include <dev/mii/igphyvar.h>
    140 #include <dev/mii/inbmphyreg.h>
    141 #include <dev/mii/ihphyreg.h>
    142 #include <dev/mii/makphyreg.h>
    143 
    144 #include <dev/pci/pcireg.h>
    145 #include <dev/pci/pcivar.h>
    146 #include <dev/pci/pcidevs.h>
    147 
    148 #include <dev/pci/if_wmreg.h>
    149 #include <dev/pci/if_wmvar.h>
    150 
    151 #ifdef WM_DEBUG
    152 #define	WM_DEBUG_LINK		__BIT(0)
    153 #define	WM_DEBUG_TX		__BIT(1)
    154 #define	WM_DEBUG_RX		__BIT(2)
    155 #define	WM_DEBUG_GMII		__BIT(3)
    156 #define	WM_DEBUG_MANAGE		__BIT(4)
    157 #define	WM_DEBUG_NVM		__BIT(5)
    158 #define	WM_DEBUG_INIT		__BIT(6)
    159 #define	WM_DEBUG_LOCK		__BIT(7)
    160 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    161     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    162 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    163 #else
    164 #define	DPRINTF(x, y)	__nothing
    165 #endif /* WM_DEBUG */
    166 
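         /*
          * DPRINTF() usage sketch: the second argument carries its own
          * parentheses so that it can expand to a full printf() argument
          * list, e.g.
          *
          *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
          *	    device_xname(sc->sc_dev)));
          */
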
    167 #ifdef NET_MPSAFE
    168 #define WM_MPSAFE	1
    169 #define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
    170 #define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
    171 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    172 #else
    173 #define WM_CALLOUT_FLAGS	0
    174 #define WM_SOFTINT_FLAGS	0
    175 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
    176 #endif
    177 
    178 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    179 
    180 /*
     181  * The maximum number of interrupts this device driver uses.
    182  */
    183 #define WM_MAX_NQUEUEINTR	16
    184 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    185 
    186 #ifndef WM_DISABLE_MSI
    187 #define	WM_DISABLE_MSI 0
    188 #endif
    189 #ifndef WM_DISABLE_MSIX
    190 #define	WM_DISABLE_MSIX 0
    191 #endif
    192 
    193 int wm_disable_msi = WM_DISABLE_MSI;
    194 int wm_disable_msix = WM_DISABLE_MSIX;
    195 
    196 #ifndef WM_WATCHDOG_TIMEOUT
    197 #define WM_WATCHDOG_TIMEOUT 5
    198 #endif
    199 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    200 
    201 /*
    202  * Transmit descriptor list size.  Due to errata, we can only have
    203  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    204  * on >= 82544. We tell the upper layers that they can queue a lot
    205  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    206  * of them at a time.
    207  *
    208  * We allow up to 64 DMA segments per packet.  Pathological packet
    209  * chains containing many small mbufs have been observed in zero-copy
     210  * situations with jumbo frames. If an mbuf chain needs more than 64 DMA
     211  * segments, m_defrag() is called to compact it (see the sketch below).
    212  */
    213 #define	WM_NTXSEGS		64
    214 #define	WM_IFQUEUELEN		256
    215 #define	WM_TXQUEUELEN_MAX	64
    216 #define	WM_TXQUEUELEN_MAX_82547	16
    217 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    218 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    219 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    220 #define	WM_NTXDESC_82542	256
    221 #define	WM_NTXDESC_82544	4096
    222 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    223 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    224 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    225 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    226 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    227 
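         /*
          * A minimal sketch (hypothetical helper, not the driver's actual
          * transmit path) of the m_defrag() fallback referenced above: if an
          * mbuf chain needs more than WM_NTXSEGS segments,
          * bus_dmamap_load_mbuf() fails with EFBIG, the chain is compacted
          * and the load is retried once.
          */
         #if 0
         static int
         wm_example_load_txbuf(struct wm_softc *sc, bus_dmamap_t dmamap,
             struct mbuf **m0p)
         {
         	int error;
         
         	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, *m0p,
         	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
         	if (error == EFBIG) {
         		/* Compact the chain into fewer, larger mbufs and retry. */
         		struct mbuf *m = m_defrag(*m0p, M_DONTWAIT);
         		if (m == NULL)
         			return ENOBUFS;
         		*m0p = m;
         		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, *m0p,
         		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
         	}
         	return error;
         }
         #endif
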
    228 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    229 
    230 #define	WM_TXINTERQSIZE		256
    231 
    232 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    233 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    234 #endif
    235 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    236 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    237 #endif
    238 
    239 /*
     240  * Receive descriptor list size.  We have one Rx buffer per normal-sized
     241  * packet.  Jumbo packets consume 5 Rx buffers for a full-sized
    242  * packet.  We allocate 256 receive descriptors, each with a 2k
    243  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    244  */
    245 #define	WM_NRXDESC		256U
    246 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    247 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    248 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    249 
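         /*
          * Worked numbers for the comment above, assuming a 9018-byte jumbo
          * frame (9000-byte MTU plus Ethernet header and FCS): each frame
          * spans howmany(9018, MCLBYTES) = 5 clusters, and 256 / 5 = 51,
          * i.e. roughly 50 jumbo packets fit in the ring at once.
          */
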
    250 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    251 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    252 #endif
    253 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    254 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    255 #endif
    256 
    257 typedef union txdescs {
    258 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    259 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    260 } txdescs_t;
    261 
    262 typedef union rxdescs {
    263 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    264 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    265 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    266 } rxdescs_t;
    267 
    268 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    269 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    270 
    271 /*
    272  * Software state for transmit jobs.
    273  */
    274 struct wm_txsoft {
    275 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    276 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    277 	int txs_firstdesc;		/* first descriptor in packet */
    278 	int txs_lastdesc;		/* last descriptor in packet */
    279 	int txs_ndesc;			/* # of descriptors used */
    280 };
    281 
    282 /*
    283  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    284  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    285  * them together.
    286  */
    287 struct wm_rxsoft {
    288 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    289 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    290 };
    291 
    292 #define WM_LINKUP_TIMEOUT	50
    293 
    294 static uint16_t swfwphysem[] = {
    295 	SWFW_PHY0_SM,
    296 	SWFW_PHY1_SM,
    297 	SWFW_PHY2_SM,
    298 	SWFW_PHY3_SM
    299 };
    300 
    301 static const uint32_t wm_82580_rxpbs_table[] = {
    302 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    303 };
    304 
    305 struct wm_softc;
    306 
    307 #ifdef WM_EVENT_COUNTERS
    308 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    309 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    310 	struct evcnt qname##_ev_##evname;
    311 
    312 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    313 	do {								\
    314 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    315 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    316 		    "%s%02d%s", #qname, (qnum), #evname);		\
    317 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    318 		    (evtype), NULL, (xname),				\
    319 		    (q)->qname##_##evname##_evcnt_name);		\
    320 	} while (0)
    321 
    322 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    323 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    324 
    325 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    326 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    327 
    328 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    329 	evcnt_detach(&(q)->qname##_ev_##evname);
    330 #endif /* WM_EVENT_COUNTERS */
    331 
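         /*
          * Usage sketch for the macros above: WM_Q_EVCNT_DEFINE(txq, txsstall)
          * declares
          *
          *	char txq_txsstall_evcnt_name[sizeof("qname##XX##evname")];
          *	struct evcnt txq_ev_txsstall;
          *
          * (## is not pasted inside a string literal, so every name buffer is
          * a fixed 18 bytes), and WM_Q_EVCNT_ATTACH(txq, txsstall, q, 0,
          * xname, EVCNT_TYPE_MISC) then snprintf()s "txq00txsstall" into the
          * buffer and registers the counter via evcnt_attach_dynamic().
          */
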
    332 struct wm_txqueue {
    333 	kmutex_t *txq_lock;		/* lock for tx operations */
    334 
    335 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    336 
    337 	/* Software state for the transmit descriptors. */
    338 	int txq_num;			/* must be a power of two */
    339 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    340 
    341 	/* TX control data structures. */
    342 	int txq_ndesc;			/* must be a power of two */
     343 	size_t txq_descsize;		/* size of a Tx descriptor */
     344 	txdescs_t *txq_descs_u;
     345 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
     346 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     347 	int txq_desc_rseg;		/* real number of control segments */
    348 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    349 #define	txq_descs	txq_descs_u->sctxu_txdescs
    350 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    351 
    352 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    353 
    354 	int txq_free;			/* number of free Tx descriptors */
    355 	int txq_next;			/* next ready Tx descriptor */
    356 
    357 	int txq_sfree;			/* number of free Tx jobs */
    358 	int txq_snext;			/* next free Tx job */
    359 	int txq_sdirty;			/* dirty Tx jobs */
    360 
    361 	/* These 4 variables are used only on the 82547. */
    362 	int txq_fifo_size;		/* Tx FIFO size */
    363 	int txq_fifo_head;		/* current head of FIFO */
    364 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    365 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    366 
    367 	/*
    368 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     369 	 * CPUs. This queue mediates between them without blocking.
    370 	 */
    371 	pcq_t *txq_interq;
    372 
    373 	/*
     374 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     375 	 * to manage the Tx H/W queue's busy flag.
    376 	 */
    377 	int txq_flags;			/* flags for H/W queue, see below */
    378 #define	WM_TXQ_NO_SPACE	0x1
    379 
    380 	bool txq_stopping;
    381 
    382 	bool txq_sending;
    383 	time_t txq_lastsent;
    384 
    385 	/* Checksum flags used for previous packet */
    386 	uint32_t 	txq_last_hw_cmd;
    387 	uint8_t 	txq_last_hw_fields;
    388 	uint16_t	txq_last_hw_ipcs;
    389 	uint16_t	txq_last_hw_tucs;
    390 
    391 	uint32_t txq_packets;		/* for AIM */
    392 	uint32_t txq_bytes;		/* for AIM */
    393 #ifdef WM_EVENT_COUNTERS
    394 	/* TX event counters */
    395 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    396 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    397 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    398 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    399 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    400 					    /* XXX not used? */
    401 
    402 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    403 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    404 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    405 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    406 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    407 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
    408 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    409 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    410 					    /* other than toomanyseg */
    411 
     412 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
     413 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
     414 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
     415 	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip writing cksum context */
    416 
    417 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    418 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    419 #endif /* WM_EVENT_COUNTERS */
    420 };
    421 
    422 struct wm_rxqueue {
    423 	kmutex_t *rxq_lock;		/* lock for rx operations */
    424 
    425 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    426 
    427 	/* Software state for the receive descriptors. */
    428 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    429 
    430 	/* RX control data structures. */
    431 	int rxq_ndesc;			/* must be a power of two */
     432 	size_t rxq_descsize;		/* size of an Rx descriptor */
     433 	rxdescs_t *rxq_descs_u;
     434 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
     435 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     436 	int rxq_desc_rseg;		/* real number of control segments */
    437 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    438 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    439 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    440 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    441 
    442 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    443 
    444 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    445 	int rxq_discard;
    446 	int rxq_len;
    447 	struct mbuf *rxq_head;
    448 	struct mbuf *rxq_tail;
    449 	struct mbuf **rxq_tailp;
    450 
    451 	bool rxq_stopping;
    452 
    453 	uint32_t rxq_packets;		/* for AIM */
    454 	uint32_t rxq_bytes;		/* for AIM */
    455 #ifdef WM_EVENT_COUNTERS
    456 	/* RX event counters */
    457 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    458 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    459 
    460 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    461 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    462 #endif
    463 };
    464 
    465 struct wm_queue {
    466 	int wmq_id;			/* index of TX/RX queues */
    467 	int wmq_intr_idx;		/* index of MSI-X tables */
    468 
    469 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    470 	bool wmq_set_itr;
    471 
    472 	struct wm_txqueue wmq_txq;
    473 	struct wm_rxqueue wmq_rxq;
    474 
    475 	bool wmq_txrx_use_workqueue;
    476 	struct work wmq_cookie;
    477 	void *wmq_si;
    478 };
    479 
    480 struct wm_phyop {
    481 	int (*acquire)(struct wm_softc *);
    482 	void (*release)(struct wm_softc *);
    483 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    484 	int (*writereg_locked)(device_t, int, int, uint16_t);
    485 	int reset_delay_us;
    486 	bool no_errprint;
    487 };
    488 
    489 struct wm_nvmop {
    490 	int (*acquire)(struct wm_softc *);
    491 	void (*release)(struct wm_softc *);
    492 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    493 };
    494 
    495 /*
    496  * Software state per device.
    497  */
    498 struct wm_softc {
    499 	device_t sc_dev;		/* generic device information */
    500 	bus_space_tag_t sc_st;		/* bus space tag */
    501 	bus_space_handle_t sc_sh;	/* bus space handle */
    502 	bus_size_t sc_ss;		/* bus space size */
    503 	bus_space_tag_t sc_iot;		/* I/O space tag */
    504 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    505 	bus_size_t sc_ios;		/* I/O space size */
    506 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    507 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    508 	bus_size_t sc_flashs;		/* flash registers space size */
    509 	off_t sc_flashreg_offset;	/*
    510 					 * offset to flash registers from
    511 					 * start of BAR
    512 					 */
    513 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    514 
    515 	struct ethercom sc_ethercom;	/* ethernet common data */
    516 	struct mii_data sc_mii;		/* MII/media information */
    517 
    518 	pci_chipset_tag_t sc_pc;
    519 	pcitag_t sc_pcitag;
    520 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    521 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    522 
    523 	uint16_t sc_pcidevid;		/* PCI device ID */
    524 	wm_chip_type sc_type;		/* MAC type */
    525 	int sc_rev;			/* MAC revision */
    526 	wm_phy_type sc_phytype;		/* PHY type */
    527 	uint8_t sc_sfptype;		/* SFP type */
    528 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    529 #define	WM_MEDIATYPE_UNKNOWN		0x00
    530 #define	WM_MEDIATYPE_FIBER		0x01
    531 #define	WM_MEDIATYPE_COPPER		0x02
    532 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    533 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    534 	int sc_flags;			/* flags; see below */
    535 	u_short sc_if_flags;		/* last if_flags */
    536 	int sc_ec_capenable;		/* last ec_capenable */
    537 	int sc_flowflags;		/* 802.3x flow control flags */
    538 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    539 	int sc_align_tweak;
    540 
    541 	void *sc_ihs[WM_MAX_NINTR];	/*
    542 					 * interrupt cookie.
    543 					 * - legacy and msi use sc_ihs[0] only
    544 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    545 					 */
    546 	pci_intr_handle_t *sc_intrs;	/*
    547 					 * legacy and msi use sc_intrs[0] only
     548 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    549 					 */
    550 	int sc_nintrs;			/* number of interrupts */
    551 
    552 	int sc_link_intr_idx;		/* index of MSI-X tables */
    553 
    554 	callout_t sc_tick_ch;		/* tick callout */
    555 	bool sc_core_stopping;
    556 
    557 	int sc_nvm_ver_major;
    558 	int sc_nvm_ver_minor;
    559 	int sc_nvm_ver_build;
    560 	int sc_nvm_addrbits;		/* NVM address bits */
    561 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    562 	int sc_ich8_flash_base;
    563 	int sc_ich8_flash_bank_size;
    564 	int sc_nvm_k1_enabled;
    565 
    566 	int sc_nqueues;
    567 	struct wm_queue *sc_queue;
    568 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    569 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    570 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    571 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    572 	struct workqueue *sc_queue_wq;
    573 	bool sc_txrx_use_workqueue;
    574 
    575 	int sc_affinity_offset;
    576 
    577 #ifdef WM_EVENT_COUNTERS
    578 	/* Event counters. */
    579 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    580 
    581 	/* WM_T_82542_2_1 only */
    582 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    583 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    584 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    585 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    586 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    587 #endif /* WM_EVENT_COUNTERS */
    588 
    589 	struct sysctllog *sc_sysctllog;
    590 
     591 	/* This variable is used only on the 82547. */
    592 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    593 
    594 	uint32_t sc_ctrl;		/* prototype CTRL register */
    595 #if 0
    596 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    597 #endif
    598 	uint32_t sc_icr;		/* prototype interrupt bits */
    599 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    600 	uint32_t sc_tctl;		/* prototype TCTL register */
    601 	uint32_t sc_rctl;		/* prototype RCTL register */
    602 	uint32_t sc_txcw;		/* prototype TXCW register */
    603 	uint32_t sc_tipg;		/* prototype TIPG register */
    604 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    605 	uint32_t sc_pba;		/* prototype PBA register */
    606 
    607 	int sc_tbi_linkup;		/* TBI link status */
    608 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    609 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    610 
    611 	int sc_mchash_type;		/* multicast filter offset */
    612 
    613 	krndsource_t rnd_source;	/* random source */
    614 
    615 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    616 
    617 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    618 	kmutex_t *sc_ich_phymtx;	/*
    619 					 * 82574/82583/ICH/PCH specific PHY
    620 					 * mutex. For 82574/82583, the mutex
    621 					 * is used for both PHY and NVM.
    622 					 */
    623 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    624 
    625 	struct wm_phyop phy;
    626 	struct wm_nvmop nvm;
    627 };
    628 
    629 #define WM_CORE_LOCK(_sc)						\
    630 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    631 #define WM_CORE_UNLOCK(_sc)						\
    632 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    633 #define WM_CORE_LOCKED(_sc)						\
    634 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    635 
    636 #define	WM_RXCHAIN_RESET(rxq)						\
    637 do {									\
    638 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    639 	*(rxq)->rxq_tailp = NULL;					\
    640 	(rxq)->rxq_len = 0;						\
    641 } while (/*CONSTCOND*/0)
    642 
    643 #define	WM_RXCHAIN_LINK(rxq, m)						\
    644 do {									\
    645 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    646 	(rxq)->rxq_tailp = &(m)->m_next;				\
    647 } while (/*CONSTCOND*/0)
    648 
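         /*
          * Usage sketch for the chain macros above: while a packet's
          * descriptors arrive without the end-of-packet bit set, each buffer
          * is appended with WM_RXCHAIN_LINK(rxq, m); once the final
          * descriptor is seen, the completed chain is taken from
          * rxq->rxq_head and the queue is rearmed with WM_RXCHAIN_RESET(rxq).
          */
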
    649 #ifdef WM_EVENT_COUNTERS
    650 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    651 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    652 
    653 #define WM_Q_EVCNT_INCR(qname, evname)			\
    654 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    655 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    656 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    657 #else /* !WM_EVENT_COUNTERS */
    658 #define	WM_EVCNT_INCR(ev)	/* nothing */
    659 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    660 
    661 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    662 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    663 #endif /* !WM_EVENT_COUNTERS */
    664 
    665 #define	CSR_READ(sc, reg)						\
    666 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    667 #define	CSR_WRITE(sc, reg, val)						\
    668 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    669 #define	CSR_WRITE_FLUSH(sc)						\
    670 	(void)CSR_READ((sc), WMREG_STATUS)
    671 
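         /*
          * CSR_WRITE_FLUSH() forces posted PCI writes out to the chip by
          * issuing a harmless read of the STATUS register.  The usual pattern
          * is
          *
          *	CSR_WRITE(sc, reg, val);
          *	CSR_WRITE_FLUSH(sc);
          *
          * wherever a write must actually reach the hardware before the
          * driver continues (e.g. around resets and delay loops).
          */
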
    672 #define ICH8_FLASH_READ32(sc, reg)					\
    673 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    674 	    (reg) + sc->sc_flashreg_offset)
    675 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    676 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    677 	    (reg) + sc->sc_flashreg_offset, (data))
    678 
    679 #define ICH8_FLASH_READ16(sc, reg)					\
    680 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    681 	    (reg) + sc->sc_flashreg_offset)
    682 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    683 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    684 	    (reg) + sc->sc_flashreg_offset, (data))
    685 
    686 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    687 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    688 
    689 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    690 #define	WM_CDTXADDR_HI(txq, x)						\
    691 	(sizeof(bus_addr_t) == 8 ?					\
    692 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    693 
    694 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    695 #define	WM_CDRXADDR_HI(rxq, x)						\
    696 	(sizeof(bus_addr_t) == 8 ?					\
    697 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    698 
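         /*
          * The _HI variants yield the upper 32 bits of a descriptor ring base
          * address when bus_addr_t is 64-bit and constant-fold to 0 when it
          * is 32-bit, so callers can program both halves of the 64-bit base
          * address registers (e.g. TDBAH/TDBAL, RDBAH/RDBAL) unconditionally.
          */
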
    699 /*
     700  * Register read/write functions,
     701  * other than CSR_{READ|WRITE}().
    702  */
    703 #if 0
    704 static inline uint32_t wm_io_read(struct wm_softc *, int);
    705 #endif
    706 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    707 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    708     uint32_t, uint32_t);
    709 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    710 
    711 /*
    712  * Descriptor sync/init functions.
    713  */
    714 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    715 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    716 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    717 
    718 /*
    719  * Device driver interface functions and commonly used functions.
    720  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    721  */
    722 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    723 static int	wm_match(device_t, cfdata_t, void *);
    724 static void	wm_attach(device_t, device_t, void *);
    725 static int	wm_detach(device_t, int);
    726 static bool	wm_suspend(device_t, const pmf_qual_t *);
    727 static bool	wm_resume(device_t, const pmf_qual_t *);
    728 static void	wm_watchdog(struct ifnet *);
    729 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    730     uint16_t *);
    731 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    732     uint16_t *);
    733 static void	wm_tick(void *);
    734 static int	wm_ifflags_cb(struct ethercom *);
    735 static int	wm_ioctl(struct ifnet *, u_long, void *);
    736 /* MAC address related */
    737 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    738 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    739 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    740 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    741 static int	wm_rar_count(struct wm_softc *);
    742 static void	wm_set_filter(struct wm_softc *);
    743 /* Reset and init related */
    744 static void	wm_set_vlan(struct wm_softc *);
    745 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    746 static void	wm_get_auto_rd_done(struct wm_softc *);
    747 static void	wm_lan_init_done(struct wm_softc *);
    748 static void	wm_get_cfg_done(struct wm_softc *);
    749 static int	wm_phy_post_reset(struct wm_softc *);
    750 static int	wm_write_smbus_addr(struct wm_softc *);
    751 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    752 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    753 static void	wm_initialize_hardware_bits(struct wm_softc *);
    754 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    755 static int	wm_reset_phy(struct wm_softc *);
    756 static void	wm_flush_desc_rings(struct wm_softc *);
    757 static void	wm_reset(struct wm_softc *);
    758 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    759 static void	wm_rxdrain(struct wm_rxqueue *);
    760 static void	wm_init_rss(struct wm_softc *);
    761 static void	wm_adjust_qnum(struct wm_softc *, int);
    762 static inline bool	wm_is_using_msix(struct wm_softc *);
    763 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    764 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
    765 static int	wm_setup_legacy(struct wm_softc *);
    766 static int	wm_setup_msix(struct wm_softc *);
    767 static int	wm_init(struct ifnet *);
    768 static int	wm_init_locked(struct ifnet *);
    769 static void	wm_init_sysctls(struct wm_softc *);
    770 static void	wm_unset_stopping_flags(struct wm_softc *);
    771 static void	wm_set_stopping_flags(struct wm_softc *);
    772 static void	wm_stop(struct ifnet *, int);
    773 static void	wm_stop_locked(struct ifnet *, bool, bool);
    774 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    775 static void	wm_82547_txfifo_stall(void *);
    776 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    777 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    778 /* DMA related */
    779 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    780 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    781 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    782 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    783     struct wm_txqueue *);
    784 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    785 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    786 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    787     struct wm_rxqueue *);
    788 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    789 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    790 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    791 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    792 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    793 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    794 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    795     struct wm_txqueue *);
    796 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    797     struct wm_rxqueue *);
    798 static int	wm_alloc_txrx_queues(struct wm_softc *);
    799 static void	wm_free_txrx_queues(struct wm_softc *);
    800 static int	wm_init_txrx_queues(struct wm_softc *);
    801 /* Start */
    802 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    803     struct wm_txsoft *, uint32_t *, uint8_t *);
    804 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    805 static void	wm_start(struct ifnet *);
    806 static void	wm_start_locked(struct ifnet *);
    807 static int	wm_transmit(struct ifnet *, struct mbuf *);
    808 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    809 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    810 		    bool);
    811 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    812     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    813 static void	wm_nq_start(struct ifnet *);
    814 static void	wm_nq_start_locked(struct ifnet *);
    815 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    816 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    817 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    818 		    bool);
    819 static void	wm_deferred_start_locked(struct wm_txqueue *);
    820 static void	wm_handle_queue(void *);
    821 static void	wm_handle_queue_work(struct work *, void *);
    822 /* Interrupt */
    823 static bool	wm_txeof(struct wm_txqueue *, u_int);
    824 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    825 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    826 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    827 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    828 static void	wm_linkintr(struct wm_softc *, uint32_t);
    829 static int	wm_intr_legacy(void *);
    830 static inline void	wm_txrxintr_disable(struct wm_queue *);
    831 static inline void	wm_txrxintr_enable(struct wm_queue *);
    832 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    833 static int	wm_txrxintr_msix(void *);
    834 static int	wm_linkintr_msix(void *);
    835 
    836 /*
    837  * Media related.
    838  * GMII, SGMII, TBI, SERDES and SFP.
    839  */
    840 /* Common */
    841 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    842 /* GMII related */
    843 static void	wm_gmii_reset(struct wm_softc *);
    844 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    845 static int	wm_get_phy_id_82575(struct wm_softc *);
    846 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    847 static int	wm_gmii_mediachange(struct ifnet *);
    848 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    849 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    850 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    851 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    852 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    853 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    854 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    855 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    856 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    857 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    858 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    859 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    860 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    861 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    862 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    863 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    864 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    865 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    866 	bool);
    867 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    868 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    869 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    870 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    871 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    872 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    873 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    874 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    875 static void	wm_gmii_statchg(struct ifnet *);
    876 /*
     877  * Kumeran related (80003, ICH* and PCH*).
     878  * These functions are not for accessing MII registers but for accessing
     879  * Kumeran-specific registers.
    880  */
    881 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    882 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    883 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    884 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    885 /* EMI register related */
    886 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    887 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    888 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    889 /* SGMII */
    890 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    891 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
    892 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    893 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    894 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    895 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    896 /* TBI related */
    897 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    898 static void	wm_tbi_mediainit(struct wm_softc *);
    899 static int	wm_tbi_mediachange(struct ifnet *);
    900 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    901 static int	wm_check_for_link(struct wm_softc *);
    902 static void	wm_tbi_tick(struct wm_softc *);
    903 /* SERDES related */
    904 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    905 static int	wm_serdes_mediachange(struct ifnet *);
    906 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    907 static void	wm_serdes_tick(struct wm_softc *);
    908 /* SFP related */
    909 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    910 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    911 
    912 /*
    913  * NVM related.
    914  * Microwire, SPI (w/wo EERD) and Flash.
    915  */
    916 /* Misc functions */
    917 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    918 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    919 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    920 /* Microwire */
    921 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    922 /* SPI */
    923 static int	wm_nvm_ready_spi(struct wm_softc *);
    924 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    925 /* Using with EERD */
    926 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    927 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    928 /* Flash */
    929 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    930     unsigned int *);
    931 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    932 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    933 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    934     uint32_t *);
    935 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    936 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    937 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    938 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    939 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    940 /* iNVM */
    941 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    942 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    943 /* Lock, detecting NVM type, validate checksum and read */
    944 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    945 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    946 static int	wm_nvm_validate_checksum(struct wm_softc *);
    947 static void	wm_nvm_version_invm(struct wm_softc *);
    948 static void	wm_nvm_version(struct wm_softc *);
    949 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    950 
    951 /*
    952  * Hardware semaphores.
     953  * Very complex...
    954  */
    955 static int	wm_get_null(struct wm_softc *);
    956 static void	wm_put_null(struct wm_softc *);
    957 static int	wm_get_eecd(struct wm_softc *);
    958 static void	wm_put_eecd(struct wm_softc *);
    959 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    960 static void	wm_put_swsm_semaphore(struct wm_softc *);
    961 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    962 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    963 static int	wm_get_nvm_80003(struct wm_softc *);
    964 static void	wm_put_nvm_80003(struct wm_softc *);
    965 static int	wm_get_nvm_82571(struct wm_softc *);
    966 static void	wm_put_nvm_82571(struct wm_softc *);
    967 static int	wm_get_phy_82575(struct wm_softc *);
    968 static void	wm_put_phy_82575(struct wm_softc *);
    969 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    970 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    971 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    972 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    973 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    974 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    975 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    976 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    977 
    978 /*
    979  * Management mode and power management related subroutines.
    980  * BMC, AMT, suspend/resume and EEE.
    981  */
    982 #if 0
    983 static int	wm_check_mng_mode(struct wm_softc *);
    984 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    985 static int	wm_check_mng_mode_82574(struct wm_softc *);
    986 static int	wm_check_mng_mode_generic(struct wm_softc *);
    987 #endif
    988 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    989 static bool	wm_phy_resetisblocked(struct wm_softc *);
    990 static void	wm_get_hw_control(struct wm_softc *);
    991 static void	wm_release_hw_control(struct wm_softc *);
    992 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    993 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
    994 static void	wm_init_manageability(struct wm_softc *);
    995 static void	wm_release_manageability(struct wm_softc *);
    996 static void	wm_get_wakeup(struct wm_softc *);
    997 static int	wm_ulp_disable(struct wm_softc *);
    998 static int	wm_enable_phy_wakeup(struct wm_softc *);
    999 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
   1000 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
   1001 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
   1002 static void	wm_enable_wakeup(struct wm_softc *);
   1003 static void	wm_disable_aspm(struct wm_softc *);
   1004 /* LPLU (Low Power Link Up) */
   1005 static void	wm_lplu_d0_disable(struct wm_softc *);
   1006 /* EEE */
   1007 static int	wm_set_eee_i350(struct wm_softc *);
   1008 static int	wm_set_eee_pchlan(struct wm_softc *);
   1009 static int	wm_set_eee(struct wm_softc *);
   1010 
   1011 /*
   1012  * Workarounds (mainly PHY related).
    1013  * Basically, PHY workarounds are in the PHY drivers.
   1014  */
   1015 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1016 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1017 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1018 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1019 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1020 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1021 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1022 static int	wm_k1_workaround_lv(struct wm_softc *);
   1023 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1024 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1025 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1026 static void	wm_reset_init_script_82575(struct wm_softc *);
   1027 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1028 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1029 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1030 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1031 static int	wm_pll_workaround_i210(struct wm_softc *);
   1032 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1033 
   1034 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1035     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1036 
   1037 /*
   1038  * Devices supported by this driver.
   1039  */
   1040 static const struct wm_product {
   1041 	pci_vendor_id_t		wmp_vendor;
   1042 	pci_product_id_t	wmp_product;
   1043 	const char		*wmp_name;
   1044 	wm_chip_type		wmp_type;
   1045 	uint32_t		wmp_flags;
   1046 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1047 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1048 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1049 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1050 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1051 } wm_products[] = {
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1053 	  "Intel i82542 1000BASE-X Ethernet",
   1054 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1057 	  "Intel i82543GC 1000BASE-X Ethernet",
   1058 	  WM_T_82543,		WMP_F_FIBER },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1061 	  "Intel i82543GC 1000BASE-T Ethernet",
   1062 	  WM_T_82543,		WMP_F_COPPER },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1065 	  "Intel i82544EI 1000BASE-T Ethernet",
   1066 	  WM_T_82544,		WMP_F_COPPER },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1069 	  "Intel i82544EI 1000BASE-X Ethernet",
   1070 	  WM_T_82544,		WMP_F_FIBER },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1073 	  "Intel i82544GC 1000BASE-T Ethernet",
   1074 	  WM_T_82544,		WMP_F_COPPER },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1077 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1078 	  WM_T_82544,		WMP_F_COPPER },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1081 	  "Intel i82540EM 1000BASE-T Ethernet",
   1082 	  WM_T_82540,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1085 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1086 	  WM_T_82540,		WMP_F_COPPER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1089 	  "Intel i82540EP 1000BASE-T Ethernet",
   1090 	  WM_T_82540,		WMP_F_COPPER },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1093 	  "Intel i82540EP 1000BASE-T Ethernet",
   1094 	  WM_T_82540,		WMP_F_COPPER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1097 	  "Intel i82540EP 1000BASE-T Ethernet",
   1098 	  WM_T_82540,		WMP_F_COPPER },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1101 	  "Intel i82545EM 1000BASE-T Ethernet",
   1102 	  WM_T_82545,		WMP_F_COPPER },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1105 	  "Intel i82545GM 1000BASE-T Ethernet",
   1106 	  WM_T_82545_3,		WMP_F_COPPER },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1109 	  "Intel i82545GM 1000BASE-X Ethernet",
   1110 	  WM_T_82545_3,		WMP_F_FIBER },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1113 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1114 	  WM_T_82545_3,		WMP_F_SERDES },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1117 	  "Intel i82546EB 1000BASE-T Ethernet",
   1118 	  WM_T_82546,		WMP_F_COPPER },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1121 	  "Intel i82546EB 1000BASE-T Ethernet",
   1122 	  WM_T_82546,		WMP_F_COPPER },
   1123 
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1125 	  "Intel i82545EM 1000BASE-X Ethernet",
   1126 	  WM_T_82545,		WMP_F_FIBER },
   1127 
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1129 	  "Intel i82546EB 1000BASE-X Ethernet",
   1130 	  WM_T_82546,		WMP_F_FIBER },
   1131 
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1133 	  "Intel i82546GB 1000BASE-T Ethernet",
   1134 	  WM_T_82546_3,		WMP_F_COPPER },
   1135 
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1137 	  "Intel i82546GB 1000BASE-X Ethernet",
   1138 	  WM_T_82546_3,		WMP_F_FIBER },
   1139 
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1141 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1142 	  WM_T_82546_3,		WMP_F_SERDES },
   1143 
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1145 	  "i82546GB quad-port Gigabit Ethernet",
   1146 	  WM_T_82546_3,		WMP_F_COPPER },
   1147 
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1149 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1150 	  WM_T_82546_3,		WMP_F_COPPER },
   1151 
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1153 	  "Intel PRO/1000MT (82546GB)",
   1154 	  WM_T_82546_3,		WMP_F_COPPER },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1157 	  "Intel i82541EI 1000BASE-T Ethernet",
   1158 	  WM_T_82541,		WMP_F_COPPER },
   1159 
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1161 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1162 	  WM_T_82541,		WMP_F_COPPER },
   1163 
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1165 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1166 	  WM_T_82541,		WMP_F_COPPER },
   1167 
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1169 	  "Intel i82541ER 1000BASE-T Ethernet",
   1170 	  WM_T_82541_2,		WMP_F_COPPER },
   1171 
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1173 	  "Intel i82541GI 1000BASE-T Ethernet",
   1174 	  WM_T_82541_2,		WMP_F_COPPER },
   1175 
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1177 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1178 	  WM_T_82541_2,		WMP_F_COPPER },
   1179 
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1181 	  "Intel i82541PI 1000BASE-T Ethernet",
   1182 	  WM_T_82541_2,		WMP_F_COPPER },
   1183 
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1185 	  "Intel i82547EI 1000BASE-T Ethernet",
   1186 	  WM_T_82547,		WMP_F_COPPER },
   1187 
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1189 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1190 	  WM_T_82547,		WMP_F_COPPER },
   1191 
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1193 	  "Intel i82547GI 1000BASE-T Ethernet",
   1194 	  WM_T_82547_2,		WMP_F_COPPER },
   1195 
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1197 	  "Intel PRO/1000 PT (82571EB)",
   1198 	  WM_T_82571,		WMP_F_COPPER },
   1199 
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1201 	  "Intel PRO/1000 PF (82571EB)",
   1202 	  WM_T_82571,		WMP_F_FIBER },
   1203 
   1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1205 	  "Intel PRO/1000 PB (82571EB)",
   1206 	  WM_T_82571,		WMP_F_SERDES },
   1207 
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1209 	  "Intel PRO/1000 QT (82571EB)",
   1210 	  WM_T_82571,		WMP_F_COPPER },
   1211 
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1213 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1214 	  WM_T_82571,		WMP_F_COPPER },
   1215 
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1217 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1218 	  WM_T_82571,		WMP_F_COPPER },
   1219 
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1221 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1222 	  WM_T_82571,		WMP_F_SERDES },
   1223 
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1225 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1226 	  WM_T_82571,		WMP_F_SERDES },
   1227 
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1229 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1230 	  WM_T_82571,		WMP_F_FIBER },
   1231 
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1233 	  "Intel i82572EI 1000baseT Ethernet",
   1234 	  WM_T_82572,		WMP_F_COPPER },
   1235 
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1237 	  "Intel i82572EI 1000baseX Ethernet",
   1238 	  WM_T_82572,		WMP_F_FIBER },
   1239 
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1241 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1242 	  WM_T_82572,		WMP_F_SERDES },
   1243 
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1245 	  "Intel i82572EI 1000baseT Ethernet",
   1246 	  WM_T_82572,		WMP_F_COPPER },
   1247 
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1249 	  "Intel i82573E",
   1250 	  WM_T_82573,		WMP_F_COPPER },
   1251 
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1253 	  "Intel i82573E IAMT",
   1254 	  WM_T_82573,		WMP_F_COPPER },
   1255 
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1257 	  "Intel i82573L Gigabit Ethernet",
   1258 	  WM_T_82573,		WMP_F_COPPER },
   1259 
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1261 	  "Intel i82574L",
   1262 	  WM_T_82574,		WMP_F_COPPER },
   1263 
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1265 	  "Intel i82574L",
   1266 	  WM_T_82574,		WMP_F_COPPER },
   1267 
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1269 	  "Intel i82583V",
   1270 	  WM_T_82583,		WMP_F_COPPER },
   1271 
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1273 	  "i80003 dual 1000baseT Ethernet",
   1274 	  WM_T_80003,		WMP_F_COPPER },
   1275 
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1277 	  "i80003 dual 1000baseX Ethernet",
   1278 	  WM_T_80003,		WMP_F_COPPER },
   1279 
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1281 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1282 	  WM_T_80003,		WMP_F_SERDES },
   1283 
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1285 	  "Intel i80003 1000baseT Ethernet",
   1286 	  WM_T_80003,		WMP_F_COPPER },
   1287 
   1288 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1289 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1290 	  WM_T_80003,		WMP_F_SERDES },
   1291 
   1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1293 	  "Intel i82801H (M_AMT) LAN Controller",
   1294 	  WM_T_ICH8,		WMP_F_COPPER },
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1296 	  "Intel i82801H (AMT) LAN Controller",
   1297 	  WM_T_ICH8,		WMP_F_COPPER },
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1299 	  "Intel i82801H LAN Controller",
   1300 	  WM_T_ICH8,		WMP_F_COPPER },
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1302 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1303 	  WM_T_ICH8,		WMP_F_COPPER },
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1305 	  "Intel i82801H (M) LAN Controller",
   1306 	  WM_T_ICH8,		WMP_F_COPPER },
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1308 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1309 	  WM_T_ICH8,		WMP_F_COPPER },
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1311 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1312 	  WM_T_ICH8,		WMP_F_COPPER },
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1314 	  "82567V-3 LAN Controller",
   1315 	  WM_T_ICH8,		WMP_F_COPPER },
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1317 	  "82801I (AMT) LAN Controller",
   1318 	  WM_T_ICH9,		WMP_F_COPPER },
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1320 	  "82801I 10/100 LAN Controller",
   1321 	  WM_T_ICH9,		WMP_F_COPPER },
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1323 	  "82801I (G) 10/100 LAN Controller",
   1324 	  WM_T_ICH9,		WMP_F_COPPER },
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1326 	  "82801I (GT) 10/100 LAN Controller",
   1327 	  WM_T_ICH9,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1329 	  "82801I (C) LAN Controller",
   1330 	  WM_T_ICH9,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1332 	  "82801I mobile LAN Controller",
   1333 	  WM_T_ICH9,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1335 	  "82801I mobile (V) LAN Controller",
   1336 	  WM_T_ICH9,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1338 	  "82801I mobile (AMT) LAN Controller",
   1339 	  WM_T_ICH9,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1341 	  "82567LM-4 LAN Controller",
   1342 	  WM_T_ICH9,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1344 	  "82567LM-2 LAN Controller",
   1345 	  WM_T_ICH10,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1347 	  "82567LF-2 LAN Controller",
   1348 	  WM_T_ICH10,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1350 	  "82567LM-3 LAN Controller",
   1351 	  WM_T_ICH10,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1353 	  "82567LF-3 LAN Controller",
   1354 	  WM_T_ICH10,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1356 	  "82567V-2 LAN Controller",
   1357 	  WM_T_ICH10,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1359 	  "82567V-3? LAN Controller",
   1360 	  WM_T_ICH10,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1362 	  "HANKSVILLE LAN Controller",
   1363 	  WM_T_ICH10,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1365 	  "PCH LAN (82577LM) Controller",
   1366 	  WM_T_PCH,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1368 	  "PCH LAN (82577LC) Controller",
   1369 	  WM_T_PCH,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1371 	  "PCH LAN (82578DM) Controller",
   1372 	  WM_T_PCH,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1374 	  "PCH LAN (82578DC) Controller",
   1375 	  WM_T_PCH,		WMP_F_COPPER },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1377 	  "PCH2 LAN (82579LM) Controller",
   1378 	  WM_T_PCH2,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1380 	  "PCH2 LAN (82579V) Controller",
   1381 	  WM_T_PCH2,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1383 	  "82575EB dual-1000baseT Ethernet",
   1384 	  WM_T_82575,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1386 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1387 	  WM_T_82575,		WMP_F_SERDES },
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1389 	  "82575GB quad-1000baseT Ethernet",
   1390 	  WM_T_82575,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1392 	  "82575GB quad-1000baseT Ethernet (PM)",
   1393 	  WM_T_82575,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1395 	  "82576 1000BaseT Ethernet",
   1396 	  WM_T_82576,		WMP_F_COPPER },
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1398 	  "82576 1000BaseX Ethernet",
   1399 	  WM_T_82576,		WMP_F_FIBER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1402 	  "82576 gigabit Ethernet (SERDES)",
   1403 	  WM_T_82576,		WMP_F_SERDES },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1406 	  "82576 quad-1000BaseT Ethernet",
   1407 	  WM_T_82576,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1410 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1411 	  WM_T_82576,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1414 	  "82576 gigabit Ethernet",
   1415 	  WM_T_82576,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1418 	  "82576 gigabit Ethernet (SERDES)",
   1419 	  WM_T_82576,		WMP_F_SERDES },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1421 	  "82576 quad-gigabit Ethernet (SERDES)",
   1422 	  WM_T_82576,		WMP_F_SERDES },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1425 	  "82580 1000BaseT Ethernet",
   1426 	  WM_T_82580,		WMP_F_COPPER },
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1428 	  "82580 1000BaseX Ethernet",
   1429 	  WM_T_82580,		WMP_F_FIBER },
   1430 
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1432 	  "82580 1000BaseT Ethernet (SERDES)",
   1433 	  WM_T_82580,		WMP_F_SERDES },
   1434 
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1436 	  "82580 gigabit Ethernet (SGMII)",
   1437 	  WM_T_82580,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1439 	  "82580 dual-1000BaseT Ethernet",
   1440 	  WM_T_82580,		WMP_F_COPPER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1443 	  "82580 quad-1000BaseX Ethernet",
   1444 	  WM_T_82580,		WMP_F_FIBER },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1447 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1448 	  WM_T_82580,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1451 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1452 	  WM_T_82580,		WMP_F_SERDES },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1455 	  "DH89XXCC 1000BASE-KX Ethernet",
   1456 	  WM_T_82580,		WMP_F_SERDES },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1459 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1460 	  WM_T_82580,		WMP_F_SERDES },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1463 	  "I350 Gigabit Network Connection",
   1464 	  WM_T_I350,		WMP_F_COPPER },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1467 	  "I350 Gigabit Fiber Network Connection",
   1468 	  WM_T_I350,		WMP_F_FIBER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1471 	  "I350 Gigabit Backplane Connection",
   1472 	  WM_T_I350,		WMP_F_SERDES },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1475 	  "I350 Quad Port Gigabit Ethernet",
   1476 	  WM_T_I350,		WMP_F_SERDES },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1479 	  "I350 Gigabit Connection",
   1480 	  WM_T_I350,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1483 	  "I354 Gigabit Ethernet (KX)",
   1484 	  WM_T_I354,		WMP_F_SERDES },
   1485 
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1487 	  "I354 Gigabit Ethernet (SGMII)",
   1488 	  WM_T_I354,		WMP_F_COPPER },
   1489 
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1491 	  "I354 Gigabit Ethernet (2.5G)",
   1492 	  WM_T_I354,		WMP_F_COPPER },
   1493 
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1495 	  "I210-T1 Ethernet Server Adapter",
   1496 	  WM_T_I210,		WMP_F_COPPER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1499 	  "I210 Ethernet (Copper OEM)",
   1500 	  WM_T_I210,		WMP_F_COPPER },
   1501 
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1503 	  "I210 Ethernet (Copper IT)",
   1504 	  WM_T_I210,		WMP_F_COPPER },
   1505 
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1507 	  "I210 Ethernet (Copper, FLASH less)",
   1508 	  WM_T_I210,		WMP_F_COPPER },
   1509 
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1511 	  "I210 Gigabit Ethernet (Fiber)",
   1512 	  WM_T_I210,		WMP_F_FIBER },
   1513 
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1515 	  "I210 Gigabit Ethernet (SERDES)",
   1516 	  WM_T_I210,		WMP_F_SERDES },
   1517 
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1519 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1520 	  WM_T_I210,		WMP_F_SERDES },
   1521 
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1523 	  "I210 Gigabit Ethernet (SGMII)",
   1524 	  WM_T_I210,		WMP_F_COPPER },
   1525 
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1527 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1528 	  WM_T_I210,		WMP_F_COPPER },
   1529 
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1531 	  "I211 Ethernet (COPPER)",
   1532 	  WM_T_I211,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1534 	  "I217 V Ethernet Connection",
   1535 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1537 	  "I217 LM Ethernet Connection",
   1538 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1540 	  "I218 V Ethernet Connection",
   1541 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1543 	  "I218 V Ethernet Connection",
   1544 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1546 	  "I218 V Ethernet Connection",
   1547 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1549 	  "I218 LM Ethernet Connection",
   1550 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1552 	  "I218 LM Ethernet Connection",
   1553 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1555 	  "I218 LM Ethernet Connection",
   1556 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1558 	  "I219 LM Ethernet Connection",
   1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1561 	  "I219 LM Ethernet Connection",
   1562 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1564 	  "I219 LM Ethernet Connection",
   1565 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1567 	  "I219 LM Ethernet Connection",
   1568 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1570 	  "I219 LM Ethernet Connection",
   1571 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1573 	  "I219 LM Ethernet Connection",
   1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1576 	  "I219 LM Ethernet Connection",
   1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1579 	  "I219 LM Ethernet Connection",
   1580 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1581 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1582 	  "I219 LM Ethernet Connection",
   1583 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1585 	  "I219 LM Ethernet Connection",
   1586 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1588 	  "I219 LM Ethernet Connection",
   1589 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1591 	  "I219 LM Ethernet Connection",
   1592 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1593 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1594 	  "I219 LM Ethernet Connection",
   1595 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1597 	  "I219 LM Ethernet Connection",
   1598 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1600 	  "I219 LM Ethernet Connection",
   1601 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1603 	  "I219 V Ethernet Connection",
   1604 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1606 	  "I219 V Ethernet Connection",
   1607 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1609 	  "I219 V Ethernet Connection",
   1610 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1612 	  "I219 V Ethernet Connection",
   1613 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1615 	  "I219 V Ethernet Connection",
   1616 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1618 	  "I219 V Ethernet Connection",
   1619 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1621 	  "I219 V Ethernet Connection",
   1622 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1624 	  "I219 V Ethernet Connection",
   1625 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1627 	  "I219 V Ethernet Connection",
   1628 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1630 	  "I219 V Ethernet Connection",
   1631 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1633 	  "I219 V Ethernet Connection",
   1634 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1636 	  "I219 V Ethernet Connection",
   1637 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1639 	  "I219 V Ethernet Connection",
   1640 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1641 	{ 0,			0,
   1642 	  NULL,
   1643 	  0,			0 },
   1644 };
   1645 
   1646 /*
   1647  * Register read/write functions.
   1648  * Other than CSR_{READ|WRITE}().
   1649  */
   1650 
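/*
 * wm_io_read() and wm_io_write() use the indirect access scheme of the
 * I/O BAR: the register offset is written at I/O offset 0 (IOADDR) and
 * the value is then read or written at I/O offset 4 (IODATA).
 */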
   1651 #if 0 /* Not currently used */
   1652 static inline uint32_t
   1653 wm_io_read(struct wm_softc *sc, int reg)
   1654 {
   1655 
   1656 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1657 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1658 }
   1659 #endif
   1660 
   1661 static inline void
   1662 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1663 {
   1664 
   1665 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1666 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1667 }
   1668 
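/*
 * Write a value into an 8-bit controller register that is reached
 * through an indirect control register such as SCTL: the data byte and
 * the target offset are packed into a single register write, and the
 * READY bit is then polled at 5us intervals until the write completes.
 */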
   1669 static inline void
   1670 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1671     uint32_t data)
   1672 {
   1673 	uint32_t regval;
   1674 	int i;
   1675 
   1676 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1677 
   1678 	CSR_WRITE(sc, reg, regval);
   1679 
   1680 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1681 		delay(5);
   1682 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1683 			break;
   1684 	}
   1685 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1686 		aprint_error("%s: WARNING:"
   1687 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1688 		    device_xname(sc->sc_dev), reg);
   1689 	}
   1690 }
   1691 
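/*
 * Split a bus address into the two little-endian 32-bit halves of a
 * wiseman-style descriptor address; the high half is zero when
 * bus_addr_t is only 32 bits wide.
 */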
   1692 static inline void
   1693 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1694 {
   1695 	wa->wa_low = htole32(v & 0xffffffffU);
   1696 	if (sizeof(bus_addr_t) == 8)
   1697 		wa->wa_high = htole32((uint64_t) v >> 32);
   1698 	else
   1699 		wa->wa_high = 0;
   1700 }
   1701 
   1702 /*
   1703  * Descriptor sync/init functions.
   1704  */
   1705 static inline void
   1706 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1707 {
   1708 	struct wm_softc *sc = txq->txq_sc;
   1709 
   1710 	/* If it will wrap around, sync to the end of the ring. */
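	/*
	 * E.g., with a 256-descriptor ring, start=250 and num=10 syncs
	 * descriptors 250..255 here and then falls through below to
	 * sync descriptors 0..3.
	 */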
   1711 	if ((start + num) > WM_NTXDESC(txq)) {
   1712 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1713 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1714 		    (WM_NTXDESC(txq) - start), ops);
   1715 		num -= (WM_NTXDESC(txq) - start);
   1716 		start = 0;
   1717 	}
   1718 
   1719 	/* Now sync whatever is left. */
   1720 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1721 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1722 }
   1723 
   1724 static inline void
   1725 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1726 {
   1727 	struct wm_softc *sc = rxq->rxq_sc;
   1728 
   1729 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1730 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1731 }
   1732 
   1733 static inline void
   1734 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1735 {
   1736 	struct wm_softc *sc = rxq->rxq_sc;
   1737 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1738 	struct mbuf *m = rxs->rxs_mbuf;
   1739 
   1740 	/*
   1741 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1742 	 * so that the payload after the Ethernet header is aligned
   1743 	 * to a 4-byte boundary.
    1744 	 *
   1745 	 * XXX BRAINDAMAGE ALERT!
   1746 	 * The stupid chip uses the same size for every buffer, which
   1747 	 * is set in the Receive Control register.  We are using the 2K
   1748 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1749 	 * reason, we can't "scoot" packets longer than the standard
   1750 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1751 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1752 	 * the upper layer copy the headers.
   1753 	 */
   1754 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1755 
   1756 	if (sc->sc_type == WM_T_82574) {
   1757 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1758 		rxd->erx_data.erxd_addr =
   1759 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1760 		rxd->erx_data.erxd_dd = 0;
   1761 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1762 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1763 
   1764 		rxd->nqrx_data.nrxd_paddr =
   1765 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1766 		/* Currently, split header is not supported. */
   1767 		rxd->nqrx_data.nrxd_haddr = 0;
   1768 	} else {
   1769 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1770 
   1771 		wm_set_dma_addr(&rxd->wrx_addr,
   1772 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1773 		rxd->wrx_len = 0;
   1774 		rxd->wrx_cksum = 0;
   1775 		rxd->wrx_status = 0;
   1776 		rxd->wrx_errors = 0;
   1777 		rxd->wrx_special = 0;
   1778 	}
   1779 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1780 
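	/* Advancing the tail register hands the descriptor back to hw. */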
   1781 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1782 }
   1783 
   1784 /*
   1785  * Device driver interface functions and commonly used functions.
   1786  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1787  */
   1788 
   1789 /* Lookup supported device table */
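/* The scan is linear; the table is terminated by a NULL wmp_name. */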
   1790 static const struct wm_product *
   1791 wm_lookup(const struct pci_attach_args *pa)
   1792 {
   1793 	const struct wm_product *wmp;
   1794 
   1795 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1796 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1797 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1798 			return wmp;
   1799 	}
   1800 	return NULL;
   1801 }
   1802 
   1803 /* The match function (ca_match) */
   1804 static int
   1805 wm_match(device_t parent, cfdata_t cf, void *aux)
   1806 {
   1807 	struct pci_attach_args *pa = aux;
   1808 
   1809 	if (wm_lookup(pa) != NULL)
   1810 		return 1;
   1811 
   1812 	return 0;
   1813 }
   1814 
   1815 /* The attach function (ca_attach) */
   1816 static void
   1817 wm_attach(device_t parent, device_t self, void *aux)
   1818 {
   1819 	struct wm_softc *sc = device_private(self);
   1820 	struct pci_attach_args *pa = aux;
   1821 	prop_dictionary_t dict;
   1822 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1823 	pci_chipset_tag_t pc = pa->pa_pc;
   1824 	int counts[PCI_INTR_TYPE_SIZE];
   1825 	pci_intr_type_t max_type;
   1826 	const char *eetype, *xname;
   1827 	bus_space_tag_t memt;
   1828 	bus_space_handle_t memh;
   1829 	bus_size_t memsize;
   1830 	int memh_valid;
   1831 	int i, error;
   1832 	const struct wm_product *wmp;
   1833 	prop_data_t ea;
   1834 	prop_number_t pn;
   1835 	uint8_t enaddr[ETHER_ADDR_LEN];
   1836 	char buf[256];
   1837 	char wqname[MAXCOMLEN];
   1838 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1839 	pcireg_t preg, memtype;
   1840 	uint16_t eeprom_data, apme_mask;
   1841 	bool force_clear_smbi;
   1842 	uint32_t link_mode;
   1843 	uint32_t reg;
   1844 
   1845 	sc->sc_dev = self;
   1846 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1847 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1848 	sc->sc_core_stopping = false;
   1849 
   1850 	wmp = wm_lookup(pa);
   1851 #ifdef DIAGNOSTIC
   1852 	if (wmp == NULL) {
   1853 		printf("\n");
   1854 		panic("wm_attach: impossible");
   1855 	}
   1856 #endif
   1857 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1858 
   1859 	sc->sc_pc = pa->pa_pc;
   1860 	sc->sc_pcitag = pa->pa_tag;
   1861 
   1862 	if (pci_dma64_available(pa))
   1863 		sc->sc_dmat = pa->pa_dmat64;
   1864 	else
   1865 		sc->sc_dmat = pa->pa_dmat;
   1866 
   1867 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1868 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1869 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1870 
   1871 	sc->sc_type = wmp->wmp_type;
   1872 
   1873 	/* Set default function pointers */
   1874 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1875 	sc->phy.release = sc->nvm.release = wm_put_null;
   1876 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1877 
   1878 	if (sc->sc_type < WM_T_82543) {
   1879 		if (sc->sc_rev < 2) {
   1880 			aprint_error_dev(sc->sc_dev,
   1881 			    "i82542 must be at least rev. 2\n");
   1882 			return;
   1883 		}
   1884 		if (sc->sc_rev < 3)
   1885 			sc->sc_type = WM_T_82542_2_0;
   1886 	}
   1887 
   1888 	/*
   1889 	 * Disable MSI for Errata:
   1890 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1891 	 *
   1892 	 *  82544: Errata 25
   1893 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1894 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1895 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1896 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1897 	 *
   1898 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1899 	 *
   1900 	 *  82571 & 82572: Errata 63
   1901 	 */
   1902 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1903 	    || (sc->sc_type == WM_T_82572))
   1904 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1905 
   1906 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1907 	    || (sc->sc_type == WM_T_82580)
   1908 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1909 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1910 		sc->sc_flags |= WM_F_NEWQUEUE;
   1911 
   1912 	/* Set device properties (mactype) */
   1913 	dict = device_properties(sc->sc_dev);
   1914 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1915 
   1916 	/*
    1917 	 * Map the device.  All devices support memory-mapped access,
   1918 	 * and it is really required for normal operation.
   1919 	 */
   1920 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1921 	switch (memtype) {
   1922 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1923 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1924 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1925 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1926 		break;
   1927 	default:
   1928 		memh_valid = 0;
   1929 		break;
   1930 	}
   1931 
   1932 	if (memh_valid) {
   1933 		sc->sc_st = memt;
   1934 		sc->sc_sh = memh;
   1935 		sc->sc_ss = memsize;
   1936 	} else {
   1937 		aprint_error_dev(sc->sc_dev,
   1938 		    "unable to map device registers\n");
   1939 		return;
   1940 	}
   1941 
   1942 	/*
   1943 	 * In addition, i82544 and later support I/O mapped indirect
   1944 	 * register access.  It is not desirable (nor supported in
   1945 	 * this driver) to use it for normal operation, though it is
   1946 	 * required to work around bugs in some chip versions.
   1947 	 */
   1948 	if (sc->sc_type >= WM_T_82544) {
   1949 		/* First we have to find the I/O BAR. */
   1950 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1951 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1952 			if (memtype == PCI_MAPREG_TYPE_IO)
   1953 				break;
   1954 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1955 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1956 				i += 4;	/* skip high bits, too */
   1957 		}
   1958 		if (i < PCI_MAPREG_END) {
   1959 			/*
    1960 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1961 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1962 			 * That is no problem, because those newer chips
    1963 			 * don't have this bug.
   1964 			 *
    1965 			 * The i8254x apparently doesn't respond when the
    1966 			 * I/O BAR is 0, which looks as if it has not
    1967 			 * been configured.
   1968 			 */
   1969 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1970 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1971 				aprint_error_dev(sc->sc_dev,
   1972 				    "WARNING: I/O BAR at zero.\n");
   1973 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1974 					0, &sc->sc_iot, &sc->sc_ioh,
   1975 					NULL, &sc->sc_ios) == 0) {
   1976 				sc->sc_flags |= WM_F_IOH_VALID;
   1977 			} else
   1978 				aprint_error_dev(sc->sc_dev,
   1979 				    "WARNING: unable to map I/O space\n");
   1980 		}
   1981 
   1982 	}
   1983 
   1984 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1985 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1986 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1987 	if (sc->sc_type < WM_T_82542_2_1)
   1988 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1989 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1990 
   1991 	/* Power up chip */
   1992 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1993 	    && error != EOPNOTSUPP) {
   1994 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1995 		return;
   1996 	}
   1997 
   1998 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1999 	/*
    2000 	 * To save interrupt resources, don't use MSI-X if we can
    2001 	 * use only one queue.
   2002 	 */
   2003 	if (sc->sc_nqueues > 1) {
   2004 		max_type = PCI_INTR_TYPE_MSIX;
   2005 		/*
    2006 		 * The 82583 has an MSI-X capability in the PCI configuration
    2007 		 * space but doesn't support it; at least the documentation
    2008 		 * says nothing about MSI-X.
   2009 		 */
   2010 		counts[PCI_INTR_TYPE_MSIX]
   2011 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2012 	} else {
   2013 		max_type = PCI_INTR_TYPE_MSI;
   2014 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2015 	}
   2016 
   2017 	/* Allocation settings */
   2018 	counts[PCI_INTR_TYPE_MSI] = 1;
   2019 	counts[PCI_INTR_TYPE_INTX] = 1;
    2020 	/* Overridden by the disable flags */
   2021 	if (wm_disable_msi != 0) {
   2022 		counts[PCI_INTR_TYPE_MSI] = 0;
   2023 		if (wm_disable_msix != 0) {
   2024 			max_type = PCI_INTR_TYPE_INTX;
   2025 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2026 		}
   2027 	} else if (wm_disable_msix != 0) {
   2028 		max_type = PCI_INTR_TYPE_MSI;
   2029 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2030 	}
   2031 
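	/*
	 * The allocation below falls back in steps: if MSI-X vectors are
	 * allocated but wm_setup_msix() fails, they are released and we
	 * retry with MSI; if wm_setup_legacy() fails for MSI, we retry
	 * with INTx; if INTx also fails, the attach is aborted.
	 */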
   2032 alloc_retry:
   2033 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2034 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2035 		return;
   2036 	}
   2037 
   2038 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2039 		error = wm_setup_msix(sc);
   2040 		if (error) {
   2041 			pci_intr_release(pc, sc->sc_intrs,
   2042 			    counts[PCI_INTR_TYPE_MSIX]);
   2043 
   2044 			/* Setup for MSI: Disable MSI-X */
   2045 			max_type = PCI_INTR_TYPE_MSI;
   2046 			counts[PCI_INTR_TYPE_MSI] = 1;
   2047 			counts[PCI_INTR_TYPE_INTX] = 1;
   2048 			goto alloc_retry;
   2049 		}
   2050 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2051 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2052 		error = wm_setup_legacy(sc);
   2053 		if (error) {
   2054 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2055 			    counts[PCI_INTR_TYPE_MSI]);
   2056 
   2057 			/* The next try is for INTx: Disable MSI */
   2058 			max_type = PCI_INTR_TYPE_INTX;
   2059 			counts[PCI_INTR_TYPE_INTX] = 1;
   2060 			goto alloc_retry;
   2061 		}
   2062 	} else {
   2063 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2064 		error = wm_setup_legacy(sc);
   2065 		if (error) {
   2066 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2067 			    counts[PCI_INTR_TYPE_INTX]);
   2068 			return;
   2069 		}
   2070 	}
   2071 
   2072 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2073 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2074 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2075 	    WM_WORKQUEUE_FLAGS);
   2076 	if (error) {
   2077 		aprint_error_dev(sc->sc_dev,
   2078 		    "unable to create workqueue\n");
   2079 		goto out;
   2080 	}
   2081 
   2082 	/*
   2083 	 * Check the function ID (unit number of the chip).
   2084 	 */
   2085 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2086 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2087 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2088 	    || (sc->sc_type == WM_T_82580)
   2089 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2090 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2091 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2092 	else
   2093 		sc->sc_funcid = 0;
   2094 
   2095 	/*
   2096 	 * Determine a few things about the bus we're connected to.
   2097 	 */
   2098 	if (sc->sc_type < WM_T_82543) {
   2099 		/* We don't really know the bus characteristics here. */
   2100 		sc->sc_bus_speed = 33;
   2101 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2102 		/*
    2103 		 * CSA (Communication Streaming Architecture) is about as
    2104 		 * fast as a 32-bit 66MHz PCI bus.
   2105 		 */
   2106 		sc->sc_flags |= WM_F_CSA;
   2107 		sc->sc_bus_speed = 66;
   2108 		aprint_verbose_dev(sc->sc_dev,
   2109 		    "Communication Streaming Architecture\n");
   2110 		if (sc->sc_type == WM_T_82547) {
   2111 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2112 			callout_setfunc(&sc->sc_txfifo_ch,
   2113 			    wm_82547_txfifo_stall, sc);
   2114 			aprint_verbose_dev(sc->sc_dev,
   2115 			    "using 82547 Tx FIFO stall work-around\n");
   2116 		}
   2117 	} else if (sc->sc_type >= WM_T_82571) {
   2118 		sc->sc_flags |= WM_F_PCIE;
   2119 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2120 		    && (sc->sc_type != WM_T_ICH10)
   2121 		    && (sc->sc_type != WM_T_PCH)
   2122 		    && (sc->sc_type != WM_T_PCH2)
   2123 		    && (sc->sc_type != WM_T_PCH_LPT)
   2124 		    && (sc->sc_type != WM_T_PCH_SPT)
   2125 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2126 			/* ICH* and PCH* have no PCIe capability registers */
   2127 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2128 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2129 				NULL) == 0)
   2130 				aprint_error_dev(sc->sc_dev,
   2131 				    "unable to find PCIe capability\n");
   2132 		}
   2133 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2134 	} else {
   2135 		reg = CSR_READ(sc, WMREG_STATUS);
   2136 		if (reg & STATUS_BUS64)
   2137 			sc->sc_flags |= WM_F_BUS64;
   2138 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2139 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2140 
   2141 			sc->sc_flags |= WM_F_PCIX;
   2142 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2143 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2144 				aprint_error_dev(sc->sc_dev,
   2145 				    "unable to find PCIX capability\n");
   2146 			else if (sc->sc_type != WM_T_82545_3 &&
   2147 				 sc->sc_type != WM_T_82546_3) {
   2148 				/*
   2149 				 * Work around a problem caused by the BIOS
   2150 				 * setting the max memory read byte count
   2151 				 * incorrectly.
   2152 				 */
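				/*
				 * Both fields encode a size of
				 * 512 << n bytes, so e.g. a bytecnt
				 * of 2 means an MMRBC of 2048 bytes.
				 */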
   2153 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2154 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2155 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2156 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2157 
   2158 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2159 				    PCIX_CMD_BYTECNT_SHIFT;
   2160 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2161 				    PCIX_STATUS_MAXB_SHIFT;
   2162 				if (bytecnt > maxb) {
   2163 					aprint_verbose_dev(sc->sc_dev,
   2164 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2165 					    512 << bytecnt, 512 << maxb);
   2166 					pcix_cmd = (pcix_cmd &
   2167 					    ~PCIX_CMD_BYTECNT_MASK) |
   2168 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2169 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2170 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2171 					    pcix_cmd);
   2172 				}
   2173 			}
   2174 		}
   2175 		/*
   2176 		 * The quad port adapter is special; it has a PCIX-PCIX
   2177 		 * bridge on the board, and can run the secondary bus at
   2178 		 * a higher speed.
   2179 		 */
   2180 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2181 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2182 								      : 66;
   2183 		} else if (sc->sc_flags & WM_F_PCIX) {
   2184 			switch (reg & STATUS_PCIXSPD_MASK) {
   2185 			case STATUS_PCIXSPD_50_66:
   2186 				sc->sc_bus_speed = 66;
   2187 				break;
   2188 			case STATUS_PCIXSPD_66_100:
   2189 				sc->sc_bus_speed = 100;
   2190 				break;
   2191 			case STATUS_PCIXSPD_100_133:
   2192 				sc->sc_bus_speed = 133;
   2193 				break;
   2194 			default:
   2195 				aprint_error_dev(sc->sc_dev,
   2196 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2197 				    reg & STATUS_PCIXSPD_MASK);
   2198 				sc->sc_bus_speed = 66;
   2199 				break;
   2200 			}
   2201 		} else
   2202 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2203 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2204 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2205 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2206 	}
   2207 
   2208 	/* clear interesting stat counters */
   2209 	CSR_READ(sc, WMREG_COLC);
   2210 	CSR_READ(sc, WMREG_RXERRC);
   2211 
   2212 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2213 	    || (sc->sc_type >= WM_T_ICH8))
   2214 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2215 	if (sc->sc_type >= WM_T_ICH8)
   2216 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2217 
   2218 	/* Set PHY, NVM mutex related stuff */
   2219 	switch (sc->sc_type) {
   2220 	case WM_T_82542_2_0:
   2221 	case WM_T_82542_2_1:
   2222 	case WM_T_82543:
   2223 	case WM_T_82544:
   2224 		/* Microwire */
   2225 		sc->nvm.read = wm_nvm_read_uwire;
   2226 		sc->sc_nvm_wordsize = 64;
   2227 		sc->sc_nvm_addrbits = 6;
   2228 		break;
   2229 	case WM_T_82540:
   2230 	case WM_T_82545:
   2231 	case WM_T_82545_3:
   2232 	case WM_T_82546:
   2233 	case WM_T_82546_3:
   2234 		/* Microwire */
   2235 		sc->nvm.read = wm_nvm_read_uwire;
   2236 		reg = CSR_READ(sc, WMREG_EECD);
   2237 		if (reg & EECD_EE_SIZE) {
   2238 			sc->sc_nvm_wordsize = 256;
   2239 			sc->sc_nvm_addrbits = 8;
   2240 		} else {
   2241 			sc->sc_nvm_wordsize = 64;
   2242 			sc->sc_nvm_addrbits = 6;
   2243 		}
   2244 		sc->sc_flags |= WM_F_LOCK_EECD;
   2245 		sc->nvm.acquire = wm_get_eecd;
   2246 		sc->nvm.release = wm_put_eecd;
   2247 		break;
   2248 	case WM_T_82541:
   2249 	case WM_T_82541_2:
   2250 	case WM_T_82547:
   2251 	case WM_T_82547_2:
   2252 		reg = CSR_READ(sc, WMREG_EECD);
   2253 		/*
    2254 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI EEPROM only
    2255 		 * on the 8254[17], so set the flags and functions before calling it.
   2256 		 */
   2257 		sc->sc_flags |= WM_F_LOCK_EECD;
   2258 		sc->nvm.acquire = wm_get_eecd;
   2259 		sc->nvm.release = wm_put_eecd;
   2260 		if (reg & EECD_EE_TYPE) {
   2261 			/* SPI */
   2262 			sc->nvm.read = wm_nvm_read_spi;
   2263 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2264 			wm_nvm_set_addrbits_size_eecd(sc);
   2265 		} else {
   2266 			/* Microwire */
   2267 			sc->nvm.read = wm_nvm_read_uwire;
   2268 			if ((reg & EECD_EE_ABITS) != 0) {
   2269 				sc->sc_nvm_wordsize = 256;
   2270 				sc->sc_nvm_addrbits = 8;
   2271 			} else {
   2272 				sc->sc_nvm_wordsize = 64;
   2273 				sc->sc_nvm_addrbits = 6;
   2274 			}
   2275 		}
   2276 		break;
   2277 	case WM_T_82571:
   2278 	case WM_T_82572:
   2279 		/* SPI */
   2280 		sc->nvm.read = wm_nvm_read_eerd;
    2281 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2282 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2283 		wm_nvm_set_addrbits_size_eecd(sc);
   2284 		sc->phy.acquire = wm_get_swsm_semaphore;
   2285 		sc->phy.release = wm_put_swsm_semaphore;
   2286 		sc->nvm.acquire = wm_get_nvm_82571;
   2287 		sc->nvm.release = wm_put_nvm_82571;
   2288 		break;
   2289 	case WM_T_82573:
   2290 	case WM_T_82574:
   2291 	case WM_T_82583:
   2292 		sc->nvm.read = wm_nvm_read_eerd;
    2293 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2294 		if (sc->sc_type == WM_T_82573) {
   2295 			sc->phy.acquire = wm_get_swsm_semaphore;
   2296 			sc->phy.release = wm_put_swsm_semaphore;
   2297 			sc->nvm.acquire = wm_get_nvm_82571;
   2298 			sc->nvm.release = wm_put_nvm_82571;
   2299 		} else {
   2300 			/* Both PHY and NVM use the same semaphore. */
   2301 			sc->phy.acquire = sc->nvm.acquire
   2302 			    = wm_get_swfwhw_semaphore;
   2303 			sc->phy.release = sc->nvm.release
   2304 			    = wm_put_swfwhw_semaphore;
   2305 		}
   2306 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2307 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2308 			sc->sc_nvm_wordsize = 2048;
   2309 		} else {
   2310 			/* SPI */
   2311 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2312 			wm_nvm_set_addrbits_size_eecd(sc);
   2313 		}
   2314 		break;
   2315 	case WM_T_82575:
   2316 	case WM_T_82576:
   2317 	case WM_T_82580:
   2318 	case WM_T_I350:
   2319 	case WM_T_I354:
   2320 	case WM_T_80003:
   2321 		/* SPI */
   2322 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2323 		wm_nvm_set_addrbits_size_eecd(sc);
   2324 		if ((sc->sc_type == WM_T_80003)
   2325 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2326 			sc->nvm.read = wm_nvm_read_eerd;
   2327 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2328 		} else {
   2329 			sc->nvm.read = wm_nvm_read_spi;
   2330 			sc->sc_flags |= WM_F_LOCK_EECD;
   2331 		}
   2332 		sc->phy.acquire = wm_get_phy_82575;
   2333 		sc->phy.release = wm_put_phy_82575;
   2334 		sc->nvm.acquire = wm_get_nvm_80003;
   2335 		sc->nvm.release = wm_put_nvm_80003;
   2336 		break;
   2337 	case WM_T_ICH8:
   2338 	case WM_T_ICH9:
   2339 	case WM_T_ICH10:
   2340 	case WM_T_PCH:
   2341 	case WM_T_PCH2:
   2342 	case WM_T_PCH_LPT:
   2343 		sc->nvm.read = wm_nvm_read_ich8;
   2344 		/* FLASH */
   2345 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2346 		sc->sc_nvm_wordsize = 2048;
   2347 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2348 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2349 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2350 			aprint_error_dev(sc->sc_dev,
   2351 			    "can't map FLASH registers\n");
   2352 			goto out;
   2353 		}
   2354 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
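		/*
		 * GFPREG holds the first and last sectors of the flash
		 * region: the difference, scaled by the sector size,
		 * gives the region size in bytes, which is then halved
		 * for the two NVM banks and converted to 16-bit words.
		 */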
   2355 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2356 		    ICH_FLASH_SECTOR_SIZE;
   2357 		sc->sc_ich8_flash_bank_size =
   2358 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2359 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2360 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2361 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2362 		sc->sc_flashreg_offset = 0;
   2363 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2364 		sc->phy.release = wm_put_swflag_ich8lan;
   2365 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2366 		sc->nvm.release = wm_put_nvm_ich8lan;
   2367 		break;
   2368 	case WM_T_PCH_SPT:
   2369 	case WM_T_PCH_CNP:
   2370 		sc->nvm.read = wm_nvm_read_spt;
   2371 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2372 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2373 		sc->sc_flasht = sc->sc_st;
   2374 		sc->sc_flashh = sc->sc_sh;
   2375 		sc->sc_ich8_flash_base = 0;
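		/*
		 * STRAP bits 5:1 encode the flash size as
		 * (n + 1) * NVM_SIZE_MULTIPLIER bytes.
		 */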
   2376 		sc->sc_nvm_wordsize =
   2377 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2378 		    * NVM_SIZE_MULTIPLIER;
    2379 		/* The size is in bytes; we want it in words */
   2380 		sc->sc_nvm_wordsize /= 2;
   2381 		/* Assume 2 banks */
   2382 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2383 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2384 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2385 		sc->phy.release = wm_put_swflag_ich8lan;
   2386 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2387 		sc->nvm.release = wm_put_nvm_ich8lan;
   2388 		break;
   2389 	case WM_T_I210:
   2390 	case WM_T_I211:
    2391 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2392 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2393 		if (wm_nvm_flash_presence_i210(sc)) {
   2394 			sc->nvm.read = wm_nvm_read_eerd;
   2395 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2396 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2397 			wm_nvm_set_addrbits_size_eecd(sc);
   2398 		} else {
   2399 			sc->nvm.read = wm_nvm_read_invm;
   2400 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2401 			sc->sc_nvm_wordsize = INVM_SIZE;
   2402 		}
   2403 		sc->phy.acquire = wm_get_phy_82575;
   2404 		sc->phy.release = wm_put_phy_82575;
   2405 		sc->nvm.acquire = wm_get_nvm_80003;
   2406 		sc->nvm.release = wm_put_nvm_80003;
   2407 		break;
   2408 	default:
   2409 		break;
   2410 	}
   2411 
   2412 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2413 	switch (sc->sc_type) {
   2414 	case WM_T_82571:
   2415 	case WM_T_82572:
   2416 		reg = CSR_READ(sc, WMREG_SWSM2);
   2417 		if ((reg & SWSM2_LOCK) == 0) {
   2418 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2419 			force_clear_smbi = true;
   2420 		} else
   2421 			force_clear_smbi = false;
   2422 		break;
   2423 	case WM_T_82573:
   2424 	case WM_T_82574:
   2425 	case WM_T_82583:
   2426 		force_clear_smbi = true;
   2427 		break;
   2428 	default:
   2429 		force_clear_smbi = false;
   2430 		break;
   2431 	}
   2432 	if (force_clear_smbi) {
   2433 		reg = CSR_READ(sc, WMREG_SWSM);
   2434 		if ((reg & SWSM_SMBI) != 0)
   2435 			aprint_error_dev(sc->sc_dev,
   2436 			    "Please update the Bootagent\n");
   2437 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2438 	}
   2439 
   2440 	/*
    2441 	 * Defer printing the EEPROM type until after verifying the checksum.
   2442 	 * This allows the EEPROM type to be printed correctly in the case
   2443 	 * that no EEPROM is attached.
   2444 	 */
   2445 	/*
   2446 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2447 	 * this for later, so we can fail future reads from the EEPROM.
   2448 	 */
   2449 	if (wm_nvm_validate_checksum(sc)) {
   2450 		/*
    2451 		 * Retry the read, because some PCI-e parts fail the
    2452 		 * first check due to the link being in a sleep state.
   2453 		 */
   2454 		if (wm_nvm_validate_checksum(sc))
   2455 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2456 	}
   2457 
   2458 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2459 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2460 	else {
   2461 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2462 		    sc->sc_nvm_wordsize);
   2463 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2464 			aprint_verbose("iNVM");
   2465 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2466 			aprint_verbose("FLASH(HW)");
   2467 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2468 			aprint_verbose("FLASH");
   2469 		else {
   2470 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2471 				eetype = "SPI";
   2472 			else
   2473 				eetype = "MicroWire";
   2474 			aprint_verbose("(%d address bits) %s EEPROM",
   2475 			    sc->sc_nvm_addrbits, eetype);
   2476 		}
   2477 	}
   2478 	wm_nvm_version(sc);
   2479 	aprint_verbose("\n");
   2480 
   2481 	/*
    2482 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
    2483 	 * might be incorrect.
   2484 	 */
   2485 	wm_gmii_setup_phytype(sc, 0, 0);
   2486 
   2487 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2488 	switch (sc->sc_type) {
   2489 	case WM_T_ICH8:
   2490 	case WM_T_ICH9:
   2491 	case WM_T_ICH10:
   2492 	case WM_T_PCH:
   2493 	case WM_T_PCH2:
   2494 	case WM_T_PCH_LPT:
   2495 	case WM_T_PCH_SPT:
   2496 	case WM_T_PCH_CNP:
   2497 		apme_mask = WUC_APME;
   2498 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2499 		if ((eeprom_data & apme_mask) != 0)
   2500 			sc->sc_flags |= WM_F_WOL;
   2501 		break;
   2502 	default:
   2503 		break;
   2504 	}
   2505 
   2506 	/* Reset the chip to a known state. */
   2507 	wm_reset(sc);
   2508 
   2509 	/*
   2510 	 * Check for I21[01] PLL workaround.
   2511 	 *
   2512 	 * Three cases:
   2513 	 * a) Chip is I211.
   2514 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2515 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2516 	 */
   2517 	if (sc->sc_type == WM_T_I211)
   2518 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2519 	if (sc->sc_type == WM_T_I210) {
   2520 		if (!wm_nvm_flash_presence_i210(sc))
   2521 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2522 		else if ((sc->sc_nvm_ver_major < 3)
   2523 		    || ((sc->sc_nvm_ver_major == 3)
   2524 			&& (sc->sc_nvm_ver_minor < 25))) {
   2525 			aprint_verbose_dev(sc->sc_dev,
   2526 			    "ROM image version %d.%d is older than 3.25\n",
   2527 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2528 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2529 		}
   2530 	}
   2531 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2532 		wm_pll_workaround_i210(sc);
   2533 
   2534 	wm_get_wakeup(sc);
   2535 
   2536 	/* Non-AMT based hardware can now take control from firmware */
   2537 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2538 		wm_get_hw_control(sc);
   2539 
   2540 	/*
    2541 	 * Read the Ethernet address from the EEPROM, unless it was
    2542 	 * already found in the device properties.
   2543 	 */
   2544 	ea = prop_dictionary_get(dict, "mac-address");
   2545 	if (ea != NULL) {
   2546 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2547 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2548 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2549 	} else {
   2550 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2551 			aprint_error_dev(sc->sc_dev,
   2552 			    "unable to read Ethernet address\n");
   2553 			goto out;
   2554 		}
   2555 	}
   2556 
   2557 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2558 	    ether_sprintf(enaddr));
   2559 
   2560 	/*
   2561 	 * Read the config info from the EEPROM, and set up various
   2562 	 * bits in the control registers based on their contents.
   2563 	 */
   2564 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2565 	if (pn != NULL) {
   2566 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2567 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2568 	} else {
   2569 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2570 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2571 			goto out;
   2572 		}
   2573 	}
   2574 
   2575 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2576 	if (pn != NULL) {
   2577 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2578 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2579 	} else {
   2580 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2581 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2582 			goto out;
   2583 		}
   2584 	}
   2585 
   2586 	/* check for WM_F_WOL */
   2587 	switch (sc->sc_type) {
   2588 	case WM_T_82542_2_0:
   2589 	case WM_T_82542_2_1:
   2590 	case WM_T_82543:
   2591 		/* dummy? */
   2592 		eeprom_data = 0;
   2593 		apme_mask = NVM_CFG3_APME;
   2594 		break;
   2595 	case WM_T_82544:
   2596 		apme_mask = NVM_CFG2_82544_APM_EN;
   2597 		eeprom_data = cfg2;
   2598 		break;
   2599 	case WM_T_82546:
   2600 	case WM_T_82546_3:
   2601 	case WM_T_82571:
   2602 	case WM_T_82572:
   2603 	case WM_T_82573:
   2604 	case WM_T_82574:
   2605 	case WM_T_82583:
   2606 	case WM_T_80003:
   2607 	case WM_T_82575:
   2608 	case WM_T_82576:
   2609 		apme_mask = NVM_CFG3_APME;
   2610 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2611 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2612 		break;
   2613 	case WM_T_82580:
   2614 	case WM_T_I350:
   2615 	case WM_T_I354:
   2616 	case WM_T_I210:
   2617 	case WM_T_I211:
   2618 		apme_mask = NVM_CFG3_APME;
   2619 		wm_nvm_read(sc,
   2620 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2621 		    1, &eeprom_data);
   2622 		break;
   2623 	case WM_T_ICH8:
   2624 	case WM_T_ICH9:
   2625 	case WM_T_ICH10:
   2626 	case WM_T_PCH:
   2627 	case WM_T_PCH2:
   2628 	case WM_T_PCH_LPT:
   2629 	case WM_T_PCH_SPT:
   2630 	case WM_T_PCH_CNP:
    2631 		/* Already checked before wm_reset() */
   2632 		apme_mask = eeprom_data = 0;
   2633 		break;
   2634 	default: /* XXX 82540 */
   2635 		apme_mask = NVM_CFG3_APME;
   2636 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2637 		break;
   2638 	}
    2639 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2640 	if ((eeprom_data & apme_mask) != 0)
   2641 		sc->sc_flags |= WM_F_WOL;
   2642 
   2643 	/*
   2644 	 * We have the eeprom settings, now apply the special cases
   2645 	 * where the eeprom may be wrong or the board won't support
    2646 	 * wake on lan on a particular port.
   2647 	 */
   2648 	switch (sc->sc_pcidevid) {
   2649 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2650 		sc->sc_flags &= ~WM_F_WOL;
   2651 		break;
   2652 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2653 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2654 		/* Wake events only supported on port A for dual fiber
   2655 		 * regardless of eeprom setting */
   2656 		if (sc->sc_funcid == 1)
   2657 			sc->sc_flags &= ~WM_F_WOL;
   2658 		break;
   2659 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2660 		/* If quad port adapter, disable WoL on all but port A */
   2661 		if (sc->sc_funcid != 0)
   2662 			sc->sc_flags &= ~WM_F_WOL;
   2663 		break;
   2664 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2665 		/* Wake events only supported on port A for dual fiber
   2666 		 * regardless of eeprom setting */
   2667 		if (sc->sc_funcid == 1)
   2668 			sc->sc_flags &= ~WM_F_WOL;
   2669 		break;
   2670 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2671 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2672 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2673 		/* If quad port adapter, disable WoL on all but port A */
   2674 		if (sc->sc_funcid != 0)
   2675 			sc->sc_flags &= ~WM_F_WOL;
   2676 		break;
   2677 	}
   2678 
   2679 	if (sc->sc_type >= WM_T_82575) {
   2680 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2681 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2682 			    nvmword);
   2683 			if ((sc->sc_type == WM_T_82575) ||
   2684 			    (sc->sc_type == WM_T_82576)) {
   2685 				/* Check NVM for autonegotiation */
   2686 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2687 				    != 0)
   2688 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2689 			}
   2690 			if ((sc->sc_type == WM_T_82575) ||
   2691 			    (sc->sc_type == WM_T_I350)) {
   2692 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2693 					sc->sc_flags |= WM_F_MAS;
   2694 			}
   2695 		}
   2696 	}
   2697 
   2698 	/*
    2699 	 * XXX need special handling for some multi-port cards
    2700 	 * to disable a particular port.
   2701 	 */
   2702 
   2703 	if (sc->sc_type >= WM_T_82544) {
   2704 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2705 		if (pn != NULL) {
   2706 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2707 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2708 		} else {
   2709 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2710 				aprint_error_dev(sc->sc_dev,
   2711 				    "unable to read SWDPIN\n");
   2712 				goto out;
   2713 			}
   2714 		}
   2715 	}
   2716 
   2717 	if (cfg1 & NVM_CFG1_ILOS)
   2718 		sc->sc_ctrl |= CTRL_ILOS;
   2719 
   2720 	/*
   2721 	 * XXX
    2722 	 * This code isn't correct because pins 2 and 3 are located
    2723 	 * in different positions on newer chips. Check all datasheets.
    2724 	 *
    2725 	 * Until this is resolved, only handle chips up to the 82580.
   2726 	 */
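         	/*
         	 * Copy the SWDPIO (software-definable pin direction) and
         	 * SWDPIN (pin value) nibbles from the NVM word into the
         	 * corresponding CTRL register fields below.
         	 */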
   2727 	if (sc->sc_type <= WM_T_82580) {
   2728 		if (sc->sc_type >= WM_T_82544) {
   2729 			sc->sc_ctrl |=
   2730 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2731 			    CTRL_SWDPIO_SHIFT;
   2732 			sc->sc_ctrl |=
   2733 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2734 			    CTRL_SWDPINS_SHIFT;
   2735 		} else {
   2736 			sc->sc_ctrl |=
   2737 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2738 			    CTRL_SWDPIO_SHIFT;
   2739 		}
   2740 	}
   2741 
   2742 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2743 		wm_nvm_read(sc,
   2744 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2745 		    1, &nvmword);
   2746 		if (nvmword & NVM_CFG3_ILOS)
   2747 			sc->sc_ctrl |= CTRL_ILOS;
   2748 	}
   2749 
   2750 #if 0
   2751 	if (sc->sc_type >= WM_T_82544) {
   2752 		if (cfg1 & NVM_CFG1_IPS0)
   2753 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2754 		if (cfg1 & NVM_CFG1_IPS1)
   2755 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2756 		sc->sc_ctrl_ext |=
   2757 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2758 		    CTRL_EXT_SWDPIO_SHIFT;
   2759 		sc->sc_ctrl_ext |=
   2760 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2761 		    CTRL_EXT_SWDPINS_SHIFT;
   2762 	} else {
   2763 		sc->sc_ctrl_ext |=
   2764 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2765 		    CTRL_EXT_SWDPIO_SHIFT;
   2766 	}
   2767 #endif
   2768 
   2769 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2770 #if 0
   2771 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2772 #endif
   2773 
   2774 	if (sc->sc_type == WM_T_PCH) {
   2775 		uint16_t val;
   2776 
   2777 		/* Save the NVM K1 bit setting */
   2778 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2779 
   2780 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2781 			sc->sc_nvm_k1_enabled = 1;
   2782 		else
   2783 			sc->sc_nvm_k1_enabled = 0;
   2784 	}
   2785 
   2786 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2787 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2788 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2789 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2790 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2791 	    || sc->sc_type == WM_T_82573
   2792 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2793 		/* Copper only */
   2794 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2795 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2796 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2797 	    || (sc->sc_type == WM_T_I211)) {
   2798 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2799 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2800 		switch (link_mode) {
   2801 		case CTRL_EXT_LINK_MODE_1000KX:
   2802 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2803 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2804 			break;
   2805 		case CTRL_EXT_LINK_MODE_SGMII:
   2806 			if (wm_sgmii_uses_mdio(sc)) {
   2807 				aprint_normal_dev(sc->sc_dev,
   2808 				    "SGMII(MDIO)\n");
   2809 				sc->sc_flags |= WM_F_SGMII;
   2810 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2811 				break;
   2812 			}
   2813 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2814 			/*FALLTHROUGH*/
   2815 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2816 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2817 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2818 				if (link_mode
   2819 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2820 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2821 					sc->sc_flags |= WM_F_SGMII;
   2822 					aprint_verbose_dev(sc->sc_dev,
   2823 					    "SGMII\n");
   2824 				} else {
   2825 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2826 					aprint_verbose_dev(sc->sc_dev,
   2827 					    "SERDES\n");
   2828 				}
   2829 				break;
   2830 			}
   2831 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2832 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2833 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2834 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2835 				sc->sc_flags |= WM_F_SGMII;
   2836 			}
   2837 			/* Do not change link mode for 100BaseFX */
   2838 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2839 				break;
   2840 
   2841 			/* Change current link mode setting */
   2842 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2843 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2844 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2845 			else
   2846 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2847 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2848 			break;
   2849 		case CTRL_EXT_LINK_MODE_GMII:
   2850 		default:
   2851 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2852 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2853 			break;
   2854 		}
   2855 
    2857 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2858 			reg |= CTRL_EXT_I2C_ENA;
    2859 		else
    2860 			reg &= ~CTRL_EXT_I2C_ENA;
   2861 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2862 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2863 			wm_gmii_setup_phytype(sc, 0, 0);
   2864 			wm_reset_mdicnfg_82580(sc);
   2865 		}
   2866 	} else if (sc->sc_type < WM_T_82543 ||
   2867 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2868 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2869 			aprint_error_dev(sc->sc_dev,
   2870 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2871 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2872 		}
   2873 	} else {
   2874 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2875 			aprint_error_dev(sc->sc_dev,
   2876 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2877 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2878 		}
   2879 	}
   2880 
   2881 	if (sc->sc_type >= WM_T_PCH2)
   2882 		sc->sc_flags |= WM_F_EEE;
   2883 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2884 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2885 		/* XXX: Need special handling for I354. (not yet) */
   2886 		if (sc->sc_type != WM_T_I354)
   2887 			sc->sc_flags |= WM_F_EEE;
   2888 	}
   2889 
    2890 	/*
    2891 	 * The I350 has a bug where it always strips the CRC whether
    2892 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
    2893 	 */
   2894 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2895 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2896 		sc->sc_flags |= WM_F_CRC_STRIP;
   2897 
   2898 	/* Set device properties (macflags) */
   2899 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2900 
   2901 	if (sc->sc_flags != 0) {
   2902 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2903 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2904 	}
   2905 
   2906 #ifdef WM_MPSAFE
   2907 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2908 #else
   2909 	sc->sc_core_lock = NULL;
   2910 #endif
   2911 
   2912 	/* Initialize the media structures accordingly. */
   2913 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2914 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2915 	else
   2916 		wm_tbi_mediainit(sc); /* All others */
   2917 
   2918 	ifp = &sc->sc_ethercom.ec_if;
   2919 	xname = device_xname(sc->sc_dev);
   2920 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2921 	ifp->if_softc = sc;
   2922 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2923 #ifdef WM_MPSAFE
   2924 	ifp->if_extflags = IFEF_MPSAFE;
   2925 #endif
   2926 	ifp->if_ioctl = wm_ioctl;
   2927 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2928 		ifp->if_start = wm_nq_start;
    2929 		/*
    2930 		 * When the number of CPUs is one and the controller can use
    2931 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2932 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2933 		 * other for link status changes.
    2934 		 * In this situation, wm_nq_transmit() is disadvantageous
    2935 		 * because of wm_select_txqueue() and pcq(9) overhead.
    2936 		 */
   2937 		if (wm_is_using_multiqueue(sc))
   2938 			ifp->if_transmit = wm_nq_transmit;
   2939 	} else {
   2940 		ifp->if_start = wm_start;
    2941 		/*
    2942 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
    2943 		 */
   2944 		if (wm_is_using_multiqueue(sc))
   2945 			ifp->if_transmit = wm_transmit;
   2946 	}
    2947 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as its watchdog. */
   2948 	ifp->if_init = wm_init;
   2949 	ifp->if_stop = wm_stop;
   2950 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2951 	IFQ_SET_READY(&ifp->if_snd);
   2952 
   2953 	/* Check for jumbo frame */
   2954 	switch (sc->sc_type) {
   2955 	case WM_T_82573:
   2956 		/* XXX limited to 9234 if ASPM is disabled */
   2957 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2958 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2959 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2960 		break;
   2961 	case WM_T_82571:
   2962 	case WM_T_82572:
   2963 	case WM_T_82574:
   2964 	case WM_T_82583:
   2965 	case WM_T_82575:
   2966 	case WM_T_82576:
   2967 	case WM_T_82580:
   2968 	case WM_T_I350:
   2969 	case WM_T_I354:
   2970 	case WM_T_I210:
   2971 	case WM_T_I211:
   2972 	case WM_T_80003:
   2973 	case WM_T_ICH9:
   2974 	case WM_T_ICH10:
   2975 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2976 	case WM_T_PCH_LPT:
   2977 	case WM_T_PCH_SPT:
   2978 	case WM_T_PCH_CNP:
   2979 		/* XXX limited to 9234 */
   2980 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2981 		break;
   2982 	case WM_T_PCH:
   2983 		/* XXX limited to 4096 */
   2984 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2985 		break;
   2986 	case WM_T_82542_2_0:
   2987 	case WM_T_82542_2_1:
   2988 	case WM_T_ICH8:
   2989 		/* No support for jumbo frame */
   2990 		break;
   2991 	default:
   2992 		/* ETHER_MAX_LEN_JUMBO */
   2993 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2994 		break;
   2995 	}
   2996 
    2997 	/* If we're an i82543 or greater, we can support VLANs. */
   2998 	if (sc->sc_type >= WM_T_82543) {
   2999 		sc->sc_ethercom.ec_capabilities |=
   3000 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3001 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3002 	}
   3003 
   3004 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3005 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3006 
    3007 	/*
    3008 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
    3009 	 * on i82543 and later.
    3010 	 */
   3011 	if (sc->sc_type >= WM_T_82543) {
   3012 		ifp->if_capabilities |=
   3013 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3014 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3015 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3016 		    IFCAP_CSUM_TCPv6_Tx |
   3017 		    IFCAP_CSUM_UDPv6_Tx;
   3018 	}
   3019 
   3020 	/*
   3021 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3022 	 *
   3023 	 *	82541GI (8086:1076) ... no
   3024 	 *	82572EI (8086:10b9) ... yes
   3025 	 */
   3026 	if (sc->sc_type >= WM_T_82571) {
   3027 		ifp->if_capabilities |=
   3028 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3029 	}
   3030 
    3031 	/*
    3032 	 * If we're an i82544 or greater (except i82547), we can do
    3033 	 * TCP segmentation offload.
    3034 	 */
   3035 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3036 		ifp->if_capabilities |= IFCAP_TSOv4;
   3037 	}
   3038 
   3039 	if (sc->sc_type >= WM_T_82571) {
   3040 		ifp->if_capabilities |= IFCAP_TSOv6;
   3041 	}
   3042 
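         	/*
         	 * Note: the *_intr_process_limit values are intended to bound
         	 * how many descriptors are handled directly in interrupt
         	 * context, while the plain *_process_limit values bound work
         	 * done in the deferred (softint) path.
         	 */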
   3043 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3044 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3045 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3046 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3047 
   3048 	/* Attach the interface. */
   3049 	error = if_initialize(ifp);
   3050 	if (error != 0) {
   3051 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3052 		    error);
   3053 		return; /* Error */
   3054 	}
   3055 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3056 	ether_ifattach(ifp, enaddr);
   3057 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3058 	if_register(ifp);
   3059 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3060 	    RND_FLAG_DEFAULT);
   3061 
   3062 #ifdef WM_EVENT_COUNTERS
   3063 	/* Attach event counters. */
   3064 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3065 	    NULL, xname, "linkintr");
   3066 
   3067 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3068 	    NULL, xname, "tx_xoff");
   3069 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3070 	    NULL, xname, "tx_xon");
   3071 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3072 	    NULL, xname, "rx_xoff");
   3073 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3074 	    NULL, xname, "rx_xon");
   3075 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3076 	    NULL, xname, "rx_macctl");
   3077 #endif /* WM_EVENT_COUNTERS */
   3078 
   3079 	sc->sc_txrx_use_workqueue = false;
   3080 
   3081 	wm_init_sysctls(sc);
   3082 
   3083 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3084 		pmf_class_network_register(self, ifp);
   3085 	else
   3086 		aprint_error_dev(self, "couldn't establish power handler\n");
   3087 
   3088 	sc->sc_flags |= WM_F_ATTACHED;
   3089 out:
   3090 	return;
   3091 }
   3092 
   3093 /* The detach function (ca_detach) */
   3094 static int
   3095 wm_detach(device_t self, int flags __unused)
   3096 {
   3097 	struct wm_softc *sc = device_private(self);
   3098 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3099 	int i;
   3100 
   3101 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3102 		return 0;
   3103 
   3104 	/* Stop the interface. Callouts are stopped in it. */
   3105 	wm_stop(ifp, 1);
   3106 
   3107 	pmf_device_deregister(self);
   3108 
   3109 	sysctl_teardown(&sc->sc_sysctllog);
   3110 
   3111 #ifdef WM_EVENT_COUNTERS
   3112 	evcnt_detach(&sc->sc_ev_linkintr);
   3113 
   3114 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3115 	evcnt_detach(&sc->sc_ev_tx_xon);
   3116 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3117 	evcnt_detach(&sc->sc_ev_rx_xon);
   3118 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3119 #endif /* WM_EVENT_COUNTERS */
   3120 
   3121 	rnd_detach_source(&sc->rnd_source);
   3122 
   3123 	/* Tell the firmware about the release */
   3124 	WM_CORE_LOCK(sc);
   3125 	wm_release_manageability(sc);
   3126 	wm_release_hw_control(sc);
   3127 	wm_enable_wakeup(sc);
   3128 	WM_CORE_UNLOCK(sc);
   3129 
   3130 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3131 
   3132 	ether_ifdetach(ifp);
   3133 	if_detach(ifp);
   3134 	if_percpuq_destroy(sc->sc_ipq);
   3135 
   3136 	/* Delete all remaining media. */
   3137 	ifmedia_fini(&sc->sc_mii.mii_media);
   3138 
   3139 	/* Unload RX dmamaps and free mbufs */
   3140 	for (i = 0; i < sc->sc_nqueues; i++) {
   3141 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3142 		mutex_enter(rxq->rxq_lock);
   3143 		wm_rxdrain(rxq);
   3144 		mutex_exit(rxq->rxq_lock);
   3145 	}
   3146 	/* Must unlock here */
   3147 
   3148 	/* Disestablish the interrupt handler */
   3149 	for (i = 0; i < sc->sc_nintrs; i++) {
   3150 		if (sc->sc_ihs[i] != NULL) {
   3151 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3152 			sc->sc_ihs[i] = NULL;
   3153 		}
   3154 	}
   3155 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3156 
    3157 	/* wm_stop() ensures the workqueue is stopped. */
   3158 	workqueue_destroy(sc->sc_queue_wq);
   3159 
   3160 	for (i = 0; i < sc->sc_nqueues; i++)
   3161 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3162 
   3163 	wm_free_txrx_queues(sc);
   3164 
   3165 	/* Unmap the registers */
   3166 	if (sc->sc_ss) {
   3167 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3168 		sc->sc_ss = 0;
   3169 	}
   3170 	if (sc->sc_ios) {
   3171 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3172 		sc->sc_ios = 0;
   3173 	}
   3174 	if (sc->sc_flashs) {
   3175 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3176 		sc->sc_flashs = 0;
   3177 	}
   3178 
   3179 	if (sc->sc_core_lock)
   3180 		mutex_obj_free(sc->sc_core_lock);
   3181 	if (sc->sc_ich_phymtx)
   3182 		mutex_obj_free(sc->sc_ich_phymtx);
   3183 	if (sc->sc_ich_nvmmtx)
   3184 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3185 
   3186 	return 0;
   3187 }
   3188 
   3189 static bool
   3190 wm_suspend(device_t self, const pmf_qual_t *qual)
   3191 {
   3192 	struct wm_softc *sc = device_private(self);
   3193 
   3194 	wm_release_manageability(sc);
   3195 	wm_release_hw_control(sc);
   3196 	wm_enable_wakeup(sc);
   3197 
   3198 	return true;
   3199 }
   3200 
   3201 static bool
   3202 wm_resume(device_t self, const pmf_qual_t *qual)
   3203 {
   3204 	struct wm_softc *sc = device_private(self);
   3205 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3206 	pcireg_t reg;
   3207 	char buf[256];
   3208 
   3209 	reg = CSR_READ(sc, WMREG_WUS);
   3210 	if (reg != 0) {
   3211 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3212 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3213 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3214 	}
   3215 
   3216 	if (sc->sc_type >= WM_T_PCH2)
   3217 		wm_resume_workarounds_pchlan(sc);
   3218 	if ((ifp->if_flags & IFF_UP) == 0) {
   3219 		wm_reset(sc);
   3220 		/* Non-AMT based hardware can now take control from firmware */
   3221 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3222 			wm_get_hw_control(sc);
   3223 		wm_init_manageability(sc);
   3224 	} else {
   3225 		/*
   3226 		 * We called pmf_class_network_register(), so if_init() is
   3227 		 * automatically called when IFF_UP. wm_reset(),
   3228 		 * wm_get_hw_control() and wm_init_manageability() are called
   3229 		 * via wm_init().
   3230 		 */
   3231 	}
   3232 
   3233 	return true;
   3234 }
   3235 
   3236 /*
   3237  * wm_watchdog:		[ifnet interface function]
   3238  *
   3239  *	Watchdog timer handler.
   3240  */
   3241 static void
   3242 wm_watchdog(struct ifnet *ifp)
   3243 {
   3244 	int qid;
   3245 	struct wm_softc *sc = ifp->if_softc;
   3246 	uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
   3247 
   3248 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3249 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3250 
   3251 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3252 	}
   3253 
    3254 	/* If any of the queues hung up, reset the interface. */
   3255 	if (hang_queue != 0) {
   3256 		(void)wm_init(ifp);
   3257 
    3258 		/*
    3259 		 * There is still some upper-layer processing that calls
    3260 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
    3261 		 */
   3262 		/* Try to get more packets going. */
   3263 		ifp->if_start(ifp);
   3264 	}
   3265 }
   3266 
   3267 
   3268 static void
   3269 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3270 {
   3271 
   3272 	mutex_enter(txq->txq_lock);
   3273 	if (txq->txq_sending &&
   3274 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3275 		wm_watchdog_txq_locked(ifp, txq, hang);
   3276 
   3277 	mutex_exit(txq->txq_lock);
   3278 }
   3279 
   3280 static void
   3281 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3282     uint16_t *hang)
   3283 {
   3284 	struct wm_softc *sc = ifp->if_softc;
   3285 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3286 
   3287 	KASSERT(mutex_owned(txq->txq_lock));
   3288 
   3289 	/*
   3290 	 * Since we're using delayed interrupts, sweep up
   3291 	 * before we report an error.
   3292 	 */
   3293 	wm_txeof(txq, UINT_MAX);
   3294 
   3295 	if (txq->txq_sending)
   3296 		*hang |= __BIT(wmq->wmq_id);
   3297 
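         	/*
         	 * If every descriptor is free yet the queue was still marked
         	 * as sending, we lost the completion interrupt rather than the
         	 * hardware getting stuck.
         	 */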
   3298 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3299 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3300 		    device_xname(sc->sc_dev));
   3301 	} else {
   3302 #ifdef WM_DEBUG
   3303 		int i, j;
   3304 		struct wm_txsoft *txs;
   3305 #endif
   3306 		log(LOG_ERR,
   3307 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3308 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3309 		    txq->txq_next);
   3310 		if_statinc(ifp, if_oerrors);
   3311 #ifdef WM_DEBUG
   3312 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3313 		    i = WM_NEXTTXS(txq, i)) {
   3314 			txs = &txq->txq_soft[i];
   3315 			printf("txs %d tx %d -> %d\n",
   3316 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3317 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3318 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3319 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3320 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3321 					printf("\t %#08x%08x\n",
   3322 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3323 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3324 				} else {
   3325 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3326 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3327 					    txq->txq_descs[j].wtx_addr.wa_low);
   3328 					printf("\t %#04x%02x%02x%08x\n",
   3329 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3330 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3331 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3332 					    txq->txq_descs[j].wtx_cmdlen);
   3333 				}
   3334 				if (j == txs->txs_lastdesc)
   3335 					break;
   3336 			}
   3337 		}
   3338 #endif
   3339 	}
   3340 }
   3341 
   3342 /*
   3343  * wm_tick:
   3344  *
   3345  *	One second timer, used to check link status, sweep up
   3346  *	completed transmit jobs, etc.
   3347  */
   3348 static void
   3349 wm_tick(void *arg)
   3350 {
   3351 	struct wm_softc *sc = arg;
   3352 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3353 #ifndef WM_MPSAFE
   3354 	int s = splnet();
   3355 #endif
   3356 
   3357 	WM_CORE_LOCK(sc);
   3358 
   3359 	if (sc->sc_core_stopping) {
   3360 		WM_CORE_UNLOCK(sc);
   3361 #ifndef WM_MPSAFE
   3362 		splx(s);
   3363 #endif
   3364 		return;
   3365 	}
   3366 
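         	/*
         	 * These statistics registers are clear-on-read; accumulate
         	 * them into the event counters.
         	 */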
   3367 	if (sc->sc_type >= WM_T_82542_2_1) {
   3368 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3369 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3370 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3371 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3372 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3373 	}
   3374 
   3375 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3376 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3377 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3378 	    + CSR_READ(sc, WMREG_CRCERRS)
   3379 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3380 	    + CSR_READ(sc, WMREG_SYMERRC)
   3381 	    + CSR_READ(sc, WMREG_RXERRC)
   3382 	    + CSR_READ(sc, WMREG_SEC)
   3383 	    + CSR_READ(sc, WMREG_CEXTERR)
   3384 	    + CSR_READ(sc, WMREG_RLEC));
    3385 	/*
    3386 	 * WMREG_RNBC is incremented when no receive buffers are available
    3387 	 * in host memory. It is not a count of dropped packets, because
    3388 	 * the ethernet controller can still receive packets in that case
    3389 	 * as long as there is space in the PHY's FIFO.
    3390 	 *
    3391 	 * If you want to track WMREG_RNBC, use your own EVCNT instead of
    3392 	 * if_iqdrops.
    3393 	 */
   3394 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3395 	IF_STAT_PUTREF(ifp);
   3396 
   3397 	if (sc->sc_flags & WM_F_HAS_MII)
   3398 		mii_tick(&sc->sc_mii);
   3399 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3400 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3401 		wm_serdes_tick(sc);
   3402 	else
   3403 		wm_tbi_tick(sc);
   3404 
   3405 	WM_CORE_UNLOCK(sc);
   3406 
   3407 	wm_watchdog(ifp);
   3408 
   3409 	callout_schedule(&sc->sc_tick_ch, hz);
   3410 }
   3411 
   3412 static int
   3413 wm_ifflags_cb(struct ethercom *ec)
   3414 {
   3415 	struct ifnet *ifp = &ec->ec_if;
   3416 	struct wm_softc *sc = ifp->if_softc;
   3417 	u_short iffchange;
   3418 	int ecchange;
   3419 	bool needreset = false;
   3420 	int rc = 0;
   3421 
   3422 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3423 		device_xname(sc->sc_dev), __func__));
   3424 
   3425 	WM_CORE_LOCK(sc);
   3426 
   3427 	/*
   3428 	 * Check for if_flags.
   3429 	 * Main usage is to prevent linkdown when opening bpf.
   3430 	 */
   3431 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3432 	sc->sc_if_flags = ifp->if_flags;
   3433 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3434 		needreset = true;
   3435 		goto ec;
   3436 	}
   3437 
   3438 	/* iff related updates */
   3439 	if ((iffchange & IFF_PROMISC) != 0)
   3440 		wm_set_filter(sc);
   3441 
   3442 	wm_set_vlan(sc);
   3443 
   3444 ec:
   3445 	/* Check for ec_capenable. */
   3446 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3447 	sc->sc_ec_capenable = ec->ec_capenable;
   3448 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3449 		needreset = true;
   3450 		goto out;
   3451 	}
   3452 
   3453 	/* ec related updates */
   3454 	wm_set_eee(sc);
   3455 
   3456 out:
   3457 	if (needreset)
   3458 		rc = ENETRESET;
   3459 	WM_CORE_UNLOCK(sc);
   3460 
   3461 	return rc;
   3462 }
   3463 
   3464 /*
   3465  * wm_ioctl:		[ifnet interface function]
   3466  *
   3467  *	Handle control requests from the operator.
   3468  */
   3469 static int
   3470 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3471 {
   3472 	struct wm_softc *sc = ifp->if_softc;
   3473 	struct ifreq *ifr = (struct ifreq *)data;
   3474 	struct ifaddr *ifa = (struct ifaddr *)data;
   3475 	struct sockaddr_dl *sdl;
   3476 	int s, error;
   3477 
   3478 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3479 		device_xname(sc->sc_dev), __func__));
   3480 
   3481 #ifndef WM_MPSAFE
   3482 	s = splnet();
   3483 #endif
   3484 	switch (cmd) {
   3485 	case SIOCSIFMEDIA:
   3486 		WM_CORE_LOCK(sc);
   3487 		/* Flow control requires full-duplex mode. */
   3488 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3489 		    (ifr->ifr_media & IFM_FDX) == 0)
   3490 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3491 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3492 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3493 				/* We can do both TXPAUSE and RXPAUSE. */
   3494 				ifr->ifr_media |=
   3495 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3496 			}
   3497 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3498 		}
   3499 		WM_CORE_UNLOCK(sc);
   3500 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3501 		break;
   3502 	case SIOCINITIFADDR:
   3503 		WM_CORE_LOCK(sc);
   3504 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3505 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3506 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3507 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3508 			/* Unicast address is the first multicast entry */
   3509 			wm_set_filter(sc);
   3510 			error = 0;
   3511 			WM_CORE_UNLOCK(sc);
   3512 			break;
   3513 		}
   3514 		WM_CORE_UNLOCK(sc);
   3515 		/*FALLTHROUGH*/
   3516 	default:
   3517 #ifdef WM_MPSAFE
   3518 		s = splnet();
   3519 #endif
   3520 		/* It may call wm_start, so unlock here */
   3521 		error = ether_ioctl(ifp, cmd, data);
   3522 #ifdef WM_MPSAFE
   3523 		splx(s);
   3524 #endif
   3525 		if (error != ENETRESET)
   3526 			break;
   3527 
   3528 		error = 0;
   3529 
   3530 		if (cmd == SIOCSIFCAP)
   3531 			error = (*ifp->if_init)(ifp);
   3532 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3533 			;
   3534 		else if (ifp->if_flags & IFF_RUNNING) {
   3535 			/*
   3536 			 * Multicast list has changed; set the hardware filter
   3537 			 * accordingly.
   3538 			 */
   3539 			WM_CORE_LOCK(sc);
   3540 			wm_set_filter(sc);
   3541 			WM_CORE_UNLOCK(sc);
   3542 		}
   3543 		break;
   3544 	}
   3545 
   3546 #ifndef WM_MPSAFE
   3547 	splx(s);
   3548 #endif
   3549 	return error;
   3550 }
   3551 
   3552 /* MAC address related */
   3553 
    3554 /*
    3555  * Get the offset of the MAC address and return it.
    3556  * If an error occurred, use offset 0.
    3557  */
   3558 static uint16_t
   3559 wm_check_alt_mac_addr(struct wm_softc *sc)
   3560 {
   3561 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3562 	uint16_t offset = NVM_OFF_MACADDR;
   3563 
   3564 	/* Try to read alternative MAC address pointer */
   3565 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3566 		return 0;
   3567 
    3568 	/* Check whether the pointer is valid or not. */
   3569 	if ((offset == 0x0000) || (offset == 0xffff))
   3570 		return 0;
   3571 
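         	/* Advance to this function's entry in the alternative MAC area. */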
   3572 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3573 	/*
    3574 	 * Check whether the alternative MAC address is valid or not.
    3575 	 * Some cards have a non-0xffff pointer but those don't actually
    3576 	 * use an alternative MAC address.
    3577 	 *
    3578 	 * Check that the broadcast bit (bit 0 of the first octet) is clear.
    3579 	 */
   3580 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3581 		if (((myea[0] & 0xff) & 0x01) == 0)
   3582 			return offset; /* Found */
   3583 
   3584 	/* Not found */
   3585 	return 0;
   3586 }
   3587 
   3588 static int
   3589 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3590 {
   3591 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3592 	uint16_t offset = NVM_OFF_MACADDR;
   3593 	int do_invert = 0;
   3594 
   3595 	switch (sc->sc_type) {
   3596 	case WM_T_82580:
   3597 	case WM_T_I350:
   3598 	case WM_T_I354:
   3599 		/* EEPROM Top Level Partitioning */
   3600 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3601 		break;
   3602 	case WM_T_82571:
   3603 	case WM_T_82575:
   3604 	case WM_T_82576:
   3605 	case WM_T_80003:
   3606 	case WM_T_I210:
   3607 	case WM_T_I211:
   3608 		offset = wm_check_alt_mac_addr(sc);
   3609 		if (offset == 0)
   3610 			if ((sc->sc_funcid & 0x01) == 1)
   3611 				do_invert = 1;
   3612 		break;
   3613 	default:
   3614 		if ((sc->sc_funcid & 0x01) == 1)
   3615 			do_invert = 1;
   3616 		break;
   3617 	}
   3618 
   3619 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3620 		goto bad;
   3621 
   3622 	enaddr[0] = myea[0] & 0xff;
   3623 	enaddr[1] = myea[0] >> 8;
   3624 	enaddr[2] = myea[1] & 0xff;
   3625 	enaddr[3] = myea[1] >> 8;
   3626 	enaddr[4] = myea[2] & 0xff;
   3627 	enaddr[5] = myea[2] >> 8;
   3628 
   3629 	/*
   3630 	 * Toggle the LSB of the MAC address on the second port
   3631 	 * of some dual port cards.
   3632 	 */
   3633 	if (do_invert != 0)
   3634 		enaddr[5] ^= 1;
   3635 
   3636 	return 0;
   3637 
   3638  bad:
   3639 	return -1;
   3640 }
   3641 
    3642 /*
    3643  * wm_set_ral:
    3644  *
    3645  *	Set an entry in the receive address list.
    3646  */
   3647 static void
   3648 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3649 {
   3650 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3651 	uint32_t wlock_mac;
   3652 	int rv;
   3653 
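         	/*
         	 * The address is packed little-endian: the first four octets
         	 * go into RAL and the last two into the low half of RAH, with
         	 * RAL_AV marking the entry valid. A NULL enaddr clears the
         	 * entry.
         	 */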
   3654 	if (enaddr != NULL) {
   3655 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3656 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3657 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3658 		ral_hi |= RAL_AV;
   3659 	} else {
   3660 		ral_lo = 0;
   3661 		ral_hi = 0;
   3662 	}
   3663 
   3664 	switch (sc->sc_type) {
   3665 	case WM_T_82542_2_0:
   3666 	case WM_T_82542_2_1:
   3667 	case WM_T_82543:
   3668 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3669 		CSR_WRITE_FLUSH(sc);
   3670 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3671 		CSR_WRITE_FLUSH(sc);
   3672 		break;
   3673 	case WM_T_PCH2:
   3674 	case WM_T_PCH_LPT:
   3675 	case WM_T_PCH_SPT:
   3676 	case WM_T_PCH_CNP:
   3677 		if (idx == 0) {
   3678 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3679 			CSR_WRITE_FLUSH(sc);
   3680 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3681 			CSR_WRITE_FLUSH(sc);
   3682 			return;
   3683 		}
   3684 		if (sc->sc_type != WM_T_PCH2) {
   3685 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3686 			    FWSM_WLOCK_MAC);
   3687 			addrl = WMREG_SHRAL(idx - 1);
   3688 			addrh = WMREG_SHRAH(idx - 1);
   3689 		} else {
   3690 			wlock_mac = 0;
   3691 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3692 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3693 		}
   3694 
   3695 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3696 			rv = wm_get_swflag_ich8lan(sc);
   3697 			if (rv != 0)
   3698 				return;
   3699 			CSR_WRITE(sc, addrl, ral_lo);
   3700 			CSR_WRITE_FLUSH(sc);
   3701 			CSR_WRITE(sc, addrh, ral_hi);
   3702 			CSR_WRITE_FLUSH(sc);
   3703 			wm_put_swflag_ich8lan(sc);
   3704 		}
   3705 
   3706 		break;
   3707 	default:
   3708 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3709 		CSR_WRITE_FLUSH(sc);
   3710 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3711 		CSR_WRITE_FLUSH(sc);
   3712 		break;
   3713 	}
   3714 }
   3715 
   3716 /*
   3717  * wm_mchash:
   3718  *
   3719  *	Compute the hash of the multicast address for the 4096-bit
   3720  *	multicast filter.
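          *
          *	ICH/PCH chips use a smaller 1024-bit table, so only a 10-bit
          *	hash is returned for them (12 bits otherwise). Both variants
          *	combine bits of enaddr[4] and enaddr[5], with the shift
          *	amounts selected by sc_mchash_type.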
   3721  */
   3722 static uint32_t
   3723 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3724 {
   3725 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3726 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3727 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3728 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3729 	uint32_t hash;
   3730 
   3731 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3732 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3733 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3734 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3735 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3736 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3737 		return (hash & 0x3ff);
   3738 	}
   3739 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3740 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3741 
   3742 	return (hash & 0xfff);
   3743 }
   3744 
    3745 /*
    3746  * wm_rar_count:
    3747  *	Return the number of receive address table entries.
    3748  */
   3749 static int
   3750 wm_rar_count(struct wm_softc *sc)
   3751 {
   3752 	int size;
   3753 
   3754 	switch (sc->sc_type) {
   3755 	case WM_T_ICH8:
    3756 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3757 		break;
   3758 	case WM_T_ICH9:
   3759 	case WM_T_ICH10:
   3760 	case WM_T_PCH:
   3761 		size = WM_RAL_TABSIZE_ICH8;
   3762 		break;
   3763 	case WM_T_PCH2:
   3764 		size = WM_RAL_TABSIZE_PCH2;
   3765 		break;
   3766 	case WM_T_PCH_LPT:
   3767 	case WM_T_PCH_SPT:
   3768 	case WM_T_PCH_CNP:
   3769 		size = WM_RAL_TABSIZE_PCH_LPT;
   3770 		break;
   3771 	case WM_T_82575:
   3772 	case WM_T_I210:
   3773 	case WM_T_I211:
   3774 		size = WM_RAL_TABSIZE_82575;
   3775 		break;
   3776 	case WM_T_82576:
   3777 	case WM_T_82580:
   3778 		size = WM_RAL_TABSIZE_82576;
   3779 		break;
   3780 	case WM_T_I350:
   3781 	case WM_T_I354:
   3782 		size = WM_RAL_TABSIZE_I350;
   3783 		break;
   3784 	default:
   3785 		size = WM_RAL_TABSIZE;
   3786 	}
   3787 
   3788 	return size;
   3789 }
   3790 
   3791 /*
   3792  * wm_set_filter:
   3793  *
   3794  *	Set up the receive filter.
   3795  */
   3796 static void
   3797 wm_set_filter(struct wm_softc *sc)
   3798 {
   3799 	struct ethercom *ec = &sc->sc_ethercom;
   3800 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3801 	struct ether_multi *enm;
   3802 	struct ether_multistep step;
   3803 	bus_addr_t mta_reg;
   3804 	uint32_t hash, reg, bit;
   3805 	int i, size, ralmax;
   3806 
   3807 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3808 		device_xname(sc->sc_dev), __func__));
   3809 
   3810 	if (sc->sc_type >= WM_T_82544)
   3811 		mta_reg = WMREG_CORDOVA_MTA;
   3812 	else
   3813 		mta_reg = WMREG_MTA;
   3814 
   3815 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3816 
   3817 	if (ifp->if_flags & IFF_BROADCAST)
   3818 		sc->sc_rctl |= RCTL_BAM;
   3819 	if (ifp->if_flags & IFF_PROMISC) {
   3820 		sc->sc_rctl |= RCTL_UPE;
   3821 		ETHER_LOCK(ec);
   3822 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3823 		ETHER_UNLOCK(ec);
   3824 		goto allmulti;
   3825 	}
   3826 
   3827 	/*
   3828 	 * Set the station address in the first RAL slot, and
   3829 	 * clear the remaining slots.
   3830 	 */
   3831 	size = wm_rar_count(sc);
   3832 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3833 
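         	/*
         	 * On PCH_LPT and newer, FWSM_WLOCK_MAC limits how many receive
         	 * address entries software may use: 0 means all of them, 1
         	 * means RAR[0] only, and any other value allows that many
         	 * shared (SHRA) entries plus RAR[0].
         	 */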
   3834 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3835 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3836 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3837 		switch (i) {
   3838 		case 0:
   3839 			/* We can use all entries */
   3840 			ralmax = size;
   3841 			break;
   3842 		case 1:
   3843 			/* Only RAR[0] */
   3844 			ralmax = 1;
   3845 			break;
   3846 		default:
   3847 			/* Available SHRA + RAR[0] */
   3848 			ralmax = i + 1;
   3849 		}
   3850 	} else
   3851 		ralmax = size;
   3852 	for (i = 1; i < size; i++) {
   3853 		if (i < ralmax)
   3854 			wm_set_ral(sc, NULL, i);
   3855 	}
   3856 
   3857 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3858 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3859 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3860 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3861 		size = WM_ICH8_MC_TABSIZE;
   3862 	else
   3863 		size = WM_MC_TABSIZE;
   3864 	/* Clear out the multicast table. */
   3865 	for (i = 0; i < size; i++) {
   3866 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3867 		CSR_WRITE_FLUSH(sc);
   3868 	}
   3869 
   3870 	ETHER_LOCK(ec);
   3871 	ETHER_FIRST_MULTI(step, ec, enm);
   3872 	while (enm != NULL) {
   3873 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3874 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3875 			ETHER_UNLOCK(ec);
   3876 			/*
   3877 			 * We must listen to a range of multicast addresses.
   3878 			 * For now, just accept all multicasts, rather than
   3879 			 * trying to set only those filter bits needed to match
   3880 			 * the range.  (At this time, the only use of address
   3881 			 * ranges is for IP multicast routing, for which the
   3882 			 * range is big enough to require all bits set.)
   3883 			 */
   3884 			goto allmulti;
   3885 		}
   3886 
   3887 		hash = wm_mchash(sc, enm->enm_addrlo);
   3888 
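         		/*
         		 * The upper hash bits select one 32-bit word of the
         		 * multicast table (32 words on ICH/PCH, 128 on others)
         		 * and the low five bits select a bit within that word.
         		 */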
   3889 		reg = (hash >> 5);
   3890 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3891 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3892 		    || (sc->sc_type == WM_T_PCH2)
   3893 		    || (sc->sc_type == WM_T_PCH_LPT)
   3894 		    || (sc->sc_type == WM_T_PCH_SPT)
   3895 		    || (sc->sc_type == WM_T_PCH_CNP))
   3896 			reg &= 0x1f;
   3897 		else
   3898 			reg &= 0x7f;
   3899 		bit = hash & 0x1f;
   3900 
   3901 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3902 		hash |= 1U << bit;
   3903 
   3904 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
    3905 			/*
    3906 			 * 82544 Errata 9: Certain registers cannot be written
    3907 			 * with particular alignments in PCI-X bus operation
    3908 			 * (FCAH, MTA and VFTA).
    3909 			 */
   3910 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3911 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3912 			CSR_WRITE_FLUSH(sc);
   3913 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3914 			CSR_WRITE_FLUSH(sc);
   3915 		} else {
   3916 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3917 			CSR_WRITE_FLUSH(sc);
   3918 		}
   3919 
   3920 		ETHER_NEXT_MULTI(step, enm);
   3921 	}
   3922 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3923 	ETHER_UNLOCK(ec);
   3924 
   3925 	goto setit;
   3926 
   3927  allmulti:
   3928 	sc->sc_rctl |= RCTL_MPE;
   3929 
   3930  setit:
   3931 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3932 }
   3933 
   3934 /* Reset and init related */
   3935 
   3936 static void
   3937 wm_set_vlan(struct wm_softc *sc)
   3938 {
   3939 
   3940 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3941 		device_xname(sc->sc_dev), __func__));
   3942 
   3943 	/* Deal with VLAN enables. */
   3944 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3945 		sc->sc_ctrl |= CTRL_VME;
   3946 	else
   3947 		sc->sc_ctrl &= ~CTRL_VME;
   3948 
   3949 	/* Write the control registers. */
   3950 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3951 }
   3952 
   3953 static void
   3954 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3955 {
   3956 	uint32_t gcr;
   3957 	pcireg_t ctrl2;
   3958 
   3959 	gcr = CSR_READ(sc, WMREG_GCR);
   3960 
   3961 	/* Only take action if timeout value is defaulted to 0 */
   3962 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3963 		goto out;
   3964 
   3965 	if ((gcr & GCR_CAP_VER2) == 0) {
   3966 		gcr |= GCR_CMPL_TMOUT_10MS;
   3967 		goto out;
   3968 	}
   3969 
   3970 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3971 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3972 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3973 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3974 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3975 
   3976 out:
   3977 	/* Disable completion timeout resend */
   3978 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3979 
   3980 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3981 }
   3982 
   3983 void
   3984 wm_get_auto_rd_done(struct wm_softc *sc)
   3985 {
   3986 	int i;
   3987 
    3988 	/* Wait for eeprom to reload */
   3989 	switch (sc->sc_type) {
   3990 	case WM_T_82571:
   3991 	case WM_T_82572:
   3992 	case WM_T_82573:
   3993 	case WM_T_82574:
   3994 	case WM_T_82583:
   3995 	case WM_T_82575:
   3996 	case WM_T_82576:
   3997 	case WM_T_82580:
   3998 	case WM_T_I350:
   3999 	case WM_T_I354:
   4000 	case WM_T_I210:
   4001 	case WM_T_I211:
   4002 	case WM_T_80003:
   4003 	case WM_T_ICH8:
   4004 	case WM_T_ICH9:
   4005 		for (i = 0; i < 10; i++) {
   4006 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4007 				break;
   4008 			delay(1000);
   4009 		}
   4010 		if (i == 10) {
   4011 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4012 			    "complete\n", device_xname(sc->sc_dev));
   4013 		}
   4014 		break;
   4015 	default:
   4016 		break;
   4017 	}
   4018 }
   4019 
   4020 void
   4021 wm_lan_init_done(struct wm_softc *sc)
   4022 {
   4023 	uint32_t reg = 0;
   4024 	int i;
   4025 
   4026 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4027 		device_xname(sc->sc_dev), __func__));
   4028 
   4029 	/* Wait for eeprom to reload */
   4030 	switch (sc->sc_type) {
   4031 	case WM_T_ICH10:
   4032 	case WM_T_PCH:
   4033 	case WM_T_PCH2:
   4034 	case WM_T_PCH_LPT:
   4035 	case WM_T_PCH_SPT:
   4036 	case WM_T_PCH_CNP:
   4037 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4038 			reg = CSR_READ(sc, WMREG_STATUS);
   4039 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4040 				break;
   4041 			delay(100);
   4042 		}
   4043 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4044 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4045 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4046 		}
   4047 		break;
   4048 	default:
   4049 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4050 		    __func__);
   4051 		break;
   4052 	}
   4053 
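         	/* Clear the done bit so the next LAN init can be detected. */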
   4054 	reg &= ~STATUS_LAN_INIT_DONE;
   4055 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4056 }
   4057 
   4058 void
   4059 wm_get_cfg_done(struct wm_softc *sc)
   4060 {
   4061 	int mask;
   4062 	uint32_t reg;
   4063 	int i;
   4064 
   4065 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4066 		device_xname(sc->sc_dev), __func__));
   4067 
   4068 	/* Wait for eeprom to reload */
   4069 	switch (sc->sc_type) {
   4070 	case WM_T_82542_2_0:
   4071 	case WM_T_82542_2_1:
   4072 		/* null */
   4073 		break;
   4074 	case WM_T_82543:
   4075 	case WM_T_82544:
   4076 	case WM_T_82540:
   4077 	case WM_T_82545:
   4078 	case WM_T_82545_3:
   4079 	case WM_T_82546:
   4080 	case WM_T_82546_3:
   4081 	case WM_T_82541:
   4082 	case WM_T_82541_2:
   4083 	case WM_T_82547:
   4084 	case WM_T_82547_2:
   4085 	case WM_T_82573:
   4086 	case WM_T_82574:
   4087 	case WM_T_82583:
   4088 		/* generic */
   4089 		delay(10*1000);
   4090 		break;
   4091 	case WM_T_80003:
   4092 	case WM_T_82571:
   4093 	case WM_T_82572:
   4094 	case WM_T_82575:
   4095 	case WM_T_82576:
   4096 	case WM_T_82580:
   4097 	case WM_T_I350:
   4098 	case WM_T_I354:
   4099 	case WM_T_I210:
   4100 	case WM_T_I211:
   4101 		if (sc->sc_type == WM_T_82571) {
    4102 			/* Only the 82571 shares port 0's CFGDONE bit */
   4103 			mask = EEMNGCTL_CFGDONE_0;
   4104 		} else
   4105 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4106 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4107 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4108 				break;
   4109 			delay(1000);
   4110 		}
   4111 		if (i >= WM_PHY_CFG_TIMEOUT)
   4112 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4113 				device_xname(sc->sc_dev), __func__));
   4114 		break;
   4115 	case WM_T_ICH8:
   4116 	case WM_T_ICH9:
   4117 	case WM_T_ICH10:
   4118 	case WM_T_PCH:
   4119 	case WM_T_PCH2:
   4120 	case WM_T_PCH_LPT:
   4121 	case WM_T_PCH_SPT:
   4122 	case WM_T_PCH_CNP:
   4123 		delay(10*1000);
   4124 		if (sc->sc_type >= WM_T_ICH10)
   4125 			wm_lan_init_done(sc);
   4126 		else
   4127 			wm_get_auto_rd_done(sc);
   4128 
   4129 		/* Clear PHY Reset Asserted bit */
   4130 		reg = CSR_READ(sc, WMREG_STATUS);
   4131 		if ((reg & STATUS_PHYRA) != 0)
   4132 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4133 		break;
   4134 	default:
   4135 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4136 		    __func__);
   4137 		break;
   4138 	}
   4139 }
   4140 
   4141 int
   4142 wm_phy_post_reset(struct wm_softc *sc)
   4143 {
   4144 	device_t dev = sc->sc_dev;
   4145 	uint16_t reg;
   4146 	int rv = 0;
   4147 
   4148 	/* This function is only for ICH8 and newer. */
   4149 	if (sc->sc_type < WM_T_ICH8)
   4150 		return 0;
   4151 
   4152 	if (wm_phy_resetisblocked(sc)) {
   4153 		/* XXX */
   4154 		device_printf(dev, "PHY is blocked\n");
   4155 		return -1;
   4156 	}
   4157 
   4158 	/* Allow time for h/w to get to quiescent state after reset */
   4159 	delay(10*1000);
   4160 
   4161 	/* Perform any necessary post-reset workarounds */
   4162 	if (sc->sc_type == WM_T_PCH)
   4163 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4164 	else if (sc->sc_type == WM_T_PCH2)
   4165 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4166 	if (rv != 0)
   4167 		return rv;
   4168 
   4169 	/* Clear the host wakeup bit after lcd reset */
   4170 	if (sc->sc_type >= WM_T_PCH) {
   4171 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4172 		reg &= ~BM_WUC_HOST_WU_BIT;
   4173 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4174 	}
   4175 
   4176 	/* Configure the LCD with the extended configuration region in NVM */
   4177 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4178 		return rv;
   4179 
   4180 	/* Configure the LCD with the OEM bits in NVM */
   4181 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4182 
   4183 	if (sc->sc_type == WM_T_PCH2) {
   4184 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4185 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4186 			delay(10 * 1000);
   4187 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4188 		}
   4189 		/* Set EEE LPI Update Timer to 200usec */
   4190 		rv = sc->phy.acquire(sc);
   4191 		if (rv)
   4192 			return rv;
   4193 		rv = wm_write_emi_reg_locked(dev,
   4194 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4195 		sc->phy.release(sc);
   4196 	}
   4197 
   4198 	return rv;
   4199 }
   4200 
   4201 /* Only for PCH and newer */
   4202 static int
   4203 wm_write_smbus_addr(struct wm_softc *sc)
   4204 {
   4205 	uint32_t strap, freq;
   4206 	uint16_t phy_data;
   4207 	int rv;
   4208 
   4209 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4210 		device_xname(sc->sc_dev), __func__));
   4211 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4212 
   4213 	strap = CSR_READ(sc, WMREG_STRAP);
   4214 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4215 
   4216 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4217 	if (rv != 0)
   4218 		return -1;
   4219 
   4220 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4221 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4222 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4223 
   4224 	if (sc->sc_phytype == WMPHY_I217) {
   4225 		/* Restore SMBus frequency */
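         		/*
         		 * A strap frequency field of 0 means the frequency is
         		 * unsupported; otherwise the low two bits of the
         		 * decremented value select the FREQ_LOW/FREQ_HIGH bits.
         		 */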
    4226 		if (freq--) {
   4227 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4228 			    | HV_SMB_ADDR_FREQ_HIGH);
   4229 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4230 			    HV_SMB_ADDR_FREQ_LOW);
   4231 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4232 			    HV_SMB_ADDR_FREQ_HIGH);
   4233 		} else
   4234 			DPRINTF(WM_DEBUG_INIT,
   4235 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4236 				device_xname(sc->sc_dev), __func__));
   4237 	}
   4238 
   4239 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4240 	    phy_data);
   4241 }
   4242 
   4243 static int
   4244 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4245 {
   4246 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4247 	uint16_t phy_page = 0;
   4248 	int rv = 0;
   4249 
   4250 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4251 		device_xname(sc->sc_dev), __func__));
   4252 
   4253 	switch (sc->sc_type) {
   4254 	case WM_T_ICH8:
   4255 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4256 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4257 			return 0;
   4258 
   4259 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4260 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4261 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4262 			break;
   4263 		}
   4264 		/* FALLTHROUGH */
   4265 	case WM_T_PCH:
   4266 	case WM_T_PCH2:
   4267 	case WM_T_PCH_LPT:
   4268 	case WM_T_PCH_SPT:
   4269 	case WM_T_PCH_CNP:
   4270 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4271 		break;
   4272 	default:
   4273 		return 0;
   4274 	}
   4275 
   4276 	if ((rv = sc->phy.acquire(sc)) != 0)
   4277 		return rv;
   4278 
   4279 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4280 	if ((reg & sw_cfg_mask) == 0)
   4281 		goto release;
   4282 
   4283 	/*
   4284 	 * Make sure HW does not configure LCD from PHY extended configuration
   4285 	 * before SW configuration
   4286 	 */
   4287 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4288 	if ((sc->sc_type < WM_T_PCH2)
   4289 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4290 		goto release;
   4291 
   4292 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4293 		device_xname(sc->sc_dev), __func__));
    4294 	/* The extended config pointer is in DWORDs; << 1 converts to words */
   4295 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4296 
   4297 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4298 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4299 	if (cnf_size == 0)
   4300 		goto release;
   4301 
   4302 	if (((sc->sc_type == WM_T_PCH)
   4303 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4304 	    || (sc->sc_type > WM_T_PCH)) {
   4305 		/*
   4306 		 * HW configures the SMBus address and LEDs when the OEM and
   4307 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4308 		 * are cleared, SW will configure them instead.
   4309 		 */
   4310 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4311 			device_xname(sc->sc_dev), __func__));
   4312 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4313 			goto release;
   4314 
   4315 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4316 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4317 		    (uint16_t)reg);
   4318 		if (rv != 0)
   4319 			goto release;
   4320 	}
   4321 
   4322 	/* Configure LCD from extended configuration region. */
   4323 	for (i = 0; i < cnf_size; i++) {
   4324 		uint16_t reg_data, reg_addr;
   4325 
   4326 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4327 			goto release;
   4328 
   4329 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4330 			goto release;
   4331 
   4332 		if (reg_addr == IGPHY_PAGE_SELECT)
   4333 			phy_page = reg_data;
   4334 
   4335 		reg_addr &= IGPHY_MAXREGADDR;
   4336 		reg_addr |= phy_page;
   4337 
   4338 		KASSERT(sc->phy.writereg_locked != NULL);
   4339 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4340 		    reg_data);
   4341 	}
   4342 
   4343 release:
   4344 	sc->phy.release(sc);
   4345 	return rv;
   4346 }
   4347 
   4348 /*
   4349  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4350  *  @sc:       pointer to the HW structure
   4351  *  @d0_state: boolean if entering d0 or d3 device state
   4352  *
   4353  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4354  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4355  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4356  */
   4357 int
   4358 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4359 {
   4360 	uint32_t mac_reg;
   4361 	uint16_t oem_reg;
   4362 	int rv;
   4363 
   4364 	if (sc->sc_type < WM_T_PCH)
   4365 		return 0;
   4366 
   4367 	rv = sc->phy.acquire(sc);
   4368 	if (rv != 0)
   4369 		return rv;
   4370 
   4371 	if (sc->sc_type == WM_T_PCH) {
   4372 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4373 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4374 			goto release;
   4375 	}
   4376 
   4377 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4378 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4379 		goto release;
   4380 
   4381 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4382 
   4383 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4384 	if (rv != 0)
   4385 		goto release;
   4386 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4387 
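         	/*
         	 * Translate the MAC's PHY_CTRL GbE-disable and LPLU settings
         	 * into the PHY's OEM bits; which PHY_CTRL bits are honored
         	 * depends on whether we are entering D0 or D3.
         	 */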
   4388 	if (d0_state) {
   4389 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4390 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4391 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4392 			oem_reg |= HV_OEM_BITS_LPLU;
   4393 	} else {
   4394 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4395 		    != 0)
   4396 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4397 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4398 		    != 0)
   4399 			oem_reg |= HV_OEM_BITS_LPLU;
   4400 	}
   4401 
   4402 	/* Set Restart auto-neg to activate the bits */
   4403 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4404 	    && (wm_phy_resetisblocked(sc) == false))
   4405 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4406 
   4407 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4408 
   4409 release:
   4410 	sc->phy.release(sc);
   4411 
   4412 	return rv;
   4413 }
   4414 
   4415 /* Init hardware bits */
   4416 void
   4417 wm_initialize_hardware_bits(struct wm_softc *sc)
   4418 {
   4419 	uint32_t tarc0, tarc1, reg;
   4420 
   4421 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4422 		device_xname(sc->sc_dev), __func__));
   4423 
   4424 	/* For 82571 variant, 80003 and ICHs */
   4425 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4426 	    || (sc->sc_type >= WM_T_80003)) {
   4427 
   4428 		/* Transmit Descriptor Control 0 */
   4429 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4430 		reg |= TXDCTL_COUNT_DESC;
   4431 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4432 
   4433 		/* Transmit Descriptor Control 1 */
   4434 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4435 		reg |= TXDCTL_COUNT_DESC;
   4436 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4437 
   4438 		/* TARC0 */
   4439 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4440 		switch (sc->sc_type) {
   4441 		case WM_T_82571:
   4442 		case WM_T_82572:
   4443 		case WM_T_82573:
   4444 		case WM_T_82574:
   4445 		case WM_T_82583:
   4446 		case WM_T_80003:
   4447 			/* Clear bits 30..27 */
   4448 			tarc0 &= ~__BITS(30, 27);
   4449 			break;
   4450 		default:
   4451 			break;
   4452 		}
   4453 
   4454 		switch (sc->sc_type) {
   4455 		case WM_T_82571:
   4456 		case WM_T_82572:
   4457 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4458 
   4459 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4460 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4461 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4462 			/* 8257[12] Errata No.7 */
    4463 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4464 
   4465 			/* TARC1 bit 28 */
   4466 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4467 				tarc1 &= ~__BIT(28);
   4468 			else
   4469 				tarc1 |= __BIT(28);
   4470 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4471 
   4472 			/*
   4473 			 * 8257[12] Errata No.13
    4474 			 * Disable Dynamic Clock Gating.
   4475 			 */
   4476 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4477 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4478 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4479 			break;
   4480 		case WM_T_82573:
   4481 		case WM_T_82574:
   4482 		case WM_T_82583:
   4483 			if ((sc->sc_type == WM_T_82574)
   4484 			    || (sc->sc_type == WM_T_82583))
   4485 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4486 
   4487 			/* Extended Device Control */
   4488 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4489 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4490 			reg |= __BIT(22);	/* Set bit 22 */
   4491 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4492 
   4493 			/* Device Control */
   4494 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4495 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4496 
   4497 			/* PCIe Control Register */
   4498 			/*
   4499 			 * 82573 Errata (unknown).
   4500 			 *
   4501 			 * 82574 Errata 25 and 82583 Errata 12
   4502 			 * "Dropped Rx Packets":
    4503 			 *   NVM image version 2.1.4 and newer does not have this bug.
   4504 			 */
   4505 			reg = CSR_READ(sc, WMREG_GCR);
   4506 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4507 			CSR_WRITE(sc, WMREG_GCR, reg);
   4508 
   4509 			if ((sc->sc_type == WM_T_82574)
   4510 			    || (sc->sc_type == WM_T_82583)) {
   4511 				/*
   4512 				 * Document says this bit must be set for
   4513 				 * proper operation.
   4514 				 */
   4515 				reg = CSR_READ(sc, WMREG_GCR);
   4516 				reg |= __BIT(22);
   4517 				CSR_WRITE(sc, WMREG_GCR, reg);
   4518 
   4519 				/*
    4520 				 * Apply a workaround for the hardware errata
    4521 				 * documented in the errata docs. It fixes an
    4522 				 * issue where some error-prone or unreliable
    4523 				 * PCIe completions occur, particularly with
    4524 				 * ASPM enabled. Without the fix, the issue
    4525 				 * can cause Tx timeouts.
   4526 				 */
   4527 				reg = CSR_READ(sc, WMREG_GCR2);
   4528 				reg |= __BIT(0);
   4529 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4530 			}
   4531 			break;
   4532 		case WM_T_80003:
   4533 			/* TARC0 */
   4534 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4535 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4536 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4537 
   4538 			/* TARC1 bit 28 */
   4539 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4540 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4541 				tarc1 &= ~__BIT(28);
   4542 			else
   4543 				tarc1 |= __BIT(28);
   4544 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4545 			break;
   4546 		case WM_T_ICH8:
   4547 		case WM_T_ICH9:
   4548 		case WM_T_ICH10:
   4549 		case WM_T_PCH:
   4550 		case WM_T_PCH2:
   4551 		case WM_T_PCH_LPT:
   4552 		case WM_T_PCH_SPT:
   4553 		case WM_T_PCH_CNP:
   4554 			/* TARC0 */
   4555 			if (sc->sc_type == WM_T_ICH8) {
   4556 				/* Set TARC0 bits 29 and 28 */
   4557 				tarc0 |= __BITS(29, 28);
   4558 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4559 				tarc0 |= __BIT(29);
   4560 				/*
    4561 				 * Drop bit 28. From Linux.
    4562 				 * See the I218/I219 spec update,
   4563 				 * "5. Buffer Overrun While the I219 is
   4564 				 * Processing DMA Transactions"
   4565 				 */
   4566 				tarc0 &= ~__BIT(28);
   4567 			}
   4568 			/* Set TARC0 bits 23,24,26,27 */
   4569 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4570 
   4571 			/* CTRL_EXT */
   4572 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4573 			reg |= __BIT(22);	/* Set bit 22 */
   4574 			/*
   4575 			 * Enable PHY low-power state when MAC is at D3
   4576 			 * w/o WoL
   4577 			 */
   4578 			if (sc->sc_type >= WM_T_PCH)
   4579 				reg |= CTRL_EXT_PHYPDEN;
   4580 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4581 
   4582 			/* TARC1 */
   4583 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4584 			/* bit 28 */
   4585 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4586 				tarc1 &= ~__BIT(28);
   4587 			else
   4588 				tarc1 |= __BIT(28);
   4589 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4590 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4591 
   4592 			/* Device Status */
   4593 			if (sc->sc_type == WM_T_ICH8) {
   4594 				reg = CSR_READ(sc, WMREG_STATUS);
   4595 				reg &= ~__BIT(31);
   4596 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4598 			}
   4599 
   4600 			/* IOSFPC */
   4601 			if (sc->sc_type == WM_T_PCH_SPT) {
   4602 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4603 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4604 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4605 			}
   4606 			/*
    4607 			 * To work around a descriptor data corruption issue
    4608 			 * seen with NFS v2 UDP traffic, just disable the NFS
    4609 			 * filtering capability.
   4610 			 */
   4611 			reg = CSR_READ(sc, WMREG_RFCTL);
   4612 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4613 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4614 			break;
   4615 		default:
   4616 			break;
   4617 		}
   4618 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4619 
   4620 		switch (sc->sc_type) {
   4621 		/*
   4622 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4623 		 * Avoid RSS Hash Value bug.
   4624 		 */
   4625 		case WM_T_82571:
   4626 		case WM_T_82572:
   4627 		case WM_T_82573:
   4628 		case WM_T_80003:
   4629 		case WM_T_ICH8:
   4630 			reg = CSR_READ(sc, WMREG_RFCTL);
    4631 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4632 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4633 			break;
   4634 		case WM_T_82574:
    4635 			/* Use extended Rx descriptors. */
   4636 			reg = CSR_READ(sc, WMREG_RFCTL);
   4637 			reg |= WMREG_RFCTL_EXSTEN;
   4638 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4639 			break;
   4640 		default:
   4641 			break;
   4642 		}
   4643 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4644 		/*
   4645 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4646 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4647 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4648 		 * Correctly by the Device"
   4649 		 *
   4650 		 * I354(C2000) Errata AVR53:
   4651 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4652 		 * Hang"
   4653 		 */
   4654 		reg = CSR_READ(sc, WMREG_RFCTL);
   4655 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4656 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4657 	}
   4658 }
   4659 
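         /*
          * Translate the encoded RXPBS size field into a packet buffer size
          * using the 82580 lookup table; out-of-range values yield 0.
          */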
   4660 static uint32_t
   4661 wm_rxpbs_adjust_82580(uint32_t val)
   4662 {
   4663 	uint32_t rv = 0;
   4664 
   4665 	if (val < __arraycount(wm_82580_rxpbs_table))
   4666 		rv = wm_82580_rxpbs_table[val];
   4667 
   4668 	return rv;
   4669 }
   4670 
   4671 /*
   4672  * wm_reset_phy:
   4673  *
   4674  *	generic PHY reset function.
   4675  *	Same as e1000_phy_hw_reset_generic()
   4676  */
   4677 static int
   4678 wm_reset_phy(struct wm_softc *sc)
   4679 {
   4680 	uint32_t reg;
   4681 
   4682 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4683 		device_xname(sc->sc_dev), __func__));
   4684 	if (wm_phy_resetisblocked(sc))
   4685 		return -1;
   4686 
   4687 	sc->phy.acquire(sc);
   4688 
   4689 	reg = CSR_READ(sc, WMREG_CTRL);
   4690 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4691 	CSR_WRITE_FLUSH(sc);
   4692 
   4693 	delay(sc->phy.reset_delay_us);
   4694 
   4695 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4696 	CSR_WRITE_FLUSH(sc);
   4697 
   4698 	delay(150);
   4699 
   4700 	sc->phy.release(sc);
   4701 
   4702 	wm_get_cfg_done(sc);
   4703 	wm_phy_post_reset(sc);
   4704 
   4705 	return 0;
   4706 }
   4707 
   4708 /*
   4709  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4710  * so it is enough to check sc->sc_queue[0] only.
   4711  */
   4712 static void
   4713 wm_flush_desc_rings(struct wm_softc *sc)
   4714 {
   4715 	pcireg_t preg;
   4716 	uint32_t reg;
   4717 	struct wm_txqueue *txq;
   4718 	wiseman_txdesc_t *txd;
   4719 	int nexttx;
   4720 	uint32_t rctl;
   4721 
   4722 	/* First, disable MULR fix in FEXTNVM11 */
   4723 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4724 	reg |= FEXTNVM11_DIS_MULRFIX;
   4725 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4726 
   4727 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4728 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4729 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4730 		return;
   4731 
   4732 	/* TX */
   4733 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4734 	    preg, reg);
   4735 	reg = CSR_READ(sc, WMREG_TCTL);
   4736 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4737 
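         	/*
         	 * Queue a single dummy 512-byte descriptor (IFCS only) and
         	 * advance TDT so that the hardware drains the Tx ring.
         	 */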
   4738 	txq = &sc->sc_queue[0].wmq_txq;
   4739 	nexttx = txq->txq_next;
   4740 	txd = &txq->txq_descs[nexttx];
   4741 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4742 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4743 	txd->wtx_fields.wtxu_status = 0;
   4744 	txd->wtx_fields.wtxu_options = 0;
   4745 	txd->wtx_fields.wtxu_vlan = 0;
   4746 
   4747 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4748 	    BUS_SPACE_BARRIER_WRITE);
   4749 
   4750 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4751 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4752 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4753 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4754 	delay(250);
   4755 
   4756 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4757 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4758 		return;
   4759 
   4760 	/* RX */
   4761 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4762 	rctl = CSR_READ(sc, WMREG_RCTL);
   4763 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4764 	CSR_WRITE_FLUSH(sc);
   4765 	delay(150);
   4766 
   4767 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4768 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4769 	reg &= 0xffffc000;
   4770 	/*
   4771 	 * Update thresholds: prefetch threshold to 31, host threshold
   4772 	 * to 1 and make sure the granularity is "descriptors" and not
   4773 	 * "cache lines"
   4774 	 */
   4775 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4776 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4777 
   4778 	/* Momentarily enable the RX ring for the changes to take effect */
   4779 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4780 	CSR_WRITE_FLUSH(sc);
   4781 	delay(150);
   4782 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4783 }
   4784 
   4785 /*
   4786  * wm_reset:
   4787  *
   4788  *	Reset the i82542 chip.
   4789  */
   4790 static void
   4791 wm_reset(struct wm_softc *sc)
   4792 {
   4793 	int phy_reset = 0;
   4794 	int i, error = 0;
   4795 	uint32_t reg;
   4796 	uint16_t kmreg;
   4797 	int rv;
   4798 
   4799 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4800 		device_xname(sc->sc_dev), __func__));
   4801 	KASSERT(sc->sc_type != 0);
   4802 
   4803 	/*
   4804 	 * Allocate on-chip memory according to the MTU size.
   4805 	 * The Packet Buffer Allocation register must be written
   4806 	 * before the chip is reset.
   4807 	 */
   4808 	switch (sc->sc_type) {
   4809 	case WM_T_82547:
   4810 	case WM_T_82547_2:
   4811 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4812 		    PBA_22K : PBA_30K;
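         		/*
         		 * The remainder of the 40K packet buffer (40K - PBA)
         		 * becomes the Tx FIFO used by the 82547 Tx-stall
         		 * workaround.
         		 */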
   4813 		for (i = 0; i < sc->sc_nqueues; i++) {
   4814 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4815 			txq->txq_fifo_head = 0;
   4816 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4817 			txq->txq_fifo_size =
   4818 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4819 			txq->txq_fifo_stall = 0;
   4820 		}
   4821 		break;
   4822 	case WM_T_82571:
   4823 	case WM_T_82572:
    4824 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4825 	case WM_T_80003:
   4826 		sc->sc_pba = PBA_32K;
   4827 		break;
   4828 	case WM_T_82573:
   4829 		sc->sc_pba = PBA_12K;
   4830 		break;
   4831 	case WM_T_82574:
   4832 	case WM_T_82583:
   4833 		sc->sc_pba = PBA_20K;
   4834 		break;
   4835 	case WM_T_82576:
   4836 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4837 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4838 		break;
   4839 	case WM_T_82580:
   4840 	case WM_T_I350:
   4841 	case WM_T_I354:
   4842 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4843 		break;
   4844 	case WM_T_I210:
   4845 	case WM_T_I211:
   4846 		sc->sc_pba = PBA_34K;
   4847 		break;
   4848 	case WM_T_ICH8:
   4849 		/* Workaround for a bit corruption issue in FIFO memory */
   4850 		sc->sc_pba = PBA_8K;
   4851 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4852 		break;
   4853 	case WM_T_ICH9:
   4854 	case WM_T_ICH10:
   4855 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4856 		    PBA_14K : PBA_10K;
   4857 		break;
   4858 	case WM_T_PCH:
   4859 	case WM_T_PCH2:	/* XXX 14K? */
   4860 	case WM_T_PCH_LPT:
   4861 	case WM_T_PCH_SPT:
   4862 	case WM_T_PCH_CNP:
   4863 		sc->sc_pba = PBA_26K;
   4864 		break;
   4865 	default:
   4866 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4867 		    PBA_40K : PBA_48K;
   4868 		break;
   4869 	}
   4870 	/*
    4871 	 * Only old or non-multiqueue devices have the PBA register.
   4872 	 * XXX Need special handling for 82575.
   4873 	 */
   4874 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4875 	    || (sc->sc_type == WM_T_82575))
   4876 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4877 
   4878 	/* Prevent the PCI-E bus from sticking */
   4879 	if (sc->sc_flags & WM_F_PCIE) {
   4880 		int timeout = 800;
   4881 
   4882 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4883 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4884 
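         		/*
         		 * Poll for GIO master requests to quiesce, for up to
         		 * 800 * 100us = 80ms, before giving up.
         		 */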
   4885 		while (timeout--) {
   4886 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4887 			    == 0)
   4888 				break;
   4889 			delay(100);
   4890 		}
   4891 		if (timeout == 0)
   4892 			device_printf(sc->sc_dev,
   4893 			    "failed to disable busmastering\n");
   4894 	}
   4895 
   4896 	/* Set the completion timeout for interface */
   4897 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4898 	    || (sc->sc_type == WM_T_82580)
   4899 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4900 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4901 		wm_set_pcie_completion_timeout(sc);
   4902 
   4903 	/* Clear interrupt */
   4904 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4905 	if (wm_is_using_msix(sc)) {
   4906 		if (sc->sc_type != WM_T_82574) {
   4907 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4908 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4909 		} else
   4910 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4911 	}
   4912 
   4913 	/* Stop the transmit and receive processes. */
   4914 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4915 	sc->sc_rctl &= ~RCTL_EN;
   4916 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4917 	CSR_WRITE_FLUSH(sc);
   4918 
   4919 	/* XXX set_tbi_sbp_82543() */
   4920 
   4921 	delay(10*1000);
   4922 
   4923 	/* Must acquire the MDIO ownership before MAC reset */
   4924 	switch (sc->sc_type) {
   4925 	case WM_T_82573:
   4926 	case WM_T_82574:
   4927 	case WM_T_82583:
   4928 		error = wm_get_hw_semaphore_82573(sc);
   4929 		break;
   4930 	default:
   4931 		break;
   4932 	}
   4933 
   4934 	/*
   4935 	 * 82541 Errata 29? & 82547 Errata 28?
   4936 	 * See also the description about PHY_RST bit in CTRL register
   4937 	 * in 8254x_GBe_SDM.pdf.
   4938 	 */
   4939 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4940 		CSR_WRITE(sc, WMREG_CTRL,
   4941 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4942 		CSR_WRITE_FLUSH(sc);
   4943 		delay(5000);
   4944 	}
   4945 
   4946 	switch (sc->sc_type) {
   4947 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4948 	case WM_T_82541:
   4949 	case WM_T_82541_2:
   4950 	case WM_T_82547:
   4951 	case WM_T_82547_2:
   4952 		/*
   4953 		 * On some chipsets, a reset through a memory-mapped write
   4954 		 * cycle can cause the chip to reset before completing the
   4955 		 * write cycle. This causes major headache that can be avoided
   4956 		 * by issuing the reset via indirect register writes through
   4957 		 * I/O space.
   4958 		 *
   4959 		 * So, if we successfully mapped the I/O BAR at attach time,
   4960 		 * use that. Otherwise, try our luck with a memory-mapped
   4961 		 * reset.
   4962 		 */
   4963 		if (sc->sc_flags & WM_F_IOH_VALID)
   4964 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4965 		else
   4966 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4967 		break;
   4968 	case WM_T_82545_3:
   4969 	case WM_T_82546_3:
   4970 		/* Use the shadow control register on these chips. */
   4971 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4972 		break;
   4973 	case WM_T_80003:
   4974 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4975 		sc->phy.acquire(sc);
   4976 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4977 		sc->phy.release(sc);
   4978 		break;
   4979 	case WM_T_ICH8:
   4980 	case WM_T_ICH9:
   4981 	case WM_T_ICH10:
   4982 	case WM_T_PCH:
   4983 	case WM_T_PCH2:
   4984 	case WM_T_PCH_LPT:
   4985 	case WM_T_PCH_SPT:
   4986 	case WM_T_PCH_CNP:
   4987 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4988 		if (wm_phy_resetisblocked(sc) == false) {
   4989 			/*
   4990 			 * Gate automatic PHY configuration by hardware on
   4991 			 * non-managed 82579
   4992 			 */
   4993 			if ((sc->sc_type == WM_T_PCH2)
   4994 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4995 				== 0))
   4996 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4997 
   4998 			reg |= CTRL_PHY_RESET;
   4999 			phy_reset = 1;
   5000 		} else
   5001 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5002 		sc->phy.acquire(sc);
   5003 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5004 		/* Don't insert a completion barrier when reset */
   5005 		delay(20*1000);
   5006 		mutex_exit(sc->sc_ich_phymtx);
   5007 		break;
   5008 	case WM_T_82580:
   5009 	case WM_T_I350:
   5010 	case WM_T_I354:
   5011 	case WM_T_I210:
   5012 	case WM_T_I211:
   5013 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5014 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5015 			CSR_WRITE_FLUSH(sc);
   5016 		delay(5000);
   5017 		break;
   5018 	case WM_T_82542_2_0:
   5019 	case WM_T_82542_2_1:
   5020 	case WM_T_82543:
   5021 	case WM_T_82540:
   5022 	case WM_T_82545:
   5023 	case WM_T_82546:
   5024 	case WM_T_82571:
   5025 	case WM_T_82572:
   5026 	case WM_T_82573:
   5027 	case WM_T_82574:
   5028 	case WM_T_82575:
   5029 	case WM_T_82576:
   5030 	case WM_T_82583:
   5031 	default:
   5032 		/* Everything else can safely use the documented method. */
   5033 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5034 		break;
   5035 	}
   5036 
   5037 	/* Must release the MDIO ownership after MAC reset */
   5038 	switch (sc->sc_type) {
   5039 	case WM_T_82573:
   5040 	case WM_T_82574:
   5041 	case WM_T_82583:
   5042 		if (error == 0)
   5043 			wm_put_hw_semaphore_82573(sc);
   5044 		break;
   5045 	default:
   5046 		break;
   5047 	}
   5048 
   5049 	/* Set Phy Config Counter to 50msec */
   5050 	if (sc->sc_type == WM_T_PCH2) {
   5051 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5052 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5053 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5054 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5055 	}
   5056 
   5057 	if (phy_reset != 0)
   5058 		wm_get_cfg_done(sc);
   5059 
   5060 	/* Reload EEPROM */
   5061 	switch (sc->sc_type) {
   5062 	case WM_T_82542_2_0:
   5063 	case WM_T_82542_2_1:
   5064 	case WM_T_82543:
   5065 	case WM_T_82544:
   5066 		delay(10);
   5067 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5068 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5069 		CSR_WRITE_FLUSH(sc);
   5070 		delay(2000);
   5071 		break;
   5072 	case WM_T_82540:
   5073 	case WM_T_82545:
   5074 	case WM_T_82545_3:
   5075 	case WM_T_82546:
   5076 	case WM_T_82546_3:
   5077 		delay(5*1000);
   5078 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5079 		break;
   5080 	case WM_T_82541:
   5081 	case WM_T_82541_2:
   5082 	case WM_T_82547:
   5083 	case WM_T_82547_2:
   5084 		delay(20000);
   5085 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5086 		break;
   5087 	case WM_T_82571:
   5088 	case WM_T_82572:
   5089 	case WM_T_82573:
   5090 	case WM_T_82574:
   5091 	case WM_T_82583:
   5092 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5093 			delay(10);
   5094 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5095 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5096 			CSR_WRITE_FLUSH(sc);
   5097 		}
   5098 		/* check EECD_EE_AUTORD */
   5099 		wm_get_auto_rd_done(sc);
   5100 		/*
    5101 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   5102 		 * is set.
   5103 		 */
   5104 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5105 		    || (sc->sc_type == WM_T_82583))
   5106 			delay(25*1000);
   5107 		break;
   5108 	case WM_T_82575:
   5109 	case WM_T_82576:
   5110 	case WM_T_82580:
   5111 	case WM_T_I350:
   5112 	case WM_T_I354:
   5113 	case WM_T_I210:
   5114 	case WM_T_I211:
   5115 	case WM_T_80003:
   5116 		/* check EECD_EE_AUTORD */
   5117 		wm_get_auto_rd_done(sc);
   5118 		break;
   5119 	case WM_T_ICH8:
   5120 	case WM_T_ICH9:
   5121 	case WM_T_ICH10:
   5122 	case WM_T_PCH:
   5123 	case WM_T_PCH2:
   5124 	case WM_T_PCH_LPT:
   5125 	case WM_T_PCH_SPT:
   5126 	case WM_T_PCH_CNP:
   5127 		break;
   5128 	default:
   5129 		panic("%s: unknown type\n", __func__);
   5130 	}
   5131 
   5132 	/* Check whether EEPROM is present or not */
   5133 	switch (sc->sc_type) {
   5134 	case WM_T_82575:
   5135 	case WM_T_82576:
   5136 	case WM_T_82580:
   5137 	case WM_T_I350:
   5138 	case WM_T_I354:
   5139 	case WM_T_ICH8:
   5140 	case WM_T_ICH9:
   5141 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5142 			/* Not found */
   5143 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5144 			if (sc->sc_type == WM_T_82575)
   5145 				wm_reset_init_script_82575(sc);
   5146 		}
   5147 		break;
   5148 	default:
   5149 		break;
   5150 	}
   5151 
   5152 	if (phy_reset != 0)
   5153 		wm_phy_post_reset(sc);
   5154 
   5155 	if ((sc->sc_type == WM_T_82580)
   5156 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5157 		/* Clear global device reset status bit */
   5158 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5159 	}
   5160 
   5161 	/* Clear any pending interrupt events. */
   5162 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5163 	reg = CSR_READ(sc, WMREG_ICR);
   5164 	if (wm_is_using_msix(sc)) {
   5165 		if (sc->sc_type != WM_T_82574) {
   5166 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5167 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5168 		} else
   5169 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5170 	}
   5171 
   5172 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5173 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5174 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5175 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5176 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5177 		reg |= KABGTXD_BGSQLBIAS;
   5178 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5179 	}
   5180 
   5181 	/* Reload sc_ctrl */
   5182 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5183 
   5184 	wm_set_eee(sc);
   5185 
   5186 	/*
   5187 	 * For PCH, this write will make sure that any noise will be detected
   5188 	 * as a CRC error and be dropped rather than show up as a bad packet
    5189 	 * to the DMA engine.
   5190 	 */
   5191 	if (sc->sc_type == WM_T_PCH)
   5192 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5193 
   5194 	if (sc->sc_type >= WM_T_82544)
   5195 		CSR_WRITE(sc, WMREG_WUC, 0);
   5196 
   5197 	if (sc->sc_type < WM_T_82575)
   5198 		wm_disable_aspm(sc); /* Workaround for some chips */
   5199 
   5200 	wm_reset_mdicnfg_82580(sc);
   5201 
   5202 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5203 		wm_pll_workaround_i210(sc);
   5204 
   5205 	if (sc->sc_type == WM_T_80003) {
   5206 		/* Default to TRUE to enable the MDIC W/A */
   5207 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5208 
   5209 		rv = wm_kmrn_readreg(sc,
   5210 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5211 		if (rv == 0) {
   5212 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5213 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5214 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5215 			else
   5216 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5217 		}
   5218 	}
   5219 }
   5220 
   5221 /*
   5222  * wm_add_rxbuf:
   5223  *
    5224  *	Add a receive buffer to the indicated descriptor.
   5225  */
   5226 static int
   5227 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5228 {
   5229 	struct wm_softc *sc = rxq->rxq_sc;
   5230 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5231 	struct mbuf *m;
   5232 	int error;
   5233 
   5234 	KASSERT(mutex_owned(rxq->rxq_lock));
   5235 
   5236 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5237 	if (m == NULL)
   5238 		return ENOBUFS;
   5239 
   5240 	MCLGET(m, M_DONTWAIT);
   5241 	if ((m->m_flags & M_EXT) == 0) {
   5242 		m_freem(m);
   5243 		return ENOBUFS;
   5244 	}
   5245 
   5246 	if (rxs->rxs_mbuf != NULL)
   5247 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5248 
   5249 	rxs->rxs_mbuf = m;
   5250 
   5251 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5252 	/*
   5253 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5254 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5255 	 */
   5256 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5257 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5258 	if (error) {
   5259 		/* XXX XXX XXX */
   5260 		aprint_error_dev(sc->sc_dev,
   5261 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5262 		panic("wm_add_rxbuf");
   5263 	}
   5264 
   5265 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5266 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5267 
   5268 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5269 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5270 			wm_init_rxdesc(rxq, idx);
   5271 	} else
   5272 		wm_init_rxdesc(rxq, idx);
   5273 
   5274 	return 0;
   5275 }
   5276 
   5277 /*
   5278  * wm_rxdrain:
   5279  *
   5280  *	Drain the receive queue.
   5281  */
   5282 static void
   5283 wm_rxdrain(struct wm_rxqueue *rxq)
   5284 {
   5285 	struct wm_softc *sc = rxq->rxq_sc;
   5286 	struct wm_rxsoft *rxs;
   5287 	int i;
   5288 
   5289 	KASSERT(mutex_owned(rxq->rxq_lock));
   5290 
   5291 	for (i = 0; i < WM_NRXDESC; i++) {
   5292 		rxs = &rxq->rxq_soft[i];
   5293 		if (rxs->rxs_mbuf != NULL) {
   5294 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5295 			m_freem(rxs->rxs_mbuf);
   5296 			rxs->rxs_mbuf = NULL;
   5297 		}
   5298 	}
   5299 }
   5300 
   5301 /*
   5302  * Setup registers for RSS.
   5303  *
   5304  * XXX not yet VMDq support
   5305  */
   5306 static void
   5307 wm_init_rss(struct wm_softc *sc)
   5308 {
   5309 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5310 	int i;
   5311 
   5312 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5313 
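         	/*
         	 * Fill the redirection table: entry i steers RSS hash bucket i
         	 * to queue (i % sc_nqueues), i.e. round-robin across the queues.
         	 */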
   5314 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5315 		unsigned int qid, reta_ent;
   5316 
   5317 		qid  = i % sc->sc_nqueues;
   5318 		switch (sc->sc_type) {
   5319 		case WM_T_82574:
   5320 			reta_ent = __SHIFTIN(qid,
   5321 			    RETA_ENT_QINDEX_MASK_82574);
   5322 			break;
   5323 		case WM_T_82575:
   5324 			reta_ent = __SHIFTIN(qid,
   5325 			    RETA_ENT_QINDEX1_MASK_82575);
   5326 			break;
   5327 		default:
   5328 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5329 			break;
   5330 		}
   5331 
   5332 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5333 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5334 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5335 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5336 	}
   5337 
   5338 	rss_getkey((uint8_t *)rss_key);
   5339 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5340 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5341 
   5342 	if (sc->sc_type == WM_T_82574)
   5343 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5344 	else
   5345 		mrqc = MRQC_ENABLE_RSS_MQ;
   5346 
   5347 	/*
   5348 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5349 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5350 	 */
   5351 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5352 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5353 #if 0
   5354 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5355 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5356 #endif
   5357 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5358 
   5359 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5360 }
   5361 
   5362 /*
    5363  * Adjust the TX and RX queue numbers which the system actually uses.
    5364  *
    5365  * The numbers are affected by the parameters below.
    5366  *     - The number of hardware queues
   5367  *     - The number of MSI-X vectors (= "nvectors" argument)
   5368  *     - ncpu
   5369  */
   5370 static void
   5371 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5372 {
   5373 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5374 
   5375 	if (nvectors < 2) {
   5376 		sc->sc_nqueues = 1;
   5377 		return;
   5378 	}
   5379 
   5380 	switch (sc->sc_type) {
   5381 	case WM_T_82572:
   5382 		hw_ntxqueues = 2;
   5383 		hw_nrxqueues = 2;
   5384 		break;
   5385 	case WM_T_82574:
   5386 		hw_ntxqueues = 2;
   5387 		hw_nrxqueues = 2;
   5388 		break;
   5389 	case WM_T_82575:
   5390 		hw_ntxqueues = 4;
   5391 		hw_nrxqueues = 4;
   5392 		break;
   5393 	case WM_T_82576:
   5394 		hw_ntxqueues = 16;
   5395 		hw_nrxqueues = 16;
   5396 		break;
   5397 	case WM_T_82580:
   5398 	case WM_T_I350:
   5399 	case WM_T_I354:
   5400 		hw_ntxqueues = 8;
   5401 		hw_nrxqueues = 8;
   5402 		break;
   5403 	case WM_T_I210:
   5404 		hw_ntxqueues = 4;
   5405 		hw_nrxqueues = 4;
   5406 		break;
   5407 	case WM_T_I211:
   5408 		hw_ntxqueues = 2;
   5409 		hw_nrxqueues = 2;
   5410 		break;
   5411 		/*
    5412 		 * As the Ethernet controllers below do not support MSI-X,
    5413 		 * this driver does not use multiple queues on them.
   5414 		 *     - WM_T_80003
   5415 		 *     - WM_T_ICH8
   5416 		 *     - WM_T_ICH9
   5417 		 *     - WM_T_ICH10
   5418 		 *     - WM_T_PCH
   5419 		 *     - WM_T_PCH2
   5420 		 *     - WM_T_PCH_LPT
   5421 		 */
   5422 	default:
   5423 		hw_ntxqueues = 1;
   5424 		hw_nrxqueues = 1;
   5425 		break;
   5426 	}
   5427 
   5428 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
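         	/*
         	 * The net effect below is sc_nqueues = min(hw_nqueues,
         	 * nvectors - 1, ncpu); e.g. an 82576 (16 hardware queues)
         	 * given 5 vectors on a 2-CPU system ends up with 2 queues.
         	 */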
   5429 
   5430 	/*
    5431 	 * As more queues than MSI-X vectors cannot improve scaling, we
    5432 	 * limit the number of queues actually used.
   5433 	 */
   5434 	if (nvectors < hw_nqueues + 1)
   5435 		sc->sc_nqueues = nvectors - 1;
   5436 	else
   5437 		sc->sc_nqueues = hw_nqueues;
   5438 
   5439 	/*
   5440 	 * As queues more then cpus cannot improve scaling, we limit
    5441 	 * As more queues than CPUs cannot improve scaling, we limit
    5442 	 * the number of queues actually used.
   5443 	if (ncpu < sc->sc_nqueues)
   5444 		sc->sc_nqueues = ncpu;
   5445 }
   5446 
   5447 static inline bool
   5448 wm_is_using_msix(struct wm_softc *sc)
   5449 {
   5450 
   5451 	return (sc->sc_nintrs > 1);
   5452 }
   5453 
   5454 static inline bool
   5455 wm_is_using_multiqueue(struct wm_softc *sc)
   5456 {
   5457 
   5458 	return (sc->sc_nqueues > 1);
   5459 }
   5460 
   5461 static int
   5462 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5463 {
   5464 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5465 
   5466 	wmq->wmq_id = qidx;
   5467 	wmq->wmq_intr_idx = intr_idx;
   5468 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5469 	    wm_handle_queue, wmq);
   5470 	if (wmq->wmq_si != NULL)
   5471 		return 0;
   5472 
   5473 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5474 	    wmq->wmq_id);
   5475 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5476 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5477 	return ENOMEM;
   5478 }
   5479 
   5480 /*
   5481  * Both single interrupt MSI and INTx can use this function.
   5482  */
   5483 static int
   5484 wm_setup_legacy(struct wm_softc *sc)
   5485 {
   5486 	pci_chipset_tag_t pc = sc->sc_pc;
   5487 	const char *intrstr = NULL;
   5488 	char intrbuf[PCI_INTRSTR_LEN];
   5489 	int error;
   5490 
   5491 	error = wm_alloc_txrx_queues(sc);
   5492 	if (error) {
   5493 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5494 		    error);
   5495 		return ENOMEM;
   5496 	}
   5497 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5498 	    sizeof(intrbuf));
   5499 #ifdef WM_MPSAFE
   5500 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5501 #endif
   5502 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5503 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5504 	if (sc->sc_ihs[0] == NULL) {
   5505 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5506 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5507 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5508 		return ENOMEM;
   5509 	}
   5510 
   5511 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5512 	sc->sc_nintrs = 1;
   5513 
   5514 	return wm_softint_establish_queue(sc, 0, 0);
   5515 }
   5516 
   5517 static int
   5518 wm_setup_msix(struct wm_softc *sc)
   5519 {
   5520 	void *vih;
   5521 	kcpuset_t *affinity;
   5522 	int qidx, error, intr_idx, txrx_established;
   5523 	pci_chipset_tag_t pc = sc->sc_pc;
   5524 	const char *intrstr = NULL;
   5525 	char intrbuf[PCI_INTRSTR_LEN];
   5526 	char intr_xname[INTRDEVNAMEBUF];
   5527 
   5528 	if (sc->sc_nqueues < ncpu) {
   5529 		/*
   5530 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5531 		 * interrupts starts from CPU#1.
   5532 		 */
   5533 		sc->sc_affinity_offset = 1;
   5534 	} else {
   5535 		/*
    5536 		 * In this case, this device uses all CPUs, so we unify the
    5537 		 * affinity cpu_index with the MSI-X vector number for readability.
   5538 		 */
   5539 		sc->sc_affinity_offset = 0;
   5540 	}
   5541 
   5542 	error = wm_alloc_txrx_queues(sc);
   5543 	if (error) {
   5544 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5545 		    error);
   5546 		return ENOMEM;
   5547 	}
   5548 
   5549 	kcpuset_create(&affinity, false);
   5550 	intr_idx = 0;
   5551 
   5552 	/*
   5553 	 * TX and RX
   5554 	 */
   5555 	txrx_established = 0;
   5556 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5557 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5558 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5559 
   5560 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5561 		    sizeof(intrbuf));
   5562 #ifdef WM_MPSAFE
   5563 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5564 		    PCI_INTR_MPSAFE, true);
   5565 #endif
   5566 		memset(intr_xname, 0, sizeof(intr_xname));
   5567 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5568 		    device_xname(sc->sc_dev), qidx);
   5569 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5570 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5571 		if (vih == NULL) {
   5572 			aprint_error_dev(sc->sc_dev,
    5573 			    "unable to establish MSI-X (for TX and RX)%s%s\n",
   5574 			    intrstr ? " at " : "",
   5575 			    intrstr ? intrstr : "");
   5576 
   5577 			goto fail;
   5578 		}
   5579 		kcpuset_zero(affinity);
   5580 		/* Round-robin affinity */
   5581 		kcpuset_set(affinity, affinity_to);
   5582 		error = interrupt_distribute(vih, affinity, NULL);
   5583 		if (error == 0) {
   5584 			aprint_normal_dev(sc->sc_dev,
   5585 			    "for TX and RX interrupting at %s affinity to %u\n",
   5586 			    intrstr, affinity_to);
   5587 		} else {
   5588 			aprint_normal_dev(sc->sc_dev,
   5589 			    "for TX and RX interrupting at %s\n", intrstr);
   5590 		}
   5591 		sc->sc_ihs[intr_idx] = vih;
   5592 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5593 			goto fail;
   5594 		txrx_established++;
   5595 		intr_idx++;
   5596 	}
   5597 
   5598 	/* LINK */
   5599 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5600 	    sizeof(intrbuf));
   5601 #ifdef WM_MPSAFE
   5602 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5603 #endif
   5604 	memset(intr_xname, 0, sizeof(intr_xname));
   5605 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5606 	    device_xname(sc->sc_dev));
   5607 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5608 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5609 	if (vih == NULL) {
   5610 		aprint_error_dev(sc->sc_dev,
    5611 		    "unable to establish MSI-X (for LINK)%s%s\n",
   5612 		    intrstr ? " at " : "",
   5613 		    intrstr ? intrstr : "");
   5614 
   5615 		goto fail;
   5616 	}
   5617 	/* Keep default affinity to LINK interrupt */
   5618 	aprint_normal_dev(sc->sc_dev,
   5619 	    "for LINK interrupting at %s\n", intrstr);
   5620 	sc->sc_ihs[intr_idx] = vih;
   5621 	sc->sc_link_intr_idx = intr_idx;
   5622 
   5623 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5624 	kcpuset_destroy(affinity);
   5625 	return 0;
   5626 
   5627  fail:
   5628 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5629 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    5630 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5631 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5632 	}
   5633 
   5634 	kcpuset_destroy(affinity);
   5635 	return ENOMEM;
   5636 }
   5637 
   5638 static void
   5639 wm_unset_stopping_flags(struct wm_softc *sc)
   5640 {
   5641 	int i;
   5642 
   5643 	KASSERT(WM_CORE_LOCKED(sc));
   5644 
   5645 	/* Must unset stopping flags in ascending order. */
   5646 	for (i = 0; i < sc->sc_nqueues; i++) {
   5647 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5648 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5649 
   5650 		mutex_enter(txq->txq_lock);
   5651 		txq->txq_stopping = false;
   5652 		mutex_exit(txq->txq_lock);
   5653 
   5654 		mutex_enter(rxq->rxq_lock);
   5655 		rxq->rxq_stopping = false;
   5656 		mutex_exit(rxq->rxq_lock);
   5657 	}
   5658 
   5659 	sc->sc_core_stopping = false;
   5660 }
   5661 
   5662 static void
   5663 wm_set_stopping_flags(struct wm_softc *sc)
   5664 {
   5665 	int i;
   5666 
   5667 	KASSERT(WM_CORE_LOCKED(sc));
   5668 
   5669 	sc->sc_core_stopping = true;
   5670 
   5671 	/* Must set stopping flags in ascending order. */
   5672 	for (i = 0; i < sc->sc_nqueues; i++) {
   5673 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5674 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5675 
   5676 		mutex_enter(rxq->rxq_lock);
   5677 		rxq->rxq_stopping = true;
   5678 		mutex_exit(rxq->rxq_lock);
   5679 
   5680 		mutex_enter(txq->txq_lock);
   5681 		txq->txq_stopping = true;
   5682 		mutex_exit(txq->txq_lock);
   5683 	}
   5684 }
   5685 
   5686 /*
    5687  * Write the interrupt interval value to the ITR or EITR register.
   5688  */
   5689 static void
   5690 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5691 {
   5692 
   5693 	if (!wmq->wmq_set_itr)
   5694 		return;
   5695 
   5696 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5697 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5698 
   5699 		/*
    5700 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
    5701 		 * the counter field in software.
   5702 		 */
   5703 		if (sc->sc_type == WM_T_82575)
   5704 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5705 		else
   5706 			eitr |= EITR_CNT_INGR;
   5707 
   5708 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5709 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5710 		/*
    5711 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5712 		 * the multiqueue function with MSI-X.
   5713 		 */
   5714 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5715 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5716 	} else {
   5717 		KASSERT(wmq->wmq_id == 0);
   5718 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5719 	}
   5720 
   5721 	wmq->wmq_set_itr = false;
   5722 }
   5723 
   5724 /*
   5725  * TODO
    5726  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5727  * however, it does not fit wm(4), so AIM stays disabled until we find
    5728  * an appropriate ITR calculation.
   5729  */
   5730 /*
    5731  * Calculate the interrupt interval value to be written to the register
    5732  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5733  */
   5734 static void
   5735 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5736 {
   5737 #ifdef NOTYET
   5738 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5739 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5740 	uint32_t avg_size = 0;
   5741 	uint32_t new_itr;
   5742 
   5743 	if (rxq->rxq_packets)
    5744 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   5745 	if (txq->txq_packets)
   5746 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5747 
   5748 	if (avg_size == 0) {
   5749 		new_itr = 450; /* restore default value */
   5750 		goto out;
   5751 	}
   5752 
   5753 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5754 	avg_size += 24;
   5755 
   5756 	/* Don't starve jumbo frames */
   5757 	avg_size = uimin(avg_size, 3000);
   5758 
   5759 	/* Give a little boost to mid-size frames */
   5760 	if ((avg_size > 300) && (avg_size < 1200))
   5761 		new_itr = avg_size / 3;
   5762 	else
   5763 		new_itr = avg_size / 2;
   5764 
   5765 out:
   5766 	/*
    5767 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5768 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5769 	 */
   5770 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5771 		new_itr *= 4;
   5772 
   5773 	if (new_itr != wmq->wmq_itr) {
   5774 		wmq->wmq_itr = new_itr;
   5775 		wmq->wmq_set_itr = true;
   5776 	} else
   5777 		wmq->wmq_set_itr = false;
   5778 
   5779 	rxq->rxq_packets = 0;
   5780 	rxq->rxq_bytes = 0;
   5781 	txq->txq_packets = 0;
   5782 	txq->txq_bytes = 0;
   5783 #endif
   5784 }
   5785 
   5786 static void
   5787 wm_init_sysctls(struct wm_softc *sc)
   5788 {
   5789 	struct sysctllog **log;
   5790 	const struct sysctlnode *rnode, *cnode;
   5791 	int rv;
   5792 	const char *dvname;
   5793 
   5794 	log = &sc->sc_sysctllog;
   5795 	dvname = device_xname(sc->sc_dev);
   5796 
   5797 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5798 	    0, CTLTYPE_NODE, dvname,
   5799 	    SYSCTL_DESCR("wm information and settings"),
   5800 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5801 	if (rv != 0)
   5802 		goto err;
   5803 
   5804 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5805 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5806 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5807 	if (rv != 0)
   5808 		goto teardown;
   5809 
   5810 	return;
   5811 
   5812 teardown:
   5813 	sysctl_teardown(log);
   5814 err:
   5815 	sc->sc_sysctllog = NULL;
   5816 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5817 	    __func__, rv);
   5818 }
   5819 
   5820 /*
   5821  * wm_init:		[ifnet interface function]
   5822  *
   5823  *	Initialize the interface.
   5824  */
   5825 static int
   5826 wm_init(struct ifnet *ifp)
   5827 {
   5828 	struct wm_softc *sc = ifp->if_softc;
   5829 	int ret;
   5830 
   5831 	WM_CORE_LOCK(sc);
   5832 	ret = wm_init_locked(ifp);
   5833 	WM_CORE_UNLOCK(sc);
   5834 
   5835 	return ret;
   5836 }
   5837 
   5838 static int
   5839 wm_init_locked(struct ifnet *ifp)
   5840 {
   5841 	struct wm_softc *sc = ifp->if_softc;
   5842 	struct ethercom *ec = &sc->sc_ethercom;
   5843 	int i, j, trynum, error = 0;
   5844 	uint32_t reg, sfp_mask = 0;
   5845 
   5846 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5847 		device_xname(sc->sc_dev), __func__));
   5848 	KASSERT(WM_CORE_LOCKED(sc));
   5849 
   5850 	/*
    5851 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5852 	 * There is a small but measurable benefit to avoiding the adjustment
   5853 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5854 	 * on such platforms.  One possibility is that the DMA itself is
   5855 	 * slightly more efficient if the front of the entire packet (instead
   5856 	 * of the front of the headers) is aligned.
   5857 	 *
   5858 	 * Note we must always set align_tweak to 0 if we are using
   5859 	 * jumbo frames.
   5860 	 */
   5861 #ifdef __NO_STRICT_ALIGNMENT
   5862 	sc->sc_align_tweak = 0;
   5863 #else
   5864 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5865 		sc->sc_align_tweak = 0;
   5866 	else
   5867 		sc->sc_align_tweak = 2;
   5868 #endif /* __NO_STRICT_ALIGNMENT */
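         	/*
         	 * The 2-byte tweak is the usual ETHER_ALIGN trick: it puts the
         	 * IP header on a 4-byte boundary after the 14-byte Ethernet
         	 * header.
         	 */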
   5869 
   5870 	/* Cancel any pending I/O. */
   5871 	wm_stop_locked(ifp, false, false);
   5872 
   5873 	/* Update statistics before reset */
   5874 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   5875 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   5876 
   5877 	/* PCH_SPT hardware workaround */
   5878 	if (sc->sc_type == WM_T_PCH_SPT)
   5879 		wm_flush_desc_rings(sc);
   5880 
   5881 	/* Reset the chip to a known state. */
   5882 	wm_reset(sc);
   5883 
   5884 	/*
    5885 	 * AMT-based hardware can now take control from firmware.
   5886 	 * Do this after reset.
   5887 	 */
   5888 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5889 		wm_get_hw_control(sc);
   5890 
   5891 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5892 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5893 		wm_legacy_irq_quirk_spt(sc);
   5894 
   5895 	/* Init hardware bits */
   5896 	wm_initialize_hardware_bits(sc);
   5897 
   5898 	/* Reset the PHY. */
   5899 	if (sc->sc_flags & WM_F_HAS_MII)
   5900 		wm_gmii_reset(sc);
   5901 
   5902 	if (sc->sc_type >= WM_T_ICH8) {
   5903 		reg = CSR_READ(sc, WMREG_GCR);
   5904 		/*
   5905 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5906 		 * default after reset.
   5907 		 */
   5908 		if (sc->sc_type == WM_T_ICH8)
   5909 			reg |= GCR_NO_SNOOP_ALL;
   5910 		else
   5911 			reg &= ~GCR_NO_SNOOP_ALL;
   5912 		CSR_WRITE(sc, WMREG_GCR, reg);
   5913 	}
   5914 
   5915 	if ((sc->sc_type >= WM_T_ICH8)
   5916 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5917 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5919 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5920 		reg |= CTRL_EXT_RO_DIS;
   5921 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5922 	}
   5923 
   5924 	/* Calculate (E)ITR value */
   5925 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5926 		/*
   5927 		 * For NEWQUEUE's EITR (except for 82575).
    5928 		 * The 82575's EITR should be set to the same throttling
    5929 		 * value as old controllers' ITR because the interrupts/sec
    5930 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5931 		 *
    5932 		 * The 82574's EITR should be set to the same value as the ITR.
    5933 		 *
    5934 		 * For N interrupts/sec, set this value to 1,000,000 / N,
    5935 		 * in contrast to the ITR throttling value.
   5936 		 */
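         		/* 450 => about 2222 interrupts/sec (1,000,000 / 450) */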
   5937 		sc->sc_itr_init = 450;
   5938 	} else if (sc->sc_type >= WM_T_82543) {
   5939 		/*
   5940 		 * Set up the interrupt throttling register (units of 256ns)
   5941 		 * Note that a footnote in Intel's documentation says this
   5942 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5943 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5944 		 * that that is also true for the 1024ns units of the other
   5945 		 * interrupt-related timer registers -- so, really, we ought
   5946 		 * to divide this value by 4 when the link speed is low.
   5947 		 *
   5948 		 * XXX implement this division at link speed change!
   5949 		 */
   5950 
   5951 		/*
   5952 		 * For N interrupts/sec, set this value to:
   5953 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5954 		 * absolute and packet timer values to this value
   5955 		 * divided by 4 to get "simple timer" behavior.
   5956 		 */
   5957 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5958 	}
   5959 
   5960 	error = wm_init_txrx_queues(sc);
   5961 	if (error)
   5962 		goto out;
   5963 
   5964 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   5965 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   5966 	    (sc->sc_type >= WM_T_82575))
   5967 		wm_serdes_power_up_link_82575(sc);
   5968 
   5969 	/* Clear out the VLAN table -- we don't use it (yet). */
   5970 	CSR_WRITE(sc, WMREG_VET, 0);
   5971 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5972 		trynum = 10; /* Due to hw errata */
   5973 	else
   5974 		trynum = 1;
   5975 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5976 		for (j = 0; j < trynum; j++)
   5977 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5978 
   5979 	/*
   5980 	 * Set up flow-control parameters.
   5981 	 *
   5982 	 * XXX Values could probably stand some tuning.
   5983 	 */
   5984 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5985 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5986 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5987 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
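         		/*
         		 * These registers match incoming 802.3x PAUSE frames:
         		 * FCAL/FCAH hold the reserved multicast address
         		 * 01:80:c2:00:00:01 and FCT holds EtherType 0x8808.
         		 */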
   5988 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5989 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5990 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5991 	}
   5992 
   5993 	sc->sc_fcrtl = FCRTL_DFLT;
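         	/*
         	 * Receive-buffer fill thresholds: an XOFF pause frame is sent
         	 * when the buffer rises past FCRTH, and XON when it drains
         	 * below FCRTL.
         	 */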
   5994 	if (sc->sc_type < WM_T_82543) {
   5995 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5996 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5997 	} else {
   5998 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5999 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6000 	}
   6001 
   6002 	if (sc->sc_type == WM_T_80003)
   6003 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6004 	else
   6005 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6006 
   6007 	/* Writes the control register. */
   6008 	wm_set_vlan(sc);
   6009 
   6010 	if (sc->sc_flags & WM_F_HAS_MII) {
   6011 		uint16_t kmreg;
   6012 
   6013 		switch (sc->sc_type) {
   6014 		case WM_T_80003:
   6015 		case WM_T_ICH8:
   6016 		case WM_T_ICH9:
   6017 		case WM_T_ICH10:
   6018 		case WM_T_PCH:
   6019 		case WM_T_PCH2:
   6020 		case WM_T_PCH_LPT:
   6021 		case WM_T_PCH_SPT:
   6022 		case WM_T_PCH_CNP:
   6023 			/*
   6024 			 * Set the mac to wait the maximum time between each
   6025 			 * iteration and increase the max iterations when
   6026 			 * polling the phy; this fixes erroneous timeouts at
   6027 			 * 10Mbps.
   6028 			 */
   6029 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6030 			    0xFFFF);
   6031 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6032 			    &kmreg);
   6033 			kmreg |= 0x3F;
   6034 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6035 			    kmreg);
   6036 			break;
   6037 		default:
   6038 			break;
   6039 		}
   6040 
   6041 		if (sc->sc_type == WM_T_80003) {
   6042 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6043 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6044 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6045 
   6046 			/* Bypass RX and TX FIFO's */
   6047 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6048 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6049 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6050 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6051 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6052 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6053 		}
   6054 	}
   6055 #if 0
   6056 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6057 #endif
   6058 
   6059 	/* Set up checksum offload parameters. */
   6060 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6061 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6062 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6063 		reg |= RXCSUM_IPOFL;
   6064 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6065 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6066 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6067 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6068 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6069 
   6070 	/* Set registers about MSI-X */
   6071 	if (wm_is_using_msix(sc)) {
   6072 		uint32_t ivar, qintr_idx;
   6073 		struct wm_queue *wmq;
   6074 		unsigned int qid;
   6075 
   6076 		if (sc->sc_type == WM_T_82575) {
   6077 			/* Interrupt control */
   6078 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6079 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6080 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6081 
   6082 			/* TX and RX */
   6083 			for (i = 0; i < sc->sc_nqueues; i++) {
   6084 				wmq = &sc->sc_queue[i];
   6085 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6086 				    EITR_TX_QUEUE(wmq->wmq_id)
   6087 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6088 			}
   6089 			/* Link status */
   6090 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6091 			    EITR_OTHER);
   6092 		} else if (sc->sc_type == WM_T_82574) {
   6093 			/* Interrupt control */
   6094 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6095 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6096 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6097 
   6098 			/*
    6099 			 * Work around an issue with spurious interrupts
    6100 			 * in MSI-X mode.
    6101 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6102 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   6103 			 */
   6104 			reg = CSR_READ(sc, WMREG_RFCTL);
   6105 			reg |= WMREG_RFCTL_ACKDIS;
   6106 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6107 
   6108 			ivar = 0;
   6109 			/* TX and RX */
   6110 			for (i = 0; i < sc->sc_nqueues; i++) {
   6111 				wmq = &sc->sc_queue[i];
   6112 				qid = wmq->wmq_id;
   6113 				qintr_idx = wmq->wmq_intr_idx;
   6114 
   6115 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6116 				    IVAR_TX_MASK_Q_82574(qid));
   6117 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6118 				    IVAR_RX_MASK_Q_82574(qid));
   6119 			}
   6120 			/* Link status */
   6121 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6122 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6123 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6124 		} else {
   6125 			/* Interrupt control */
   6126 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6127 			    | GPIE_EIAME | GPIE_PBA);
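         			/*
         			 * GPIE_MULTI_MSIX selects one MSI-X vector per
         			 * queue, GPIE_EIAME enables auto-masking via
         			 * EIAM, GPIE_NSICR selects non-selective ICR
         			 * clear-on-read, and GPIE_PBA enables MSI-X PBA
         			 * support.
         			 */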
   6128 
   6129 			switch (sc->sc_type) {
   6130 			case WM_T_82580:
   6131 			case WM_T_I350:
   6132 			case WM_T_I354:
   6133 			case WM_T_I210:
   6134 			case WM_T_I211:
   6135 				/* TX and RX */
   6136 				for (i = 0; i < sc->sc_nqueues; i++) {
   6137 					wmq = &sc->sc_queue[i];
   6138 					qid = wmq->wmq_id;
   6139 					qintr_idx = wmq->wmq_intr_idx;
   6140 
   6141 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6142 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6143 					ivar |= __SHIFTIN((qintr_idx
   6144 						| IVAR_VALID),
   6145 					    IVAR_TX_MASK_Q(qid));
   6146 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6147 					ivar |= __SHIFTIN((qintr_idx
   6148 						| IVAR_VALID),
   6149 					    IVAR_RX_MASK_Q(qid));
   6150 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6151 				}
   6152 				break;
   6153 			case WM_T_82576:
   6154 				/* TX and RX */
   6155 				for (i = 0; i < sc->sc_nqueues; i++) {
   6156 					wmq = &sc->sc_queue[i];
   6157 					qid = wmq->wmq_id;
   6158 					qintr_idx = wmq->wmq_intr_idx;
   6159 
   6160 					ivar = CSR_READ(sc,
   6161 					    WMREG_IVAR_Q_82576(qid));
   6162 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6163 					ivar |= __SHIFTIN((qintr_idx
   6164 						| IVAR_VALID),
   6165 					    IVAR_TX_MASK_Q_82576(qid));
   6166 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6167 					ivar |= __SHIFTIN((qintr_idx
   6168 						| IVAR_VALID),
   6169 					    IVAR_RX_MASK_Q_82576(qid));
   6170 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6171 					    ivar);
   6172 				}
   6173 				break;
   6174 			default:
   6175 				break;
   6176 			}
   6177 
   6178 			/* Link status */
   6179 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6180 			    IVAR_MISC_OTHER);
   6181 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6182 		}
   6183 
   6184 		if (wm_is_using_multiqueue(sc)) {
   6185 			wm_init_rss(sc);
   6186 
    6187 			/*
    6188 			 * NOTE: Receive Full-Packet Checksum Offload
    6189 			 * is mutually exclusive with Multiqueue. However,
    6190 			 * this is not the same as the TCP/IP checksums,
    6191 			 * which still work.
    6192 			 */
   6193 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6194 			reg |= RXCSUM_PCSD;
   6195 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6196 		}
   6197 	}
   6198 
   6199 	/* Set up the interrupt registers. */
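         	/*
         	 * Writing all-ones to IMC (Interrupt Mask Clear) masks every
         	 * interrupt source; the IMS/EIMS writes below re-enable only
         	 * what we need.
         	 */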
   6200 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6201 
   6202 	/* Enable SFP module insertion interrupt if it's required */
   6203 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6204 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6205 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6206 		sfp_mask = ICR_GPI(0);
   6207 	}
   6208 
   6209 	if (wm_is_using_msix(sc)) {
   6210 		uint32_t mask;
   6211 		struct wm_queue *wmq;
   6212 
   6213 		switch (sc->sc_type) {
   6214 		case WM_T_82574:
   6215 			mask = 0;
   6216 			for (i = 0; i < sc->sc_nqueues; i++) {
   6217 				wmq = &sc->sc_queue[i];
   6218 				mask |= ICR_TXQ(wmq->wmq_id);
   6219 				mask |= ICR_RXQ(wmq->wmq_id);
   6220 			}
   6221 			mask |= ICR_OTHER;
   6222 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6223 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6224 			break;
   6225 		default:
   6226 			if (sc->sc_type == WM_T_82575) {
   6227 				mask = 0;
   6228 				for (i = 0; i < sc->sc_nqueues; i++) {
   6229 					wmq = &sc->sc_queue[i];
   6230 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6231 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6232 				}
   6233 				mask |= EITR_OTHER;
   6234 			} else {
   6235 				mask = 0;
   6236 				for (i = 0; i < sc->sc_nqueues; i++) {
   6237 					wmq = &sc->sc_queue[i];
   6238 					mask |= 1 << wmq->wmq_intr_idx;
   6239 				}
   6240 				mask |= 1 << sc->sc_link_intr_idx;
   6241 			}
   6242 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6243 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6244 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6245 
   6246 			/* For other interrupts */
   6247 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6248 			break;
   6249 		}
   6250 	} else {
   6251 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6252 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6253 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6254 	}
   6255 
   6256 	/* Set up the inter-packet gap. */
   6257 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6258 
   6259 	if (sc->sc_type >= WM_T_82543) {
   6260 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6261 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6262 			wm_itrs_writereg(sc, wmq);
   6263 		}
    6264 		/*
    6265 		 * Link interrupts occur much less frequently than TX
    6266 		 * and RX interrupts, so we don't tune the
    6267 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6268 		 * if_igb does.
    6269 		 */
   6270 	}
   6271 
   6272 	/* Set the VLAN ethernetype. */
   6273 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6274 
   6275 	/*
   6276 	 * Set up the transmit control register; we start out with
    6277 	 * a collision distance suitable for FDX, but update it when
   6278 	 * we resolve the media type.
   6279 	 */
   6280 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6281 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6282 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6283 	if (sc->sc_type >= WM_T_82571)
   6284 		sc->sc_tctl |= TCTL_MULR;
   6285 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6286 
   6287 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6288 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6289 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6290 	}
   6291 
   6292 	if (sc->sc_type == WM_T_80003) {
   6293 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6294 		reg &= ~TCTL_EXT_GCEX_MASK;
   6295 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6296 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6297 	}
   6298 
   6299 	/* Set the media. */
   6300 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6301 		goto out;
   6302 
   6303 	/* Configure for OS presence */
   6304 	wm_init_manageability(sc);
   6305 
   6306 	/*
   6307 	 * Set up the receive control register; we actually program the
   6308 	 * register when we set the receive filter. Use multicast address
   6309 	 * offset type 0.
   6310 	 *
   6311 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6312 	 * don't enable that feature.
   6313 	 */
   6314 	sc->sc_mchash_type = 0;
   6315 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6316 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6317 
    6318 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   6319 	if (sc->sc_type == WM_T_82574)
   6320 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6321 
   6322 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6323 		sc->sc_rctl |= RCTL_SECRC;
   6324 
   6325 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6326 	    && (ifp->if_mtu > ETHERMTU)) {
   6327 		sc->sc_rctl |= RCTL_LPE;
   6328 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6329 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6330 	}
   6331 
   6332 	if (MCLBYTES == 2048)
   6333 		sc->sc_rctl |= RCTL_2k;
   6334 	else {
   6335 		if (sc->sc_type >= WM_T_82543) {
   6336 			switch (MCLBYTES) {
   6337 			case 4096:
   6338 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6339 				break;
   6340 			case 8192:
   6341 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6342 				break;
   6343 			case 16384:
   6344 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6345 				break;
   6346 			default:
   6347 				panic("wm_init: MCLBYTES %d unsupported",
   6348 				    MCLBYTES);
   6349 				break;
   6350 			}
   6351 		} else
   6352 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6353 	}
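         	/*
         	 * RCTL_BSEX extends the BSIZE encoding (each size scaled by
         	 * 16) so the 4k/8k/16k buffer sizes above can be expressed;
         	 * plain RCTL_2k selects the standard 2k receive buffer.
         	 */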
   6354 
   6355 	/* Enable ECC */
   6356 	switch (sc->sc_type) {
   6357 	case WM_T_82571:
   6358 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6359 		reg |= PBA_ECC_CORR_EN;
   6360 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6361 		break;
   6362 	case WM_T_PCH_LPT:
   6363 	case WM_T_PCH_SPT:
   6364 	case WM_T_PCH_CNP:
   6365 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6366 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6367 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6368 
   6369 		sc->sc_ctrl |= CTRL_MEHE;
   6370 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6371 		break;
   6372 	default:
   6373 		break;
   6374 	}
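         	/*
         	 * Note on the above: the 82571 path enables correctable-ECC
         	 * reporting, while the PCH_LPT-and-newer path enables
         	 * uncorrectable-ECC reporting and sets CTRL_MEHE (Memory
         	 * Error Handling Enable, per Intel's datasheets).
         	 */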
   6375 
   6376 	/*
   6377 	 * Set the receive filter.
   6378 	 *
   6379 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6380 	 * the setting of RCTL.EN in wm_set_filter()
   6381 	 */
   6382 	wm_set_filter(sc);
   6383 
    6384 	/* On 82575 and later, set RDT only if RX is enabled. */
   6385 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6386 		int qidx;
   6387 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6388 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6389 			for (i = 0; i < WM_NRXDESC; i++) {
   6390 				mutex_enter(rxq->rxq_lock);
   6391 				wm_init_rxdesc(rxq, i);
   6392 				mutex_exit(rxq->rxq_lock);
   6393 
   6394 			}
   6395 		}
   6396 	}
   6397 
   6398 	wm_unset_stopping_flags(sc);
   6399 
   6400 	/* Start the one second link check clock. */
   6401 	callout_schedule(&sc->sc_tick_ch, hz);
   6402 
   6403 	/* ...all done! */
   6404 	ifp->if_flags |= IFF_RUNNING;
   6405 
   6406  out:
   6407 	/* Save last flags for the callback */
   6408 	sc->sc_if_flags = ifp->if_flags;
   6409 	sc->sc_ec_capenable = ec->ec_capenable;
   6410 	if (error)
   6411 		log(LOG_ERR, "%s: interface not running\n",
   6412 		    device_xname(sc->sc_dev));
   6413 	return error;
   6414 }
   6415 
   6416 /*
   6417  * wm_stop:		[ifnet interface function]
   6418  *
   6419  *	Stop transmission on the interface.
   6420  */
   6421 static void
   6422 wm_stop(struct ifnet *ifp, int disable)
   6423 {
   6424 	struct wm_softc *sc = ifp->if_softc;
   6425 
   6426 	ASSERT_SLEEPABLE();
   6427 
   6428 	WM_CORE_LOCK(sc);
   6429 	wm_stop_locked(ifp, disable ? true : false, true);
   6430 	WM_CORE_UNLOCK(sc);
   6431 
    6432 	/*
    6433 	 * After wm_set_stopping_flags(), it is guaranteed that
    6434 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    6435 	 * However, workqueue_wait() cannot be called in
    6436 	 * wm_stop_locked() because it can sleep,
    6437 	 * so call workqueue_wait() here.
    6438 	 */
   6439 	for (int i = 0; i < sc->sc_nqueues; i++)
   6440 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6441 }
   6442 
   6443 static void
   6444 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6445 {
   6446 	struct wm_softc *sc = ifp->if_softc;
   6447 	struct wm_txsoft *txs;
   6448 	int i, qidx;
   6449 
   6450 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6451 		device_xname(sc->sc_dev), __func__));
   6452 	KASSERT(WM_CORE_LOCKED(sc));
   6453 
   6454 	wm_set_stopping_flags(sc);
   6455 
   6456 	if (sc->sc_flags & WM_F_HAS_MII) {
   6457 		/* Down the MII. */
   6458 		mii_down(&sc->sc_mii);
   6459 	} else {
   6460 #if 0
   6461 		/* Should we clear PHY's status properly? */
   6462 		wm_reset(sc);
   6463 #endif
   6464 	}
   6465 
   6466 	/* Stop the transmit and receive processes. */
   6467 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6468 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6469 	sc->sc_rctl &= ~RCTL_EN;
   6470 
   6471 	/*
   6472 	 * Clear the interrupt mask to ensure the device cannot assert its
   6473 	 * interrupt line.
   6474 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6475 	 * service any currently pending or shared interrupt.
   6476 	 */
   6477 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6478 	sc->sc_icr = 0;
   6479 	if (wm_is_using_msix(sc)) {
   6480 		if (sc->sc_type != WM_T_82574) {
   6481 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6482 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6483 		} else
   6484 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6485 	}
   6486 
   6487 	/*
   6488 	 * Stop callouts after interrupts are disabled; if we have
   6489 	 * to wait for them, we will be releasing the CORE_LOCK
   6490 	 * briefly, which will unblock interrupts on the current CPU.
   6491 	 */
   6492 
   6493 	/* Stop the one second clock. */
   6494 	if (wait)
   6495 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6496 	else
   6497 		callout_stop(&sc->sc_tick_ch);
   6498 
   6499 	/* Stop the 82547 Tx FIFO stall check timer. */
   6500 	if (sc->sc_type == WM_T_82547) {
   6501 		if (wait)
   6502 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6503 		else
   6504 			callout_stop(&sc->sc_txfifo_ch);
   6505 	}
   6506 
   6507 	/* Release any queued transmit buffers. */
   6508 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6509 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6510 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6511 		mutex_enter(txq->txq_lock);
   6512 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6513 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6514 			txs = &txq->txq_soft[i];
   6515 			if (txs->txs_mbuf != NULL) {
    6516 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6517 				m_freem(txs->txs_mbuf);
   6518 				txs->txs_mbuf = NULL;
   6519 			}
   6520 		}
   6521 		mutex_exit(txq->txq_lock);
   6522 	}
   6523 
   6524 	/* Mark the interface as down and cancel the watchdog timer. */
   6525 	ifp->if_flags &= ~IFF_RUNNING;
   6526 
   6527 	if (disable) {
   6528 		for (i = 0; i < sc->sc_nqueues; i++) {
   6529 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6530 			mutex_enter(rxq->rxq_lock);
   6531 			wm_rxdrain(rxq);
   6532 			mutex_exit(rxq->rxq_lock);
   6533 		}
   6534 	}
   6535 
   6536 #if 0 /* notyet */
   6537 	if (sc->sc_type >= WM_T_82544)
   6538 		CSR_WRITE(sc, WMREG_WUC, 0);
   6539 #endif
   6540 }
   6541 
   6542 static void
   6543 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6544 {
   6545 	struct mbuf *m;
   6546 	int i;
   6547 
   6548 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6549 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6550 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6551 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6552 		    m->m_data, m->m_len, m->m_flags);
   6553 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6554 	    i, i == 1 ? "" : "s");
   6555 }
   6556 
   6557 /*
   6558  * wm_82547_txfifo_stall:
   6559  *
   6560  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6561  *	reset the FIFO pointers, and restart packet transmission.
   6562  */
   6563 static void
   6564 wm_82547_txfifo_stall(void *arg)
   6565 {
   6566 	struct wm_softc *sc = arg;
   6567 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6568 
   6569 	mutex_enter(txq->txq_lock);
   6570 
   6571 	if (txq->txq_stopping)
   6572 		goto out;
   6573 
   6574 	if (txq->txq_fifo_stall) {
   6575 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6576 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6577 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6578 			/*
   6579 			 * Packets have drained.  Stop transmitter, reset
   6580 			 * FIFO pointers, restart transmitter, and kick
   6581 			 * the packet queue.
   6582 			 */
   6583 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6584 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6585 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6586 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6587 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6588 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6589 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6590 			CSR_WRITE_FLUSH(sc);
   6591 
   6592 			txq->txq_fifo_head = 0;
   6593 			txq->txq_fifo_stall = 0;
   6594 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6595 		} else {
   6596 			/*
   6597 			 * Still waiting for packets to drain; try again in
   6598 			 * another tick.
   6599 			 */
   6600 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6601 		}
   6602 	}
   6603 
   6604 out:
   6605 	mutex_exit(txq->txq_lock);
   6606 }
   6607 
   6608 /*
   6609  * wm_82547_txfifo_bugchk:
   6610  *
   6611  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6612  *	prevent enqueueing a packet that would wrap around the end
    6613  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6614  *
   6615  *	We do this by checking the amount of space before the end
   6616  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6617  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6618  *	the internal FIFO pointers to the beginning, and restart
   6619  *	transmission on the interface.
   6620  */
   6621 #define	WM_FIFO_HDR		0x10
   6622 #define	WM_82547_PAD_LEN	0x3e0
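         /*
          * WM_FIFO_HDR is the granularity used for the Tx FIFO space
          * accounting below; WM_82547_PAD_LEN is the threshold that
          * wm_82547_txfifo_bugchk() compares against when deciding whether
          * to stall (a description of the code below, not of the hardware
          * spec).
          */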
   6623 static int
   6624 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6625 {
   6626 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6627 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
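         	/*
         	 * Packet length as the FIFO accounts it: the payload plus one
         	 * FIFO header, rounded up to WM_FIFO_HDR granularity.
         	 */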
   6628 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6629 
   6630 	/* Just return if already stalled. */
   6631 	if (txq->txq_fifo_stall)
   6632 		return 1;
   6633 
   6634 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6635 		/* Stall only occurs in half-duplex mode. */
   6636 		goto send_packet;
   6637 	}
   6638 
   6639 	if (len >= WM_82547_PAD_LEN + space) {
   6640 		txq->txq_fifo_stall = 1;
   6641 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6642 		return 1;
   6643 	}
   6644 
   6645  send_packet:
   6646 	txq->txq_fifo_head += len;
   6647 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6648 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6649 
   6650 	return 0;
   6651 }
   6652 
   6653 static int
   6654 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6655 {
   6656 	int error;
   6657 
   6658 	/*
   6659 	 * Allocate the control data structures, and create and load the
   6660 	 * DMA map for it.
   6661 	 *
   6662 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6663 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6664 	 * both sets within the same 4G segment.
   6665 	 */
   6666 	if (sc->sc_type < WM_T_82544)
   6667 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6668 	else
   6669 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6670 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6671 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6672 	else
   6673 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6674 
   6675 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6676 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6677 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6678 		aprint_error_dev(sc->sc_dev,
   6679 		    "unable to allocate TX control data, error = %d\n",
   6680 		    error);
   6681 		goto fail_0;
   6682 	}
   6683 
   6684 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6685 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6686 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6687 		aprint_error_dev(sc->sc_dev,
   6688 		    "unable to map TX control data, error = %d\n", error);
   6689 		goto fail_1;
   6690 	}
   6691 
   6692 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6693 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6694 		aprint_error_dev(sc->sc_dev,
   6695 		    "unable to create TX control data DMA map, error = %d\n",
   6696 		    error);
   6697 		goto fail_2;
   6698 	}
   6699 
   6700 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6701 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6702 		aprint_error_dev(sc->sc_dev,
   6703 		    "unable to load TX control data DMA map, error = %d\n",
   6704 		    error);
   6705 		goto fail_3;
   6706 	}
   6707 
   6708 	return 0;
   6709 
   6710  fail_3:
   6711 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6712  fail_2:
   6713 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6714 	    WM_TXDESCS_SIZE(txq));
   6715  fail_1:
   6716 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6717  fail_0:
   6718 	return error;
   6719 }
   6720 
   6721 static void
   6722 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6723 {
   6724 
   6725 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6726 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6727 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6728 	    WM_TXDESCS_SIZE(txq));
   6729 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6730 }
   6731 
   6732 static int
   6733 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6734 {
   6735 	int error;
   6736 	size_t rxq_descs_size;
   6737 
   6738 	/*
   6739 	 * Allocate the control data structures, and create and load the
   6740 	 * DMA map for it.
   6741 	 *
   6742 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6743 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6744 	 * both sets within the same 4G segment.
   6745 	 */
   6746 	rxq->rxq_ndesc = WM_NRXDESC;
   6747 	if (sc->sc_type == WM_T_82574)
   6748 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6749 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6750 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6751 	else
   6752 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6753 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6754 
   6755 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6756 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6757 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6758 		aprint_error_dev(sc->sc_dev,
   6759 		    "unable to allocate RX control data, error = %d\n",
   6760 		    error);
   6761 		goto fail_0;
   6762 	}
   6763 
   6764 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6765 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6766 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6767 		aprint_error_dev(sc->sc_dev,
   6768 		    "unable to map RX control data, error = %d\n", error);
   6769 		goto fail_1;
   6770 	}
   6771 
   6772 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6773 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6774 		aprint_error_dev(sc->sc_dev,
   6775 		    "unable to create RX control data DMA map, error = %d\n",
   6776 		    error);
   6777 		goto fail_2;
   6778 	}
   6779 
   6780 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6781 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6782 		aprint_error_dev(sc->sc_dev,
   6783 		    "unable to load RX control data DMA map, error = %d\n",
   6784 		    error);
   6785 		goto fail_3;
   6786 	}
   6787 
   6788 	return 0;
   6789 
   6790  fail_3:
   6791 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6792  fail_2:
   6793 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6794 	    rxq_descs_size);
   6795  fail_1:
   6796 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6797  fail_0:
   6798 	return error;
   6799 }
   6800 
   6801 static void
   6802 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6803 {
   6804 
   6805 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6806 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6807 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6808 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6809 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6810 }
   6811 
   6812 
   6813 static int
   6814 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6815 {
   6816 	int i, error;
   6817 
   6818 	/* Create the transmit buffer DMA maps. */
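         	/*
         	 * Note (an assumption, not from the source): the shorter
         	 * queue length for the 82547 variants presumably bounds how
         	 * much data the Tx FIFO workaround has to account for.
         	 */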
   6819 	WM_TXQUEUELEN(txq) =
   6820 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6821 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6822 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6823 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6824 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6825 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6826 			aprint_error_dev(sc->sc_dev,
   6827 			    "unable to create Tx DMA map %d, error = %d\n",
   6828 			    i, error);
   6829 			goto fail;
   6830 		}
   6831 	}
   6832 
   6833 	return 0;
   6834 
   6835  fail:
   6836 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6837 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6838 			bus_dmamap_destroy(sc->sc_dmat,
   6839 			    txq->txq_soft[i].txs_dmamap);
   6840 	}
   6841 	return error;
   6842 }
   6843 
   6844 static void
   6845 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6846 {
   6847 	int i;
   6848 
   6849 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6850 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6851 			bus_dmamap_destroy(sc->sc_dmat,
   6852 			    txq->txq_soft[i].txs_dmamap);
   6853 	}
   6854 }
   6855 
   6856 static int
   6857 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6858 {
   6859 	int i, error;
   6860 
   6861 	/* Create the receive buffer DMA maps. */
   6862 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6863 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6864 			    MCLBYTES, 0, 0,
   6865 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6866 			aprint_error_dev(sc->sc_dev,
   6867 			    "unable to create Rx DMA map %d error = %d\n",
   6868 			    i, error);
   6869 			goto fail;
   6870 		}
   6871 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6872 	}
   6873 
   6874 	return 0;
   6875 
   6876  fail:
   6877 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6878 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6879 			bus_dmamap_destroy(sc->sc_dmat,
   6880 			    rxq->rxq_soft[i].rxs_dmamap);
   6881 	}
   6882 	return error;
   6883 }
   6884 
   6885 static void
   6886 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6887 {
   6888 	int i;
   6889 
   6890 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6891 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6892 			bus_dmamap_destroy(sc->sc_dmat,
   6893 			    rxq->rxq_soft[i].rxs_dmamap);
   6894 	}
   6895 }
   6896 
   6897 /*
    6898  * wm_alloc_txrx_queues:
    6899  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6900  */
   6901 static int
   6902 wm_alloc_txrx_queues(struct wm_softc *sc)
   6903 {
   6904 	int i, error, tx_done, rx_done;
   6905 
   6906 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6907 	    KM_SLEEP);
   6908 	if (sc->sc_queue == NULL) {
   6909 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6910 		error = ENOMEM;
   6911 		goto fail_0;
   6912 	}
   6913 
   6914 	/* For transmission */
   6915 	error = 0;
   6916 	tx_done = 0;
   6917 	for (i = 0; i < sc->sc_nqueues; i++) {
   6918 #ifdef WM_EVENT_COUNTERS
   6919 		int j;
   6920 		const char *xname;
   6921 #endif
   6922 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6923 		txq->txq_sc = sc;
   6924 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6925 
   6926 		error = wm_alloc_tx_descs(sc, txq);
   6927 		if (error)
   6928 			break;
   6929 		error = wm_alloc_tx_buffer(sc, txq);
   6930 		if (error) {
   6931 			wm_free_tx_descs(sc, txq);
   6932 			break;
   6933 		}
   6934 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6935 		if (txq->txq_interq == NULL) {
   6936 			wm_free_tx_descs(sc, txq);
   6937 			wm_free_tx_buffer(sc, txq);
   6938 			error = ENOMEM;
   6939 			break;
   6940 		}
   6941 
   6942 #ifdef WM_EVENT_COUNTERS
   6943 		xname = device_xname(sc->sc_dev);
   6944 
   6945 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6946 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6947 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6948 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6949 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6950 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6951 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6952 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6953 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6954 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6955 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6956 
   6957 		for (j = 0; j < WM_NTXSEGS; j++) {
   6958 			snprintf(txq->txq_txseg_evcnt_names[j],
   6959 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6960 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6961 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6962 		}
   6963 
   6964 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6965 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6966 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6967 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6968 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6969 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   6970 #endif /* WM_EVENT_COUNTERS */
   6971 
   6972 		tx_done++;
   6973 	}
   6974 	if (error)
   6975 		goto fail_1;
   6976 
   6977 	/* For receive */
   6978 	error = 0;
   6979 	rx_done = 0;
   6980 	for (i = 0; i < sc->sc_nqueues; i++) {
   6981 #ifdef WM_EVENT_COUNTERS
   6982 		const char *xname;
   6983 #endif
   6984 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6985 		rxq->rxq_sc = sc;
   6986 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6987 
   6988 		error = wm_alloc_rx_descs(sc, rxq);
   6989 		if (error)
   6990 			break;
   6991 
   6992 		error = wm_alloc_rx_buffer(sc, rxq);
   6993 		if (error) {
   6994 			wm_free_rx_descs(sc, rxq);
   6995 			break;
   6996 		}
   6997 
   6998 #ifdef WM_EVENT_COUNTERS
   6999 		xname = device_xname(sc->sc_dev);
   7000 
   7001 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7002 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7003 
   7004 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7005 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7006 #endif /* WM_EVENT_COUNTERS */
   7007 
   7008 		rx_done++;
   7009 	}
   7010 	if (error)
   7011 		goto fail_2;
   7012 
   7013 	return 0;
   7014 
   7015  fail_2:
   7016 	for (i = 0; i < rx_done; i++) {
   7017 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7018 		wm_free_rx_buffer(sc, rxq);
   7019 		wm_free_rx_descs(sc, rxq);
   7020 		if (rxq->rxq_lock)
   7021 			mutex_obj_free(rxq->rxq_lock);
   7022 	}
   7023  fail_1:
   7024 	for (i = 0; i < tx_done; i++) {
   7025 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7026 		pcq_destroy(txq->txq_interq);
   7027 		wm_free_tx_buffer(sc, txq);
   7028 		wm_free_tx_descs(sc, txq);
   7029 		if (txq->txq_lock)
   7030 			mutex_obj_free(txq->txq_lock);
   7031 	}
   7032 
   7033 	kmem_free(sc->sc_queue,
   7034 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7035  fail_0:
   7036 	return error;
   7037 }
   7038 
   7039 /*
    7040  * wm_free_txrx_queues:
    7041  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7042  */
   7043 static void
   7044 wm_free_txrx_queues(struct wm_softc *sc)
   7045 {
   7046 	int i;
   7047 
   7048 	for (i = 0; i < sc->sc_nqueues; i++) {
   7049 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7050 
   7051 #ifdef WM_EVENT_COUNTERS
   7052 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7053 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7054 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7055 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7056 #endif /* WM_EVENT_COUNTERS */
   7057 
   7058 		wm_free_rx_buffer(sc, rxq);
   7059 		wm_free_rx_descs(sc, rxq);
   7060 		if (rxq->rxq_lock)
   7061 			mutex_obj_free(rxq->rxq_lock);
   7062 	}
   7063 
   7064 	for (i = 0; i < sc->sc_nqueues; i++) {
   7065 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7066 		struct mbuf *m;
   7067 #ifdef WM_EVENT_COUNTERS
   7068 		int j;
   7069 
   7070 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7071 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7072 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7073 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7074 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7075 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7076 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7077 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7078 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7079 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7080 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7081 
   7082 		for (j = 0; j < WM_NTXSEGS; j++)
   7083 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7084 
   7085 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7086 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7087 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7088 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7089 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7090 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7091 #endif /* WM_EVENT_COUNTERS */
   7092 
   7093 		/* Drain txq_interq */
   7094 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7095 			m_freem(m);
   7096 		pcq_destroy(txq->txq_interq);
   7097 
   7098 		wm_free_tx_buffer(sc, txq);
   7099 		wm_free_tx_descs(sc, txq);
   7100 		if (txq->txq_lock)
   7101 			mutex_obj_free(txq->txq_lock);
   7102 	}
   7103 
   7104 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7105 }
   7106 
   7107 static void
   7108 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7109 {
   7110 
   7111 	KASSERT(mutex_owned(txq->txq_lock));
   7112 
   7113 	/* Initialize the transmit descriptor ring. */
   7114 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7115 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7116 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7117 	txq->txq_free = WM_NTXDESC(txq);
   7118 	txq->txq_next = 0;
   7119 }
   7120 
   7121 static void
   7122 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7123     struct wm_txqueue *txq)
   7124 {
   7125 
   7126 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7127 		device_xname(sc->sc_dev), __func__));
   7128 	KASSERT(mutex_owned(txq->txq_lock));
   7129 
   7130 	if (sc->sc_type < WM_T_82543) {
   7131 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7132 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7133 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7134 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7135 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7136 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7137 	} else {
   7138 		int qid = wmq->wmq_id;
   7139 
   7140 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7141 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7142 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7143 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7144 
   7145 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7146 			/*
   7147 			 * Don't write TDT before TCTL.EN is set.
    7148 			 * See the documentation.
   7149 			 */
   7150 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7151 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7152 			    | TXDCTL_WTHRESH(0));
   7153 		else {
   7154 			/* XXX should update with AIM? */
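         			/*
         			 * Assumption: wmq_itr is kept in ITR units of
         			 * 256 ns while TIDV/TADV count 1.024 us ticks,
         			 * hence the division by 4.
         			 */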
   7155 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7156 			if (sc->sc_type >= WM_T_82540) {
   7157 				/* Should be the same */
   7158 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7159 			}
   7160 
   7161 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7162 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7163 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7164 		}
   7165 	}
   7166 }
   7167 
   7168 static void
   7169 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7170 {
   7171 	int i;
   7172 
   7173 	KASSERT(mutex_owned(txq->txq_lock));
   7174 
   7175 	/* Initialize the transmit job descriptors. */
   7176 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7177 		txq->txq_soft[i].txs_mbuf = NULL;
   7178 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7179 	txq->txq_snext = 0;
   7180 	txq->txq_sdirty = 0;
   7181 }
   7182 
   7183 static void
   7184 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7185     struct wm_txqueue *txq)
   7186 {
   7187 
   7188 	KASSERT(mutex_owned(txq->txq_lock));
   7189 
   7190 	/*
   7191 	 * Set up some register offsets that are different between
   7192 	 * the i82542 and the i82543 and later chips.
   7193 	 */
   7194 	if (sc->sc_type < WM_T_82543)
   7195 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7196 	else
   7197 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7198 
   7199 	wm_init_tx_descs(sc, txq);
   7200 	wm_init_tx_regs(sc, wmq, txq);
   7201 	wm_init_tx_buffer(sc, txq);
   7202 
   7203 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7204 	txq->txq_sending = false;
   7205 }
   7206 
   7207 static void
   7208 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7209     struct wm_rxqueue *rxq)
   7210 {
   7211 
   7212 	KASSERT(mutex_owned(rxq->rxq_lock));
   7213 
   7214 	/*
   7215 	 * Initialize the receive descriptor and receive job
   7216 	 * descriptor rings.
   7217 	 */
   7218 	if (sc->sc_type < WM_T_82543) {
   7219 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7220 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7221 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7222 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7223 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7224 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7225 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7226 
   7227 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7228 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7229 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7230 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7231 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7232 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7233 	} else {
   7234 		int qid = wmq->wmq_id;
   7235 
   7236 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7237 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7238 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7239 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7240 
   7241 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7242 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7243 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7244 
    7245 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
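         			/*
         			 * SRRCTL's BSIZEPKT field is in 1 KB units
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT bytes), hence the
         			 * multiple-of check above and the shift here.
         			 */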
   7246 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7247 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7248 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7249 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7250 			    | RXDCTL_WTHRESH(1));
   7251 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7252 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7253 		} else {
   7254 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7255 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7256 			/* XXX should update with AIM? */
   7257 			CSR_WRITE(sc, WMREG_RDTR,
   7258 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7259 			/* MUST be the same */
   7260 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7261 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7262 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7263 		}
   7264 	}
   7265 }
   7266 
   7267 static int
   7268 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7269 {
   7270 	struct wm_rxsoft *rxs;
   7271 	int error, i;
   7272 
   7273 	KASSERT(mutex_owned(rxq->rxq_lock));
   7274 
   7275 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7276 		rxs = &rxq->rxq_soft[i];
   7277 		if (rxs->rxs_mbuf == NULL) {
   7278 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7279 				log(LOG_ERR, "%s: unable to allocate or map "
   7280 				    "rx buffer %d, error = %d\n",
   7281 				    device_xname(sc->sc_dev), i, error);
   7282 				/*
   7283 				 * XXX Should attempt to run with fewer receive
   7284 				 * XXX buffers instead of just failing.
   7285 				 */
   7286 				wm_rxdrain(rxq);
   7287 				return ENOMEM;
   7288 			}
   7289 		} else {
   7290 			/*
   7291 			 * For 82575 and 82576, the RX descriptors must be
   7292 			 * initialized after the setting of RCTL.EN in
   7293 			 * wm_set_filter()
   7294 			 */
   7295 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7296 				wm_init_rxdesc(rxq, i);
   7297 		}
   7298 	}
   7299 	rxq->rxq_ptr = 0;
   7300 	rxq->rxq_discard = 0;
   7301 	WM_RXCHAIN_RESET(rxq);
   7302 
   7303 	return 0;
   7304 }
   7305 
   7306 static int
   7307 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7308     struct wm_rxqueue *rxq)
   7309 {
   7310 
   7311 	KASSERT(mutex_owned(rxq->rxq_lock));
   7312 
   7313 	/*
   7314 	 * Set up some register offsets that are different between
   7315 	 * the i82542 and the i82543 and later chips.
   7316 	 */
   7317 	if (sc->sc_type < WM_T_82543)
   7318 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7319 	else
   7320 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7321 
   7322 	wm_init_rx_regs(sc, wmq, rxq);
   7323 	return wm_init_rx_buffer(sc, rxq);
   7324 }
   7325 
   7326 /*
    7327  * wm_init_txrx_queues:
    7328  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7329  */
   7330 static int
   7331 wm_init_txrx_queues(struct wm_softc *sc)
   7332 {
   7333 	int i, error = 0;
   7334 
   7335 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7336 		device_xname(sc->sc_dev), __func__));
   7337 
   7338 	for (i = 0; i < sc->sc_nqueues; i++) {
   7339 		struct wm_queue *wmq = &sc->sc_queue[i];
   7340 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7341 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7342 
    7343 		/*
    7344 		 * TODO
    7345 		 * Currently, a constant value is used instead of AIM.
    7346 		 * Furthermore, the interrupt interval of a multiqueue
    7347 		 * configuration using polling mode is less than the
    7348 		 * default value. More tuning and AIM are required.
    7349 		 */
   7350 		if (wm_is_using_multiqueue(sc))
   7351 			wmq->wmq_itr = 50;
   7352 		else
   7353 			wmq->wmq_itr = sc->sc_itr_init;
   7354 		wmq->wmq_set_itr = true;
   7355 
   7356 		mutex_enter(txq->txq_lock);
   7357 		wm_init_tx_queue(sc, wmq, txq);
   7358 		mutex_exit(txq->txq_lock);
   7359 
   7360 		mutex_enter(rxq->rxq_lock);
   7361 		error = wm_init_rx_queue(sc, wmq, rxq);
   7362 		mutex_exit(rxq->rxq_lock);
   7363 		if (error)
   7364 			break;
   7365 	}
   7366 
   7367 	return error;
   7368 }
   7369 
   7370 /*
   7371  * wm_tx_offload:
   7372  *
   7373  *	Set up TCP/IP checksumming parameters for the
   7374  *	specified packet.
   7375  */
   7376 static void
   7377 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7378     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7379 {
   7380 	struct mbuf *m0 = txs->txs_mbuf;
   7381 	struct livengood_tcpip_ctxdesc *t;
   7382 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7383 	uint32_t ipcse;
   7384 	struct ether_header *eh;
   7385 	int offset, iphl;
   7386 	uint8_t fields;
   7387 
   7388 	/*
   7389 	 * XXX It would be nice if the mbuf pkthdr had offset
   7390 	 * fields for the protocol headers.
   7391 	 */
   7392 
   7393 	eh = mtod(m0, struct ether_header *);
   7394 	switch (htons(eh->ether_type)) {
   7395 	case ETHERTYPE_IP:
   7396 	case ETHERTYPE_IPV6:
   7397 		offset = ETHER_HDR_LEN;
   7398 		break;
   7399 
   7400 	case ETHERTYPE_VLAN:
   7401 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7402 		break;
   7403 
   7404 	default:
   7405 		/* Don't support this protocol or encapsulation. */
    7406 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
    7407 		txq->txq_last_hw_ipcs = 0;
    7408 		txq->txq_last_hw_tucs = 0;
   7409 		*fieldsp = 0;
   7410 		*cmdp = 0;
   7411 		return;
   7412 	}
   7413 
   7414 	if ((m0->m_pkthdr.csum_flags &
   7415 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7416 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7417 	} else
   7418 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7419 
   7420 	ipcse = offset + iphl - 1;
   7421 
   7422 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7423 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7424 	seg = 0;
   7425 	fields = 0;
   7426 
   7427 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7428 		int hlen = offset + iphl;
   7429 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7430 
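         		/*
         		 * Zero the IP length field and seed th_sum with a
         		 * pseudo-header checksum that excludes the length; the
         		 * hardware fills in the per-segment lengths and
         		 * completes the checksums as it splits the frame.
         		 */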
   7431 		if (__predict_false(m0->m_len <
   7432 				    (hlen + sizeof(struct tcphdr)))) {
   7433 			/*
   7434 			 * TCP/IP headers are not in the first mbuf; we need
   7435 			 * to do this the slow and painful way. Let's just
   7436 			 * hope this doesn't happen very often.
   7437 			 */
   7438 			struct tcphdr th;
   7439 
   7440 			WM_Q_EVCNT_INCR(txq, tsopain);
   7441 
   7442 			m_copydata(m0, hlen, sizeof(th), &th);
   7443 			if (v4) {
   7444 				struct ip ip;
   7445 
   7446 				m_copydata(m0, offset, sizeof(ip), &ip);
   7447 				ip.ip_len = 0;
   7448 				m_copyback(m0,
   7449 				    offset + offsetof(struct ip, ip_len),
   7450 				    sizeof(ip.ip_len), &ip.ip_len);
   7451 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7452 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7453 			} else {
   7454 				struct ip6_hdr ip6;
   7455 
   7456 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7457 				ip6.ip6_plen = 0;
   7458 				m_copyback(m0,
   7459 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7460 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7461 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7462 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7463 			}
   7464 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7465 			    sizeof(th.th_sum), &th.th_sum);
   7466 
   7467 			hlen += th.th_off << 2;
   7468 		} else {
   7469 			/*
   7470 			 * TCP/IP headers are in the first mbuf; we can do
   7471 			 * this the easy way.
   7472 			 */
   7473 			struct tcphdr *th;
   7474 
   7475 			if (v4) {
   7476 				struct ip *ip =
   7477 				    (void *)(mtod(m0, char *) + offset);
   7478 				th = (void *)(mtod(m0, char *) + hlen);
   7479 
   7480 				ip->ip_len = 0;
   7481 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7482 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7483 			} else {
   7484 				struct ip6_hdr *ip6 =
   7485 				    (void *)(mtod(m0, char *) + offset);
   7486 				th = (void *)(mtod(m0, char *) + hlen);
   7487 
   7488 				ip6->ip6_plen = 0;
   7489 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7490 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7491 			}
   7492 			hlen += th->th_off << 2;
   7493 		}
   7494 
   7495 		if (v4) {
   7496 			WM_Q_EVCNT_INCR(txq, tso);
   7497 			cmdlen |= WTX_TCPIP_CMD_IP;
   7498 		} else {
   7499 			WM_Q_EVCNT_INCR(txq, tso6);
   7500 			ipcse = 0;
   7501 		}
   7502 		cmd |= WTX_TCPIP_CMD_TSE;
   7503 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7504 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7505 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7506 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7507 	}
   7508 
   7509 	/*
   7510 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7511 	 * offload feature, if we load the context descriptor, we
   7512 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7513 	 */
   7514 
   7515 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7516 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7517 	    WTX_TCPIP_IPCSE(ipcse);
   7518 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7519 		WM_Q_EVCNT_INCR(txq, ipsum);
   7520 		fields |= WTX_IXSM;
   7521 	}
   7522 
   7523 	offset += iphl;
   7524 
   7525 	if (m0->m_pkthdr.csum_flags &
   7526 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7527 		WM_Q_EVCNT_INCR(txq, tusum);
   7528 		fields |= WTX_TXSM;
   7529 		tucs = WTX_TCPIP_TUCSS(offset) |
   7530 		    WTX_TCPIP_TUCSO(offset +
   7531 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7532 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7533 	} else if ((m0->m_pkthdr.csum_flags &
   7534 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7535 		WM_Q_EVCNT_INCR(txq, tusum6);
   7536 		fields |= WTX_TXSM;
   7537 		tucs = WTX_TCPIP_TUCSS(offset) |
   7538 		    WTX_TCPIP_TUCSO(offset +
   7539 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7540 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7541 	} else {
   7542 		/* Just initialize it to a valid TCP context. */
   7543 		tucs = WTX_TCPIP_TUCSS(offset) |
   7544 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7545 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7546 	}
   7547 
   7548 	*cmdp = cmd;
   7549 	*fieldsp = fields;
   7550 
    7551 	/*
    7552 	 * We don't have to write a context descriptor for every packet,
    7553 	 * except on the 82574. For the 82574, we must write a context
    7554 	 * descriptor for every packet when we use two descriptor queues.
    7555 	 *
    7556 	 * The 82574L can only remember the *last* context used,
    7557 	 * regardless of the queue it was used for.  We cannot reuse
    7558 	 * contexts on this hardware platform and must generate a new
    7559 	 * context every time.  82574L hardware spec, section 7.2.6,
    7560 	 * second note.
    7561 	 */
   7562 	if (sc->sc_nqueues < 2) {
    7563 		/*
    7564 		 * Setting up a new checksum offload context for every
    7565 		 * frame takes a lot of processing time for the hardware.
    7566 		 * This also reduces performance a lot for small-sized
    7567 		 * frames, so avoid it if the driver can use a previously
    7568 		 * configured checksum offload context.
    7569 		 * For TSO, in theory we can use the same TSO context
    7570 		 * only if the frame is the same type (IP/TCP) and has
    7571 		 * the same MSS. However, checking whether a frame has
    7572 		 * the same IP/TCP structure is a hard thing, so just
    7573 		 * ignore that and always re-establish a new TSO
    7574 		 * context.
    7575 		 */
   7576 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7577 		    == 0) {
   7578 			if (txq->txq_last_hw_cmd == cmd &&
   7579 			    txq->txq_last_hw_fields == fields &&
   7580 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7581 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7582 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7583 				return;
   7584 			}
   7585 		}
   7586 
    7587 		txq->txq_last_hw_cmd = cmd;
    7588 		txq->txq_last_hw_fields = fields;
    7589 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7590 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7591 	}
   7592 
   7593 	/* Fill in the context descriptor. */
   7594 	t = (struct livengood_tcpip_ctxdesc *)
   7595 	    &txq->txq_descs[txq->txq_next];
   7596 	t->tcpip_ipcs = htole32(ipcs);
   7597 	t->tcpip_tucs = htole32(tucs);
   7598 	t->tcpip_cmdlen = htole32(cmdlen);
   7599 	t->tcpip_seg = htole32(seg);
   7600 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7601 
   7602 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7603 	txs->txs_ndesc++;
   7604 }
   7605 
   7606 static inline int
   7607 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7608 {
   7609 	struct wm_softc *sc = ifp->if_softc;
   7610 	u_int cpuid = cpu_index(curcpu());
   7611 
    7612 	/*
    7613 	 * Currently, a simple distribution strategy.
    7614 	 * TODO:
    7615 	 * Distribute by flowid (RSS hash value).
    7616 	 */
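         	/*
         	 * Example (illustrative values only): with ncpu = 8,
         	 * sc_affinity_offset = 2 and sc_nqueues = 4, CPU 5 maps to
         	 * ((5 + 8 - 2) % 8) % 4 = 3.
         	 */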
   7617 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7618 }
   7619 
   7620 /*
   7621  * wm_start:		[ifnet interface function]
   7622  *
   7623  *	Start packet transmission on the interface.
   7624  */
   7625 static void
   7626 wm_start(struct ifnet *ifp)
   7627 {
   7628 	struct wm_softc *sc = ifp->if_softc;
   7629 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7630 
   7631 #ifdef WM_MPSAFE
   7632 	KASSERT(if_is_mpsafe(ifp));
   7633 #endif
   7634 	/*
   7635 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7636 	 */
   7637 
   7638 	mutex_enter(txq->txq_lock);
   7639 	if (!txq->txq_stopping)
   7640 		wm_start_locked(ifp);
   7641 	mutex_exit(txq->txq_lock);
   7642 }
   7643 
   7644 static void
   7645 wm_start_locked(struct ifnet *ifp)
   7646 {
   7647 	struct wm_softc *sc = ifp->if_softc;
   7648 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7649 
   7650 	wm_send_common_locked(ifp, txq, false);
   7651 }
   7652 
   7653 static int
   7654 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7655 {
   7656 	int qid;
   7657 	struct wm_softc *sc = ifp->if_softc;
   7658 	struct wm_txqueue *txq;
   7659 
   7660 	qid = wm_select_txqueue(ifp, m);
   7661 	txq = &sc->sc_queue[qid].wmq_txq;
   7662 
   7663 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7664 		m_freem(m);
   7665 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7666 		return ENOBUFS;
   7667 	}
   7668 
   7669 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7670 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7671 	if (m->m_flags & M_MCAST)
   7672 		if_statinc_ref(nsr, if_omcasts);
   7673 	IF_STAT_PUTREF(ifp);
   7674 
   7675 	if (mutex_tryenter(txq->txq_lock)) {
   7676 		if (!txq->txq_stopping)
   7677 			wm_transmit_locked(ifp, txq);
   7678 		mutex_exit(txq->txq_lock);
   7679 	}
   7680 
   7681 	return 0;
   7682 }
   7683 
   7684 static void
   7685 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7686 {
   7687 
   7688 	wm_send_common_locked(ifp, txq, true);
   7689 }
   7690 
   7691 static void
   7692 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7693     bool is_transmit)
   7694 {
   7695 	struct wm_softc *sc = ifp->if_softc;
   7696 	struct mbuf *m0;
   7697 	struct wm_txsoft *txs;
   7698 	bus_dmamap_t dmamap;
   7699 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7700 	bus_addr_t curaddr;
   7701 	bus_size_t seglen, curlen;
   7702 	uint32_t cksumcmd;
   7703 	uint8_t cksumfields;
   7704 	bool remap = true;
   7705 
   7706 	KASSERT(mutex_owned(txq->txq_lock));
   7707 
   7708 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7709 		return;
   7710 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7711 		return;
   7712 
   7713 	/* Remember the previous number of free descriptors. */
   7714 	ofree = txq->txq_free;
   7715 
   7716 	/*
   7717 	 * Loop through the send queue, setting up transmit descriptors
   7718 	 * until we drain the queue, or use up all available transmit
   7719 	 * descriptors.
   7720 	 */
   7721 	for (;;) {
   7722 		m0 = NULL;
   7723 
   7724 		/* Get a work queue entry. */
   7725 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7726 			wm_txeof(txq, UINT_MAX);
   7727 			if (txq->txq_sfree == 0) {
   7728 				DPRINTF(WM_DEBUG_TX,
   7729 				    ("%s: TX: no free job descriptors\n",
   7730 					device_xname(sc->sc_dev)));
   7731 				WM_Q_EVCNT_INCR(txq, txsstall);
   7732 				break;
   7733 			}
   7734 		}
   7735 
   7736 		/* Grab a packet off the queue. */
   7737 		if (is_transmit)
   7738 			m0 = pcq_get(txq->txq_interq);
   7739 		else
   7740 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7741 		if (m0 == NULL)
   7742 			break;
   7743 
   7744 		DPRINTF(WM_DEBUG_TX,
   7745 		    ("%s: TX: have packet to transmit: %p\n",
   7746 			device_xname(sc->sc_dev), m0));
   7747 
   7748 		txs = &txq->txq_soft[txq->txq_snext];
   7749 		dmamap = txs->txs_dmamap;
   7750 
   7751 		use_tso = (m0->m_pkthdr.csum_flags &
   7752 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7753 
   7754 		/*
   7755 		 * So says the Linux driver:
   7756 		 * The controller does a simple calculation to make sure
   7757 		 * there is enough room in the FIFO before initiating the
   7758 		 * DMA for each buffer. The calc is:
   7759 		 *	4 = ceil(buffer len / MSS)
   7760 		 * To make sure we don't overrun the FIFO, adjust the max
   7761 		 * buffer len if the MSS drops.
   7762 		 */
   7763 		dmamap->dm_maxsegsz =
   7764 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7765 		    ? m0->m_pkthdr.segsz << 2
   7766 		    : WTX_MAX_LEN;
   7767 
   7768 		/*
   7769 		 * Load the DMA map.  If this fails, the packet either
   7770 		 * didn't fit in the allotted number of segments, or we
   7771 		 * were short on resources.  For the too-many-segments
   7772 		 * case, we simply report an error and drop the packet,
   7773 		 * since we can't sanely copy a jumbo packet to a single
   7774 		 * buffer.
   7775 		 */
   7776 retry:
   7777 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7778 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7779 		if (__predict_false(error)) {
   7780 			if (error == EFBIG) {
   7781 				if (remap == true) {
   7782 					struct mbuf *m;
   7783 
   7784 					remap = false;
   7785 					m = m_defrag(m0, M_NOWAIT);
   7786 					if (m != NULL) {
   7787 						WM_Q_EVCNT_INCR(txq, defrag);
   7788 						m0 = m;
   7789 						goto retry;
   7790 					}
   7791 				}
   7792 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7793 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7794 				    "DMA segments, dropping...\n",
   7795 				    device_xname(sc->sc_dev));
   7796 				wm_dump_mbuf_chain(sc, m0);
   7797 				m_freem(m0);
   7798 				continue;
   7799 			}
   7800 			/* Short on resources, just stop for now. */
   7801 			DPRINTF(WM_DEBUG_TX,
   7802 			    ("%s: TX: dmamap load failed: %d\n",
   7803 				device_xname(sc->sc_dev), error));
   7804 			break;
   7805 		}
   7806 
   7807 		segs_needed = dmamap->dm_nsegs;
   7808 		if (use_tso) {
   7809 			/* For sentinel descriptor; see below. */
   7810 			segs_needed++;
   7811 		}
   7812 
   7813 		/*
   7814 		 * Ensure we have enough descriptors free to describe
   7815 		 * the packet. Note, we always reserve one descriptor
   7816 		 * at the end of the ring due to the semantics of the
   7817 		 * TDT register, plus one more in the event we need
   7818 		 * to load offload context.
   7819 		 */
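         		/*
         		 * (When TDT catches up with TDH the hardware considers
         		 * the ring empty, so one slot must always stay unused
         		 * to tell a full ring from an empty one.)
         		 */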
   7820 		if (segs_needed > txq->txq_free - 2) {
   7821 			/*
   7822 			 * Not enough free descriptors to transmit this
   7823 			 * packet.  We haven't committed anything yet,
   7824 			 * so just unload the DMA map, put the packet
    7825 			 * back on the queue, and punt. Notify the upper
   7826 			 * layer that there are no more slots left.
   7827 			 */
   7828 			DPRINTF(WM_DEBUG_TX,
   7829 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7830 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7831 				segs_needed, txq->txq_free - 1));
   7832 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7833 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7834 			WM_Q_EVCNT_INCR(txq, txdstall);
   7835 			break;
   7836 		}
   7837 
   7838 		/*
   7839 		 * Check for 82547 Tx FIFO bug. We need to do this
   7840 		 * once we know we can transmit the packet, since we
   7841 		 * do some internal FIFO space accounting here.
   7842 		 */
   7843 		if (sc->sc_type == WM_T_82547 &&
   7844 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7845 			DPRINTF(WM_DEBUG_TX,
   7846 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7847 				device_xname(sc->sc_dev)));
   7848 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7849 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7850 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7851 			break;
   7852 		}
   7853 
   7854 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7855 
   7856 		DPRINTF(WM_DEBUG_TX,
   7857 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7858 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7859 
   7860 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7861 
   7862 		/*
   7863 		 * Store a pointer to the packet so that we can free it
   7864 		 * later.
   7865 		 *
   7866 		 * Initially, we consider the number of descriptors the
    7867 		 * packet uses to be the number of DMA segments.  This may be
   7868 		 * incremented by 1 if we do checksum offload (a descriptor
   7869 		 * is used to set the checksum context).
   7870 		 */
   7871 		txs->txs_mbuf = m0;
   7872 		txs->txs_firstdesc = txq->txq_next;
   7873 		txs->txs_ndesc = segs_needed;
   7874 
   7875 		/* Set up offload parameters for this packet. */
   7876 		if (m0->m_pkthdr.csum_flags &
   7877 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7878 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7879 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7880 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   7881 		} else {
    7882 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
    7883 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   7884 			cksumcmd = 0;
   7885 			cksumfields = 0;
   7886 		}
   7887 
   7888 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7889 
   7890 		/* Sync the DMA map. */
   7891 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7892 		    BUS_DMASYNC_PREWRITE);
   7893 
   7894 		/* Initialize the transmit descriptor. */
   7895 		for (nexttx = txq->txq_next, seg = 0;
   7896 		     seg < dmamap->dm_nsegs; seg++) {
   7897 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7898 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7899 			     seglen != 0;
   7900 			     curaddr += curlen, seglen -= curlen,
   7901 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7902 				curlen = seglen;
   7903 
   7904 				/*
   7905 				 * So says the Linux driver:
   7906 				 * Work around for premature descriptor
   7907 				 * write-backs in TSO mode.  Append a
   7908 				 * 4-byte sentinel descriptor.
   7909 				 */
   7910 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7911 				    curlen > 8)
   7912 					curlen -= 4;
   7913 
   7914 				wm_set_dma_addr(
   7915 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7916 				txq->txq_descs[nexttx].wtx_cmdlen
   7917 				    = htole32(cksumcmd | curlen);
   7918 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7919 				    = 0;
   7920 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7921 				    = cksumfields;
   7922 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7923 				lasttx = nexttx;
   7924 
   7925 				DPRINTF(WM_DEBUG_TX,
   7926 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7927 					"len %#04zx\n",
   7928 					device_xname(sc->sc_dev), nexttx,
   7929 					(uint64_t)curaddr, curlen));
   7930 			}
   7931 		}
   7932 
   7933 		KASSERT(lasttx != -1);
   7934 
   7935 		/*
   7936 		 * Set up the command byte on the last descriptor of
   7937 		 * the packet. If we're in the interrupt delay window,
   7938 		 * delay the interrupt.
   7939 		 */
   7940 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7941 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7942 
   7943 		/*
   7944 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7945 		 * up the descriptor to encapsulate the packet for us.
   7946 		 *
   7947 		 * This is only valid on the last descriptor of the packet.
   7948 		 */
   7949 		if (vlan_has_tag(m0)) {
   7950 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7951 			    htole32(WTX_CMD_VLE);
   7952 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7953 			    = htole16(vlan_get_tag(m0));
   7954 		}
   7955 
   7956 		txs->txs_lastdesc = lasttx;
   7957 
   7958 		DPRINTF(WM_DEBUG_TX,
   7959 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7960 			device_xname(sc->sc_dev),
   7961 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7962 
   7963 		/* Sync the descriptors we're using. */
   7964 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7965 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7966 
   7967 		/* Give the packet to the chip. */
   7968 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7969 
   7970 		DPRINTF(WM_DEBUG_TX,
   7971 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7972 
   7973 		DPRINTF(WM_DEBUG_TX,
   7974 		    ("%s: TX: finished transmitting packet, job %d\n",
   7975 			device_xname(sc->sc_dev), txq->txq_snext));
   7976 
   7977 		/* Advance the tx pointer. */
   7978 		txq->txq_free -= txs->txs_ndesc;
   7979 		txq->txq_next = nexttx;
   7980 
   7981 		txq->txq_sfree--;
   7982 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7983 
   7984 		/* Pass the packet to any BPF listeners. */
   7985 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7986 	}
   7987 
   7988 	if (m0 != NULL) {
   7989 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7990 		WM_Q_EVCNT_INCR(txq, descdrop);
   7991 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7992 			__func__));
   7993 		m_freem(m0);
   7994 	}
   7995 
   7996 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7997 		/* No more slots; notify upper layer. */
   7998 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7999 	}
   8000 
   8001 	if (txq->txq_free != ofree) {
   8002 		/* Set a watchdog timer in case the chip flakes out. */
   8003 		txq->txq_lastsent = time_uptime;
   8004 		txq->txq_sending = true;
   8005 	}
   8006 }
   8007 
   8008 /*
   8009  * wm_nq_tx_offload:
   8010  *
   8011  *	Set up TCP/IP checksumming parameters for the
   8012  *	specified packet, for NEWQUEUE devices
   8013  */
   8014 static void
   8015 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8016     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8017 {
   8018 	struct mbuf *m0 = txs->txs_mbuf;
   8019 	uint32_t vl_len, mssidx, cmdc;
   8020 	struct ether_header *eh;
   8021 	int offset, iphl;
   8022 
   8023 	/*
   8024 	 * XXX It would be nice if the mbuf pkthdr had offset
   8025 	 * fields for the protocol headers.
   8026 	 */
   8027 	*cmdlenp = 0;
   8028 	*fieldsp = 0;
   8029 
   8030 	eh = mtod(m0, struct ether_header *);
    8031 	switch (ntohs(eh->ether_type)) {
   8032 	case ETHERTYPE_IP:
   8033 	case ETHERTYPE_IPV6:
   8034 		offset = ETHER_HDR_LEN;
   8035 		break;
   8036 
   8037 	case ETHERTYPE_VLAN:
   8038 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8039 		break;
   8040 
   8041 	default:
   8042 		/* Don't support this protocol or encapsulation. */
   8043 		*do_csum = false;
   8044 		return;
   8045 	}
   8046 	*do_csum = true;
   8047 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8048 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8049 
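         	/*
         	 * Sketch of the VLAN/MACLEN/IPLEN word assembled below (the
         	 * exact bit positions come from the NQTXC_VLLEN_* macros in
         	 * if_wmreg.h): the IP header length occupies the low bits,
         	 * the MAC header length sits above it, and the VLAN tag is
         	 * in the high bits.
         	 */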
   8050 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8051 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8052 
   8053 	if ((m0->m_pkthdr.csum_flags &
   8054 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8055 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8056 	} else {
   8057 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8058 	}
   8059 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8060 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8061 
   8062 	if (vlan_has_tag(m0)) {
   8063 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8064 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8065 		*cmdlenp |= NQTX_CMD_VLE;
   8066 	}
   8067 
   8068 	mssidx = 0;
   8069 
   8070 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8071 		int hlen = offset + iphl;
   8072 		int tcp_hlen;
   8073 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8074 
   8075 		if (__predict_false(m0->m_len <
   8076 				    (hlen + sizeof(struct tcphdr)))) {
   8077 			/*
   8078 			 * TCP/IP headers are not in the first mbuf; we need
   8079 			 * to do this the slow and painful way. Let's just
   8080 			 * hope this doesn't happen very often.
   8081 			 */
   8082 			struct tcphdr th;
   8083 
   8084 			WM_Q_EVCNT_INCR(txq, tsopain);
   8085 
   8086 			m_copydata(m0, hlen, sizeof(th), &th);
   8087 			if (v4) {
   8088 				struct ip ip;
   8089 
   8090 				m_copydata(m0, offset, sizeof(ip), &ip);
   8091 				ip.ip_len = 0;
   8092 				m_copyback(m0,
   8093 				    offset + offsetof(struct ip, ip_len),
   8094 				    sizeof(ip.ip_len), &ip.ip_len);
   8095 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8096 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8097 			} else {
   8098 				struct ip6_hdr ip6;
   8099 
   8100 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8101 				ip6.ip6_plen = 0;
   8102 				m_copyback(m0,
   8103 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8104 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8105 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8106 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8107 			}
   8108 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8109 			    sizeof(th.th_sum), &th.th_sum);
   8110 
   8111 			tcp_hlen = th.th_off << 2;
   8112 		} else {
   8113 			/*
   8114 			 * TCP/IP headers are in the first mbuf; we can do
   8115 			 * this the easy way.
   8116 			 */
   8117 			struct tcphdr *th;
   8118 
   8119 			if (v4) {
   8120 				struct ip *ip =
   8121 				    (void *)(mtod(m0, char *) + offset);
   8122 				th = (void *)(mtod(m0, char *) + hlen);
   8123 
   8124 				ip->ip_len = 0;
   8125 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8126 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8127 			} else {
   8128 				struct ip6_hdr *ip6 =
   8129 				    (void *)(mtod(m0, char *) + offset);
   8130 				th = (void *)(mtod(m0, char *) + hlen);
   8131 
   8132 				ip6->ip6_plen = 0;
   8133 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8134 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8135 			}
   8136 			tcp_hlen = th->th_off << 2;
   8137 		}
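         		/*
         		 * Both paths above seed th_sum with a pseudo-header
         		 * checksum computed over a zero length; the hardware
         		 * adds the real per-segment payload length while it
         		 * segments the packet.
         		 */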
   8138 		hlen += tcp_hlen;
   8139 		*cmdlenp |= NQTX_CMD_TSE;
   8140 
   8141 		if (v4) {
   8142 			WM_Q_EVCNT_INCR(txq, tso);
   8143 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8144 		} else {
   8145 			WM_Q_EVCNT_INCR(txq, tso6);
   8146 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8147 		}
   8148 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8149 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8150 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8151 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8152 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8153 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8154 	} else {
   8155 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8156 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8157 	}
   8158 
   8159 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8160 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8161 		cmdc |= NQTXC_CMD_IP4;
   8162 	}
   8163 
   8164 	if (m0->m_pkthdr.csum_flags &
   8165 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8166 		WM_Q_EVCNT_INCR(txq, tusum);
   8167 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8168 			cmdc |= NQTXC_CMD_TCP;
   8169 		else
   8170 			cmdc |= NQTXC_CMD_UDP;
   8171 
   8172 		cmdc |= NQTXC_CMD_IP4;
   8173 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8174 	}
   8175 	if (m0->m_pkthdr.csum_flags &
   8176 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8177 		WM_Q_EVCNT_INCR(txq, tusum6);
   8178 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8179 			cmdc |= NQTXC_CMD_TCP;
   8180 		else
   8181 			cmdc |= NQTXC_CMD_UDP;
   8182 
   8183 		cmdc |= NQTXC_CMD_IP6;
   8184 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8185 	}
   8186 
   8187 	/*
    8188 	 * We don't have to write a context descriptor for every packet on
    8189 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    8190 	 * I354, I210 and I211; it is enough to write one per Tx queue for
    8191 	 * these controllers.
    8192 	 * Writing a context descriptor for every packet would add overhead,
    8193 	 * but it does not cause problems.
   8194 	 */
   8195 	/* Fill in the context descriptor. */
   8196 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8197 	    htole32(vl_len);
   8198 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8199 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8200 	    htole32(cmdc);
   8201 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8202 	    htole32(mssidx);
   8203 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8204 	DPRINTF(WM_DEBUG_TX,
   8205 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8206 		txq->txq_next, 0, vl_len));
   8207 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8208 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8209 	txs->txs_ndesc++;
   8210 }
   8211 
   8212 /*
   8213  * wm_nq_start:		[ifnet interface function]
   8214  *
   8215  *	Start packet transmission on the interface for NEWQUEUE devices
   8216  */
   8217 static void
   8218 wm_nq_start(struct ifnet *ifp)
   8219 {
   8220 	struct wm_softc *sc = ifp->if_softc;
   8221 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8222 
   8223 #ifdef WM_MPSAFE
   8224 	KASSERT(if_is_mpsafe(ifp));
   8225 #endif
   8226 	/*
   8227 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8228 	 */
   8229 
   8230 	mutex_enter(txq->txq_lock);
   8231 	if (!txq->txq_stopping)
   8232 		wm_nq_start_locked(ifp);
   8233 	mutex_exit(txq->txq_lock);
   8234 }
   8235 
   8236 static void
   8237 wm_nq_start_locked(struct ifnet *ifp)
   8238 {
   8239 	struct wm_softc *sc = ifp->if_softc;
   8240 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8241 
   8242 	wm_nq_send_common_locked(ifp, txq, false);
   8243 }
   8244 
   8245 static int
   8246 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8247 {
   8248 	int qid;
   8249 	struct wm_softc *sc = ifp->if_softc;
   8250 	struct wm_txqueue *txq;
   8251 
   8252 	qid = wm_select_txqueue(ifp, m);
   8253 	txq = &sc->sc_queue[qid].wmq_txq;
   8254 
   8255 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8256 		m_freem(m);
   8257 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8258 		return ENOBUFS;
   8259 	}
   8260 
   8261 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8262 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8263 	if (m->m_flags & M_MCAST)
   8264 		if_statinc_ref(nsr, if_omcasts);
   8265 	IF_STAT_PUTREF(ifp);
   8266 
   8267 	/*
    8268 	 * There are two situations in which this mutex_tryenter() can fail
    8269 	 * at run time:
    8270 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8271 	 *     (2) contention with the deferred if_start softint
    8272 	 *         (wm_handle_queue())
    8273 	 * In case (1), the last packet enqueued on txq->txq_interq is
    8274 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8275 	 * In case (2), the last packet is likewise dequeued by
    8276 	 * wm_deferred_start_locked(), so it does not get stuck either.
   8277 	 */
   8278 	if (mutex_tryenter(txq->txq_lock)) {
   8279 		if (!txq->txq_stopping)
   8280 			wm_nq_transmit_locked(ifp, txq);
   8281 		mutex_exit(txq->txq_lock);
   8282 	}
   8283 
   8284 	return 0;
   8285 }
   8286 
   8287 static void
   8288 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8289 {
   8290 
   8291 	wm_nq_send_common_locked(ifp, txq, true);
   8292 }
   8293 
   8294 static void
   8295 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8296     bool is_transmit)
   8297 {
   8298 	struct wm_softc *sc = ifp->if_softc;
   8299 	struct mbuf *m0;
   8300 	struct wm_txsoft *txs;
   8301 	bus_dmamap_t dmamap;
   8302 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8303 	bool do_csum, sent;
   8304 	bool remap = true;
   8305 
   8306 	KASSERT(mutex_owned(txq->txq_lock));
   8307 
   8308 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8309 		return;
   8310 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8311 		return;
   8312 
   8313 	sent = false;
   8314 
   8315 	/*
   8316 	 * Loop through the send queue, setting up transmit descriptors
   8317 	 * until we drain the queue, or use up all available transmit
   8318 	 * descriptors.
   8319 	 */
   8320 	for (;;) {
   8321 		m0 = NULL;
   8322 
   8323 		/* Get a work queue entry. */
   8324 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8325 			wm_txeof(txq, UINT_MAX);
   8326 			if (txq->txq_sfree == 0) {
   8327 				DPRINTF(WM_DEBUG_TX,
   8328 				    ("%s: TX: no free job descriptors\n",
   8329 					device_xname(sc->sc_dev)));
   8330 				WM_Q_EVCNT_INCR(txq, txsstall);
   8331 				break;
   8332 			}
   8333 		}
   8334 
   8335 		/* Grab a packet off the queue. */
   8336 		if (is_transmit)
   8337 			m0 = pcq_get(txq->txq_interq);
   8338 		else
   8339 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8340 		if (m0 == NULL)
   8341 			break;
   8342 
   8343 		DPRINTF(WM_DEBUG_TX,
   8344 		    ("%s: TX: have packet to transmit: %p\n",
   8345 		    device_xname(sc->sc_dev), m0));
   8346 
   8347 		txs = &txq->txq_soft[txq->txq_snext];
   8348 		dmamap = txs->txs_dmamap;
   8349 
   8350 		/*
   8351 		 * Load the DMA map.  If this fails, the packet either
   8352 		 * didn't fit in the allotted number of segments, or we
   8353 		 * were short on resources.  For the too-many-segments
   8354 		 * case, we simply report an error and drop the packet,
   8355 		 * since we can't sanely copy a jumbo packet to a single
   8356 		 * buffer.
   8357 		 */
   8358 retry:
   8359 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8360 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8361 		if (__predict_false(error)) {
   8362 			if (error == EFBIG) {
   8363 				if (remap == true) {
   8364 					struct mbuf *m;
   8365 
   8366 					remap = false;
   8367 					m = m_defrag(m0, M_NOWAIT);
   8368 					if (m != NULL) {
   8369 						WM_Q_EVCNT_INCR(txq, defrag);
   8370 						m0 = m;
   8371 						goto retry;
   8372 					}
   8373 				}
   8374 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8375 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8376 				    "DMA segments, dropping...\n",
   8377 				    device_xname(sc->sc_dev));
   8378 				wm_dump_mbuf_chain(sc, m0);
   8379 				m_freem(m0);
   8380 				continue;
   8381 			}
   8382 			/* Short on resources, just stop for now. */
   8383 			DPRINTF(WM_DEBUG_TX,
   8384 			    ("%s: TX: dmamap load failed: %d\n",
   8385 				device_xname(sc->sc_dev), error));
   8386 			break;
   8387 		}
   8388 
   8389 		segs_needed = dmamap->dm_nsegs;
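         		/*
         		 * Note that, unlike the legacy path above, no extra
         		 * TSO sentinel descriptor is reserved here, so
         		 * segs_needed is exactly the number of DMA segments.
         		 */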
   8390 
   8391 		/*
   8392 		 * Ensure we have enough descriptors free to describe
   8393 		 * the packet. Note, we always reserve one descriptor
   8394 		 * at the end of the ring due to the semantics of the
   8395 		 * TDT register, plus one more in the event we need
   8396 		 * to load offload context.
   8397 		 */
   8398 		if (segs_needed > txq->txq_free - 2) {
   8399 			/*
   8400 			 * Not enough free descriptors to transmit this
   8401 			 * packet.  We haven't committed anything yet,
   8402 			 * so just unload the DMA map, put the packet
    8403 			 * back on the queue, and punt. Notify the upper
   8404 			 * layer that there are no more slots left.
   8405 			 */
   8406 			DPRINTF(WM_DEBUG_TX,
   8407 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8408 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8409 				segs_needed, txq->txq_free - 1));
   8410 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8411 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8412 			WM_Q_EVCNT_INCR(txq, txdstall);
   8413 			break;
   8414 		}
   8415 
   8416 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8417 
   8418 		DPRINTF(WM_DEBUG_TX,
   8419 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8420 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8421 
   8422 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8423 
   8424 		/*
   8425 		 * Store a pointer to the packet so that we can free it
   8426 		 * later.
   8427 		 *
   8428 		 * Initially, we consider the number of descriptors the
    8429 		 * packet uses to be the number of DMA segments.  This may be
   8430 		 * incremented by 1 if we do checksum offload (a descriptor
   8431 		 * is used to set the checksum context).
   8432 		 */
   8433 		txs->txs_mbuf = m0;
   8434 		txs->txs_firstdesc = txq->txq_next;
   8435 		txs->txs_ndesc = segs_needed;
   8436 
   8437 		/* Set up offload parameters for this packet. */
   8438 		uint32_t cmdlen, fields, dcmdlen;
   8439 		if (m0->m_pkthdr.csum_flags &
   8440 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8441 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8442 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8443 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8444 			    &do_csum);
   8445 		} else {
   8446 			do_csum = false;
   8447 			cmdlen = 0;
   8448 			fields = 0;
   8449 		}
   8450 
   8451 		/* Sync the DMA map. */
   8452 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8453 		    BUS_DMASYNC_PREWRITE);
   8454 
   8455 		/* Initialize the first transmit descriptor. */
   8456 		nexttx = txq->txq_next;
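         		/*
         		 * A packet with no offload work uses a legacy
         		 * descriptor, which needs no context descriptor;
         		 * offloaded packets use the advanced data format that
         		 * refers to the context set up by wm_nq_tx_offload()
         		 * above.
         		 */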
   8457 		if (!do_csum) {
   8458 			/* Setup a legacy descriptor */
   8459 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8460 			    dmamap->dm_segs[0].ds_addr);
   8461 			txq->txq_descs[nexttx].wtx_cmdlen =
   8462 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8463 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8464 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8465 			if (vlan_has_tag(m0)) {
   8466 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8467 				    htole32(WTX_CMD_VLE);
   8468 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8469 				    htole16(vlan_get_tag(m0));
   8470 			} else
   8471 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8472 
   8473 			dcmdlen = 0;
   8474 		} else {
   8475 			/* Setup an advanced data descriptor */
   8476 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8477 			    htole64(dmamap->dm_segs[0].ds_addr);
   8478 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8479 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8480 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8481 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8482 			    htole32(fields);
   8483 			DPRINTF(WM_DEBUG_TX,
   8484 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8485 				device_xname(sc->sc_dev), nexttx,
   8486 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8487 			DPRINTF(WM_DEBUG_TX,
   8488 			    ("\t 0x%08x%08x\n", fields,
   8489 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8490 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8491 		}
   8492 
   8493 		lasttx = nexttx;
   8494 		nexttx = WM_NEXTTX(txq, nexttx);
   8495 		/*
    8496 		 * Fill in the remaining descriptors. The legacy and
    8497 		 * advanced formats are the same from here on.
   8498 		 */
   8499 		for (seg = 1; seg < dmamap->dm_nsegs;
   8500 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8501 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8502 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8503 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8504 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8505 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8506 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8507 			lasttx = nexttx;
   8508 
   8509 			DPRINTF(WM_DEBUG_TX,
   8510 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8511 				device_xname(sc->sc_dev), nexttx,
   8512 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8513 				dmamap->dm_segs[seg].ds_len));
   8514 		}
   8515 
   8516 		KASSERT(lasttx != -1);
   8517 
   8518 		/*
   8519 		 * Set up the command byte on the last descriptor of
   8520 		 * the packet. If we're in the interrupt delay window,
   8521 		 * delay the interrupt.
   8522 		 */
   8523 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8524 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8525 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8526 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8527 
   8528 		txs->txs_lastdesc = lasttx;
   8529 
   8530 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8531 		    device_xname(sc->sc_dev),
   8532 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8533 
   8534 		/* Sync the descriptors we're using. */
   8535 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8536 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8537 
   8538 		/* Give the packet to the chip. */
   8539 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8540 		sent = true;
   8541 
   8542 		DPRINTF(WM_DEBUG_TX,
   8543 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8544 
   8545 		DPRINTF(WM_DEBUG_TX,
   8546 		    ("%s: TX: finished transmitting packet, job %d\n",
   8547 			device_xname(sc->sc_dev), txq->txq_snext));
   8548 
   8549 		/* Advance the tx pointer. */
   8550 		txq->txq_free -= txs->txs_ndesc;
   8551 		txq->txq_next = nexttx;
   8552 
   8553 		txq->txq_sfree--;
   8554 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8555 
   8556 		/* Pass the packet to any BPF listeners. */
   8557 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8558 	}
   8559 
   8560 	if (m0 != NULL) {
   8561 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8562 		WM_Q_EVCNT_INCR(txq, descdrop);
   8563 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8564 			__func__));
   8565 		m_freem(m0);
   8566 	}
   8567 
   8568 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8569 		/* No more slots; notify upper layer. */
   8570 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8571 	}
   8572 
   8573 	if (sent) {
   8574 		/* Set a watchdog timer in case the chip flakes out. */
   8575 		txq->txq_lastsent = time_uptime;
   8576 		txq->txq_sending = true;
   8577 	}
   8578 }
   8579 
   8580 static void
   8581 wm_deferred_start_locked(struct wm_txqueue *txq)
   8582 {
   8583 	struct wm_softc *sc = txq->txq_sc;
   8584 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8585 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8586 	int qid = wmq->wmq_id;
   8587 
   8588 	KASSERT(mutex_owned(txq->txq_lock));
   8589 
   8590 	if (txq->txq_stopping) {
   8591 		mutex_exit(txq->txq_lock);
   8592 		return;
   8593 	}
   8594 
   8595 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8596 		/* XXX needed for ALTQ or single-CPU systems */
   8597 		if (qid == 0)
   8598 			wm_nq_start_locked(ifp);
   8599 		wm_nq_transmit_locked(ifp, txq);
   8600 	} else {
    8601 		/* XXX needed for ALTQ or single-CPU systems */
   8602 		if (qid == 0)
   8603 			wm_start_locked(ifp);
   8604 		wm_transmit_locked(ifp, txq);
   8605 	}
   8606 }
   8607 
   8608 /* Interrupt */
   8609 
   8610 /*
   8611  * wm_txeof:
   8612  *
   8613  *	Helper; handle transmit interrupts.
   8614  */
   8615 static bool
   8616 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8617 {
   8618 	struct wm_softc *sc = txq->txq_sc;
   8619 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8620 	struct wm_txsoft *txs;
   8621 	int count = 0;
   8622 	int i;
   8623 	uint8_t status;
   8624 	bool more = false;
   8625 
   8626 	KASSERT(mutex_owned(txq->txq_lock));
   8627 
   8628 	if (txq->txq_stopping)
   8629 		return false;
   8630 
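         	/*
         	 * Optimistically clear the no-space flag; if the reclaim
         	 * below frees nothing, the next send attempt will simply
         	 * set it again.
         	 */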
   8631 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8632 
   8633 	/*
   8634 	 * Go through the Tx list and free mbufs for those
   8635 	 * frames which have been transmitted.
   8636 	 */
   8637 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8638 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8639 		if (limit-- == 0) {
   8640 			more = true;
   8641 			DPRINTF(WM_DEBUG_TX,
   8642 			    ("%s: TX: loop limited, job %d is not processed\n",
   8643 				device_xname(sc->sc_dev), i));
   8644 			break;
   8645 		}
   8646 
   8647 		txs = &txq->txq_soft[i];
   8648 
   8649 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8650 			device_xname(sc->sc_dev), i));
   8651 
   8652 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8653 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8654 
   8655 		status =
   8656 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
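         		/*
         		 * If the descriptor is not done yet, hand it back to
         		 * the device (PREREAD) and stop; it will be checked
         		 * again on a later call.
         		 */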
   8657 		if ((status & WTX_ST_DD) == 0) {
   8658 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8659 			    BUS_DMASYNC_PREREAD);
   8660 			break;
   8661 		}
   8662 
   8663 		count++;
   8664 		DPRINTF(WM_DEBUG_TX,
   8665 		    ("%s: TX: job %d done: descs %d..%d\n",
   8666 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8667 		    txs->txs_lastdesc));
   8668 
   8669 		/*
   8670 		 * XXX We should probably be using the statistics
   8671 		 * XXX registers, but I don't know if they exist
   8672 		 * XXX on chips before the i82544.
   8673 		 */
   8674 
   8675 #ifdef WM_EVENT_COUNTERS
   8676 		if (status & WTX_ST_TU)
   8677 			WM_Q_EVCNT_INCR(txq, underrun);
   8678 #endif /* WM_EVENT_COUNTERS */
   8679 
   8680 		/*
    8681 		 * Documentation for the 82574 and newer says the status
    8682 		 * field has neither an EC (Excessive Collision) bit nor an
    8683 		 * LC (Late Collision) bit (both are reserved). Refer to the
    8684 		 * "PCIe GbE Controller Open Source Software Developer's
    8685 		 * Manual", the 82574 datasheet and newer ones.
    8686 		 *
    8687 		 * XXX I have seen the LC bit set on an I218 even though the
    8688 		 * media was full duplex, so the bit might have another meaning.
   8689 		 */
   8690 
   8691 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8692 		    && ((sc->sc_type < WM_T_82574)
   8693 			|| (sc->sc_type == WM_T_80003))) {
   8694 			if_statinc(ifp, if_oerrors);
   8695 			if (status & WTX_ST_LC)
   8696 				log(LOG_WARNING, "%s: late collision\n",
   8697 				    device_xname(sc->sc_dev));
   8698 			else if (status & WTX_ST_EC) {
   8699 				if_statadd(ifp, if_collisions,
   8700 				    TX_COLLISION_THRESHOLD + 1);
   8701 				log(LOG_WARNING, "%s: excessive collisions\n",
   8702 				    device_xname(sc->sc_dev));
   8703 			}
   8704 		} else
   8705 			if_statinc(ifp, if_opackets);
   8706 
   8707 		txq->txq_packets++;
   8708 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8709 
   8710 		txq->txq_free += txs->txs_ndesc;
   8711 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8712 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8713 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8714 		m_freem(txs->txs_mbuf);
   8715 		txs->txs_mbuf = NULL;
   8716 	}
   8717 
   8718 	/* Update the dirty transmit buffer pointer. */
   8719 	txq->txq_sdirty = i;
   8720 	DPRINTF(WM_DEBUG_TX,
   8721 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8722 
   8723 	if (count != 0)
   8724 		rnd_add_uint32(&sc->rnd_source, count);
   8725 
   8726 	/*
   8727 	 * If there are no more pending transmissions, cancel the watchdog
   8728 	 * timer.
   8729 	 */
   8730 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8731 		txq->txq_sending = false;
   8732 
   8733 	return more;
   8734 }
   8735 
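         /*
          * The wm_rxdesc_get_* helpers below hide the three Rx descriptor
          * layouts: the 82574's extended format, the NEWQUEUE (82575 and
          * newer) advanced format, and the legacy format used by everything
          * else.  Each accessor reads the field from the layout that matches
          * the controller type.
          */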
   8736 static inline uint32_t
   8737 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8738 {
   8739 	struct wm_softc *sc = rxq->rxq_sc;
   8740 
   8741 	if (sc->sc_type == WM_T_82574)
   8742 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8743 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8744 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8745 	else
   8746 		return rxq->rxq_descs[idx].wrx_status;
   8747 }
   8748 
   8749 static inline uint32_t
   8750 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8751 {
   8752 	struct wm_softc *sc = rxq->rxq_sc;
   8753 
   8754 	if (sc->sc_type == WM_T_82574)
   8755 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8756 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8757 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8758 	else
   8759 		return rxq->rxq_descs[idx].wrx_errors;
   8760 }
   8761 
   8762 static inline uint16_t
   8763 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8764 {
   8765 	struct wm_softc *sc = rxq->rxq_sc;
   8766 
   8767 	if (sc->sc_type == WM_T_82574)
   8768 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8769 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8770 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8771 	else
   8772 		return rxq->rxq_descs[idx].wrx_special;
   8773 }
   8774 
   8775 static inline int
   8776 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8777 {
   8778 	struct wm_softc *sc = rxq->rxq_sc;
   8779 
   8780 	if (sc->sc_type == WM_T_82574)
   8781 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8782 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8783 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8784 	else
   8785 		return rxq->rxq_descs[idx].wrx_len;
   8786 }
   8787 
   8788 #ifdef WM_DEBUG
   8789 static inline uint32_t
   8790 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8791 {
   8792 	struct wm_softc *sc = rxq->rxq_sc;
   8793 
   8794 	if (sc->sc_type == WM_T_82574)
   8795 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8796 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8797 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8798 	else
   8799 		return 0;
   8800 }
   8801 
   8802 static inline uint8_t
   8803 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8804 {
   8805 	struct wm_softc *sc = rxq->rxq_sc;
   8806 
   8807 	if (sc->sc_type == WM_T_82574)
   8808 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8809 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8810 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8811 	else
   8812 		return 0;
   8813 }
   8814 #endif /* WM_DEBUG */
   8815 
   8816 static inline bool
   8817 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8818     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8819 {
   8820 
   8821 	if (sc->sc_type == WM_T_82574)
   8822 		return (status & ext_bit) != 0;
   8823 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8824 		return (status & nq_bit) != 0;
   8825 	else
   8826 		return (status & legacy_bit) != 0;
   8827 }
   8828 
   8829 static inline bool
   8830 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8831     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8832 {
   8833 
   8834 	if (sc->sc_type == WM_T_82574)
   8835 		return (error & ext_bit) != 0;
   8836 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8837 		return (error & nq_bit) != 0;
   8838 	else
   8839 		return (error & legacy_bit) != 0;
   8840 }
   8841 
   8842 static inline bool
   8843 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8844 {
   8845 
   8846 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8847 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8848 		return true;
   8849 	else
   8850 		return false;
   8851 }
   8852 
   8853 static inline bool
   8854 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8855 {
   8856 	struct wm_softc *sc = rxq->rxq_sc;
   8857 
   8858 	/* XXX missing error bit for newqueue? */
   8859 	if (wm_rxdesc_is_set_error(sc, errors,
   8860 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8861 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8862 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8863 		NQRXC_ERROR_RXE)) {
   8864 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8865 		    EXTRXC_ERROR_SE, 0))
   8866 			log(LOG_WARNING, "%s: symbol error\n",
   8867 			    device_xname(sc->sc_dev));
   8868 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8869 		    EXTRXC_ERROR_SEQ, 0))
   8870 			log(LOG_WARNING, "%s: receive sequence error\n",
   8871 			    device_xname(sc->sc_dev));
   8872 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8873 		    EXTRXC_ERROR_CE, 0))
   8874 			log(LOG_WARNING, "%s: CRC error\n",
   8875 			    device_xname(sc->sc_dev));
   8876 		return true;
   8877 	}
   8878 
   8879 	return false;
   8880 }
   8881 
   8882 static inline bool
   8883 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8884 {
   8885 	struct wm_softc *sc = rxq->rxq_sc;
   8886 
   8887 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8888 		NQRXC_STATUS_DD)) {
   8889 		/* We have processed all of the receive descriptors. */
   8890 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8891 		return false;
   8892 	}
   8893 
   8894 	return true;
   8895 }
   8896 
   8897 static inline bool
   8898 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8899     uint16_t vlantag, struct mbuf *m)
   8900 {
   8901 
   8902 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8903 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8904 		vlan_set_tag(m, le16toh(vlantag));
   8905 	}
   8906 
   8907 	return true;
   8908 }
   8909 
   8910 static inline void
   8911 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8912     uint32_t errors, struct mbuf *m)
   8913 {
   8914 	struct wm_softc *sc = rxq->rxq_sc;
   8915 
   8916 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8917 		if (wm_rxdesc_is_set_status(sc, status,
   8918 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8919 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8920 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8921 			if (wm_rxdesc_is_set_error(sc, errors,
   8922 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8923 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8924 		}
   8925 		if (wm_rxdesc_is_set_status(sc, status,
   8926 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8927 			/*
   8928 			 * Note: we don't know if this was TCP or UDP,
   8929 			 * so we just set both bits, and expect the
   8930 			 * upper layers to deal.
   8931 			 */
   8932 			WM_Q_EVCNT_INCR(rxq, tusum);
   8933 			m->m_pkthdr.csum_flags |=
   8934 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8935 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8936 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8937 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8938 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8939 		}
   8940 	}
   8941 }
   8942 
   8943 /*
   8944  * wm_rxeof:
   8945  *
   8946  *	Helper; handle receive interrupts.
   8947  */
   8948 static bool
   8949 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8950 {
   8951 	struct wm_softc *sc = rxq->rxq_sc;
   8952 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8953 	struct wm_rxsoft *rxs;
   8954 	struct mbuf *m;
   8955 	int i, len;
   8956 	int count = 0;
   8957 	uint32_t status, errors;
   8958 	uint16_t vlantag;
   8959 	bool more = false;
   8960 
   8961 	KASSERT(mutex_owned(rxq->rxq_lock));
   8962 
   8963 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8964 		if (limit-- == 0) {
   8965 			rxq->rxq_ptr = i;
   8966 			more = true;
   8967 			DPRINTF(WM_DEBUG_RX,
   8968 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8969 				device_xname(sc->sc_dev), i));
   8970 			break;
   8971 		}
   8972 
   8973 		rxs = &rxq->rxq_soft[i];
   8974 
   8975 		DPRINTF(WM_DEBUG_RX,
   8976 		    ("%s: RX: checking descriptor %d\n",
   8977 			device_xname(sc->sc_dev), i));
   8978 		wm_cdrxsync(rxq, i,
   8979 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8980 
   8981 		status = wm_rxdesc_get_status(rxq, i);
   8982 		errors = wm_rxdesc_get_errors(rxq, i);
   8983 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8984 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8985 #ifdef WM_DEBUG
   8986 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8987 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8988 #endif
   8989 
   8990 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8991 			/*
    8992 			 * Update the receive pointer while still holding
    8993 			 * rxq_lock, keeping it consistent with the counters.
   8994 			 */
   8995 			rxq->rxq_ptr = i;
   8996 			break;
   8997 		}
   8998 
   8999 		count++;
   9000 		if (__predict_false(rxq->rxq_discard)) {
   9001 			DPRINTF(WM_DEBUG_RX,
   9002 			    ("%s: RX: discarding contents of descriptor %d\n",
   9003 				device_xname(sc->sc_dev), i));
   9004 			wm_init_rxdesc(rxq, i);
   9005 			if (wm_rxdesc_is_eop(rxq, status)) {
   9006 				/* Reset our state. */
   9007 				DPRINTF(WM_DEBUG_RX,
   9008 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9009 					device_xname(sc->sc_dev)));
   9010 				rxq->rxq_discard = 0;
   9011 			}
   9012 			continue;
   9013 		}
   9014 
   9015 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9016 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9017 
   9018 		m = rxs->rxs_mbuf;
   9019 
   9020 		/*
   9021 		 * Add a new receive buffer to the ring, unless of
   9022 		 * course the length is zero. Treat the latter as a
   9023 		 * failed mapping.
   9024 		 */
   9025 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9026 			/*
   9027 			 * Failed, throw away what we've done so
   9028 			 * far, and discard the rest of the packet.
   9029 			 */
   9030 			if_statinc(ifp, if_ierrors);
   9031 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9032 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9033 			wm_init_rxdesc(rxq, i);
   9034 			if (!wm_rxdesc_is_eop(rxq, status))
   9035 				rxq->rxq_discard = 1;
   9036 			if (rxq->rxq_head != NULL)
   9037 				m_freem(rxq->rxq_head);
   9038 			WM_RXCHAIN_RESET(rxq);
   9039 			DPRINTF(WM_DEBUG_RX,
   9040 			    ("%s: RX: Rx buffer allocation failed, "
   9041 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9042 				rxq->rxq_discard ? " (discard)" : ""));
   9043 			continue;
   9044 		}
   9045 
   9046 		m->m_len = len;
   9047 		rxq->rxq_len += len;
   9048 		DPRINTF(WM_DEBUG_RX,
   9049 		    ("%s: RX: buffer at %p len %d\n",
   9050 			device_xname(sc->sc_dev), m->m_data, len));
   9051 
   9052 		/* If this is not the end of the packet, keep looking. */
   9053 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9054 			WM_RXCHAIN_LINK(rxq, m);
   9055 			DPRINTF(WM_DEBUG_RX,
   9056 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9057 				device_xname(sc->sc_dev), rxq->rxq_len));
   9058 			continue;
   9059 		}
   9060 
   9061 		/*
    9062 		 * Okay, we have the entire packet now. The chip is
    9063 		 * configured to include the FCS except on the I35[05] and
    9064 		 * I21[01] (not all chips can be configured to strip it), so
    9065 		 * we normally need to trim it. Those chips have an errata:
    9066 		 * the RCTL_SECRC bit in RCTL is always set, so the FCS is
    9067 		 * already stripped there and we don't trim it. PCH2 and
    9068 		 * newer chips also exclude the FCS when jumbo frames are
    9069 		 * used, to work around an errata. We may need to shorten
    9070 		 * the previous mbuf in the chain if the current one is short.
   9071 		 */
   9072 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9073 			if (m->m_len < ETHER_CRC_LEN) {
   9074 				rxq->rxq_tail->m_len
   9075 				    -= (ETHER_CRC_LEN - m->m_len);
   9076 				m->m_len = 0;
   9077 			} else
   9078 				m->m_len -= ETHER_CRC_LEN;
   9079 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9080 		} else
   9081 			len = rxq->rxq_len;
   9082 
   9083 		WM_RXCHAIN_LINK(rxq, m);
   9084 
   9085 		*rxq->rxq_tailp = NULL;
   9086 		m = rxq->rxq_head;
   9087 
   9088 		WM_RXCHAIN_RESET(rxq);
   9089 
   9090 		DPRINTF(WM_DEBUG_RX,
   9091 		    ("%s: RX: have entire packet, len -> %d\n",
   9092 			device_xname(sc->sc_dev), len));
   9093 
   9094 		/* If an error occurred, update stats and drop the packet. */
   9095 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9096 			m_freem(m);
   9097 			continue;
   9098 		}
   9099 
   9100 		/* No errors.  Receive the packet. */
   9101 		m_set_rcvif(m, ifp);
   9102 		m->m_pkthdr.len = len;
   9103 		/*
   9104 		 * TODO
    9105 		 * We should save the rsshash and rsstype in this mbuf.
   9106 		 */
   9107 		DPRINTF(WM_DEBUG_RX,
   9108 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9109 			device_xname(sc->sc_dev), rsstype, rsshash));
   9110 
   9111 		/*
   9112 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9113 		 * for us.  Associate the tag with the packet.
   9114 		 */
   9115 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9116 			continue;
   9117 
   9118 		/* Set up checksum info for this packet. */
   9119 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9120 		/*
    9121 		 * Update the receive pointer while still holding rxq_lock,
    9122 		 * keeping it consistent with the counter updates.
   9123 		 */
   9124 		rxq->rxq_ptr = i;
   9125 		rxq->rxq_packets++;
   9126 		rxq->rxq_bytes += len;
   9127 		mutex_exit(rxq->rxq_lock);
   9128 
   9129 		/* Pass it on. */
   9130 		if_percpuq_enqueue(sc->sc_ipq, m);
   9131 
   9132 		mutex_enter(rxq->rxq_lock);
   9133 
   9134 		if (rxq->rxq_stopping)
   9135 			break;
   9136 	}
   9137 
   9138 	if (count != 0)
   9139 		rnd_add_uint32(&sc->rnd_source, count);
   9140 
   9141 	DPRINTF(WM_DEBUG_RX,
   9142 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9143 
   9144 	return more;
   9145 }
   9146 
   9147 /*
   9148  * wm_linkintr_gmii:
   9149  *
   9150  *	Helper; handle link interrupts for GMII.
   9151  */
   9152 static void
   9153 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9154 {
   9155 	device_t dev = sc->sc_dev;
   9156 	uint32_t status, reg;
   9157 	bool link;
   9158 	int rv;
   9159 
   9160 	KASSERT(WM_CORE_LOCKED(sc));
   9161 
   9162 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9163 		__func__));
   9164 
   9165 	if ((icr & ICR_LSC) == 0) {
   9166 		if (icr & ICR_RXSEQ)
   9167 			DPRINTF(WM_DEBUG_LINK,
   9168 			    ("%s: LINK Receive sequence error\n",
   9169 				device_xname(dev)));
   9170 		return;
   9171 	}
   9172 
   9173 	/* Link status changed */
   9174 	status = CSR_READ(sc, WMREG_STATUS);
   9175 	link = status & STATUS_LU;
   9176 	if (link) {
   9177 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9178 			device_xname(dev),
   9179 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9180 	} else {
   9181 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9182 			device_xname(dev)));
   9183 	}
   9184 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9185 		wm_gig_downshift_workaround_ich8lan(sc);
   9186 
   9187 	if ((sc->sc_type == WM_T_ICH8)
   9188 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9189 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9190 	}
   9191 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9192 		device_xname(dev)));
   9193 	mii_pollstat(&sc->sc_mii);
   9194 	if (sc->sc_type == WM_T_82543) {
   9195 		int miistatus, active;
   9196 
   9197 		/*
   9198 		 * With 82543, we need to force speed and
   9199 		 * duplex on the MAC equal to what the PHY
   9200 		 * speed and duplex configuration is.
   9201 		 */
   9202 		miistatus = sc->sc_mii.mii_media_status;
   9203 
   9204 		if (miistatus & IFM_ACTIVE) {
   9205 			active = sc->sc_mii.mii_media_active;
   9206 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9207 			switch (IFM_SUBTYPE(active)) {
   9208 			case IFM_10_T:
   9209 				sc->sc_ctrl |= CTRL_SPEED_10;
   9210 				break;
   9211 			case IFM_100_TX:
   9212 				sc->sc_ctrl |= CTRL_SPEED_100;
   9213 				break;
   9214 			case IFM_1000_T:
   9215 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9216 				break;
   9217 			default:
   9218 				/*
   9219 				 * Fiber?
    9220 				 * Should not enter here.
   9221 				 */
   9222 				device_printf(dev, "unknown media (%x)\n",
   9223 				    active);
   9224 				break;
   9225 			}
   9226 			if (active & IFM_FDX)
   9227 				sc->sc_ctrl |= CTRL_FD;
   9228 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9229 		}
   9230 	} else if (sc->sc_type == WM_T_PCH) {
   9231 		wm_k1_gig_workaround_hv(sc,
   9232 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9233 	}
   9234 
   9235 	/*
   9236 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9237 	 * aggressive resulting in many collisions. To avoid this, increase
   9238 	 * the IPG and reduce Rx latency in the PHY.
   9239 	 */
   9240 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9241 	    && link) {
   9242 		uint32_t tipg_reg;
   9243 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9244 		bool fdx;
   9245 		uint16_t emi_addr, emi_val;
   9246 
   9247 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9248 		tipg_reg &= ~TIPG_IPGT_MASK;
   9249 		fdx = status & STATUS_FD;
   9250 
   9251 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9252 			tipg_reg |= 0xff;
   9253 			/* Reduce Rx latency in analog PHY */
   9254 			emi_val = 0;
   9255 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9256 		    fdx && speed != STATUS_SPEED_1000) {
   9257 			tipg_reg |= 0xc;
   9258 			emi_val = 1;
   9259 		} else {
    9260 			/* Restore the default values */
   9261 			tipg_reg |= 0x08;
   9262 			emi_val = 1;
   9263 		}
   9264 
   9265 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9266 
   9267 		rv = sc->phy.acquire(sc);
   9268 		if (rv)
   9269 			return;
   9270 
   9271 		if (sc->sc_type == WM_T_PCH2)
   9272 			emi_addr = I82579_RX_CONFIG;
   9273 		else
   9274 			emi_addr = I217_RX_CONFIG;
   9275 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9276 
   9277 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9278 			uint16_t phy_reg;
   9279 
   9280 			sc->phy.readreg_locked(dev, 2,
   9281 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9282 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9283 			if (speed == STATUS_SPEED_100
   9284 			    || speed == STATUS_SPEED_10)
   9285 				phy_reg |= 0x3e8;
   9286 			else
   9287 				phy_reg |= 0xfa;
   9288 			sc->phy.writereg_locked(dev, 2,
   9289 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9290 
   9291 			if (speed == STATUS_SPEED_1000) {
   9292 				sc->phy.readreg_locked(dev, 2,
   9293 				    HV_PM_CTRL, &phy_reg);
   9294 
   9295 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9296 
   9297 				sc->phy.writereg_locked(dev, 2,
   9298 				    HV_PM_CTRL, phy_reg);
   9299 			}
   9300 		}
   9301 		sc->phy.release(sc);
   9302 
   9303 		if (rv)
   9304 			return;
   9305 
   9306 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9307 			uint16_t data, ptr_gap;
   9308 
   9309 			if (speed == STATUS_SPEED_1000) {
   9310 				rv = sc->phy.acquire(sc);
   9311 				if (rv)
   9312 					return;
   9313 
   9314 				rv = sc->phy.readreg_locked(dev, 2,
   9315 				    I219_UNKNOWN1, &data);
   9316 				if (rv) {
   9317 					sc->phy.release(sc);
   9318 					return;
   9319 				}
   9320 
   9321 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9322 				if (ptr_gap < 0x18) {
   9323 					data &= ~(0x3ff << 2);
   9324 					data |= (0x18 << 2);
   9325 					rv = sc->phy.writereg_locked(dev,
   9326 					    2, I219_UNKNOWN1, data);
   9327 				}
   9328 				sc->phy.release(sc);
   9329 				if (rv)
   9330 					return;
   9331 			} else {
   9332 				rv = sc->phy.acquire(sc);
   9333 				if (rv)
   9334 					return;
   9335 
   9336 				rv = sc->phy.writereg_locked(dev, 2,
   9337 				    I219_UNKNOWN1, 0xc023);
   9338 				sc->phy.release(sc);
   9339 				if (rv)
   9340 					return;
   9341 
   9342 			}
   9343 		}
   9344 	}
   9345 
   9346 	/*
   9347 	 * I217 Packet Loss issue:
   9348 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9349 	 * on power up.
   9350 	 * Set the Beacon Duration for I217 to 8 usec
   9351 	 */
   9352 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9353 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9354 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9355 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9356 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9357 	}
   9358 
   9359 	/* Work-around I218 hang issue */
   9360 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9361 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9362 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9363 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9364 		wm_k1_workaround_lpt_lp(sc, link);
   9365 
   9366 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9367 		/*
   9368 		 * Set platform power management values for Latency
   9369 		 * Tolerance Reporting (LTR)
   9370 		 */
   9371 		wm_platform_pm_pch_lpt(sc,
   9372 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9373 	}
   9374 
   9375 	/* Clear link partner's EEE ability */
   9376 	sc->eee_lp_ability = 0;
   9377 
   9378 	/* FEXTNVM6 K1-off workaround */
   9379 	if (sc->sc_type == WM_T_PCH_SPT) {
   9380 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9381 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9382 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9383 		else
   9384 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9385 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9386 	}
   9387 
   9388 	if (!link)
   9389 		return;
   9390 
   9391 	switch (sc->sc_type) {
   9392 	case WM_T_PCH2:
   9393 		wm_k1_workaround_lv(sc);
   9394 		/* FALLTHROUGH */
   9395 	case WM_T_PCH:
   9396 		if (sc->sc_phytype == WMPHY_82578)
   9397 			wm_link_stall_workaround_hv(sc);
   9398 		break;
   9399 	default:
   9400 		break;
   9401 	}
   9402 
   9403 	/* Enable/Disable EEE after link up */
   9404 	if (sc->sc_phytype > WMPHY_82579)
   9405 		wm_set_eee_pchlan(sc);
   9406 }
   9407 
   9408 /*
   9409  * wm_linkintr_tbi:
   9410  *
   9411  *	Helper; handle link interrupts for TBI mode.
   9412  */
   9413 static void
   9414 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9415 {
   9416 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9417 	uint32_t status;
   9418 
   9419 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9420 		__func__));
   9421 
   9422 	status = CSR_READ(sc, WMREG_STATUS);
   9423 	if (icr & ICR_LSC) {
   9424 		wm_check_for_link(sc);
   9425 		if (status & STATUS_LU) {
   9426 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9427 				device_xname(sc->sc_dev),
   9428 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9429 			/*
   9430 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9431 			 * so we should update sc->sc_ctrl
   9432 			 */
   9433 
   9434 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9435 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9436 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9437 			if (status & STATUS_FD)
   9438 				sc->sc_tctl |=
   9439 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9440 			else
   9441 				sc->sc_tctl |=
   9442 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9443 			if (sc->sc_ctrl & CTRL_TFCE)
   9444 				sc->sc_fcrtl |= FCRTL_XONE;
   9445 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9446 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9447 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9448 			sc->sc_tbi_linkup = 1;
   9449 			if_link_state_change(ifp, LINK_STATE_UP);
   9450 		} else {
   9451 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9452 				device_xname(sc->sc_dev)));
   9453 			sc->sc_tbi_linkup = 0;
   9454 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9455 		}
   9456 		/* Update LED */
   9457 		wm_tbi_serdes_set_linkled(sc);
   9458 	} else if (icr & ICR_RXSEQ)
   9459 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9460 			device_xname(sc->sc_dev)));
   9461 }
   9462 
   9463 /*
   9464  * wm_linkintr_serdes:
   9465  *
    9466  *	Helper; handle link interrupts for SERDES mode.
   9467  */
   9468 static void
   9469 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9470 {
   9471 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9472 	struct mii_data *mii = &sc->sc_mii;
   9473 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9474 	uint32_t pcs_adv, pcs_lpab, reg;
   9475 
   9476 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9477 		__func__));
   9478 
   9479 	if (icr & ICR_LSC) {
   9480 		/* Check PCS */
   9481 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9482 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9483 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9484 				device_xname(sc->sc_dev)));
   9485 			mii->mii_media_status |= IFM_ACTIVE;
   9486 			sc->sc_tbi_linkup = 1;
   9487 			if_link_state_change(ifp, LINK_STATE_UP);
   9488 		} else {
   9489 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9490 				device_xname(sc->sc_dev)));
   9491 			mii->mii_media_status |= IFM_NONE;
   9492 			sc->sc_tbi_linkup = 0;
   9493 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9494 			wm_tbi_serdes_set_linkled(sc);
   9495 			return;
   9496 		}
   9497 		mii->mii_media_active |= IFM_1000_SX;
   9498 		if ((reg & PCS_LSTS_FDX) != 0)
   9499 			mii->mii_media_active |= IFM_FDX;
   9500 		else
   9501 			mii->mii_media_active |= IFM_HDX;
   9502 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9503 			/* Check flow */
   9504 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9505 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9506 				DPRINTF(WM_DEBUG_LINK,
   9507 				    ("XXX LINKOK but not ACOMP\n"));
   9508 				return;
   9509 			}
   9510 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9511 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9512 			DPRINTF(WM_DEBUG_LINK,
   9513 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9514 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9515 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9516 				mii->mii_media_active |= IFM_FLOW
   9517 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9518 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9519 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9520 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9521 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9522 				mii->mii_media_active |= IFM_FLOW
   9523 				    | IFM_ETH_TXPAUSE;
   9524 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9525 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9526 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9527 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9528 				mii->mii_media_active |= IFM_FLOW
   9529 				    | IFM_ETH_RXPAUSE;
   9530 		}
   9531 		/* Update LED */
   9532 		wm_tbi_serdes_set_linkled(sc);
   9533 	} else
   9534 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9535 		    device_xname(sc->sc_dev)));
   9536 }
   9537 
   9538 /*
   9539  * wm_linkintr:
   9540  *
   9541  *	Helper; handle link interrupts.
   9542  */
   9543 static void
   9544 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9545 {
   9546 
   9547 	KASSERT(WM_CORE_LOCKED(sc));
   9548 
   9549 	if (sc->sc_flags & WM_F_HAS_MII)
   9550 		wm_linkintr_gmii(sc, icr);
   9551 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9552 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9553 		wm_linkintr_serdes(sc, icr);
   9554 	else
   9555 		wm_linkintr_tbi(sc, icr);
   9556 }
   9557 
   9558 
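         /*
          * Defer further Tx/Rx processing either to the per-queue workqueue
          * or to softint context, depending on wmq_txrx_use_workqueue.
          */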
   9559 static inline void
   9560 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9561 {
   9562 
   9563 	if (wmq->wmq_txrx_use_workqueue)
   9564 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9565 	else
   9566 		softint_schedule(wmq->wmq_si);
   9567 }
   9568 
   9569 /*
   9570  * wm_intr_legacy:
   9571  *
   9572  *	Interrupt service routine for INTx and MSI.
   9573  */
   9574 static int
   9575 wm_intr_legacy(void *arg)
   9576 {
   9577 	struct wm_softc *sc = arg;
   9578 	struct wm_queue *wmq = &sc->sc_queue[0];
   9579 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9580 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9581 	uint32_t icr, rndval = 0;
   9582 	int handled = 0;
   9583 
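         	/*
         	 * Loop, re-reading ICR until none of the causes we care about
         	 * (sc_icr) remain asserted. Note: on this device family,
         	 * reading ICR also acknowledges the reported causes.
         	 */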
   9584 	while (1 /* CONSTCOND */) {
   9585 		icr = CSR_READ(sc, WMREG_ICR);
   9586 		if ((icr & sc->sc_icr) == 0)
   9587 			break;
   9588 		if (handled == 0)
   9589 			DPRINTF(WM_DEBUG_TX,
   9590 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9591 		if (rndval == 0)
   9592 			rndval = icr;
   9593 
   9594 		mutex_enter(rxq->rxq_lock);
   9595 
   9596 		if (rxq->rxq_stopping) {
   9597 			mutex_exit(rxq->rxq_lock);
   9598 			break;
   9599 		}
   9600 
   9601 		handled = 1;
   9602 
   9603 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9604 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9605 			DPRINTF(WM_DEBUG_RX,
   9606 			    ("%s: RX: got Rx intr 0x%08x\n",
   9607 				device_xname(sc->sc_dev),
   9608 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9609 			WM_Q_EVCNT_INCR(rxq, intr);
   9610 		}
   9611 #endif
   9612 		/*
   9613 		 * wm_rxeof() does *not* call upper layer functions directly,
    9614 		 * as if_percpuq_enqueue() just calls softint_schedule(),
    9615 		 * so it is safe to call wm_rxeof() in interrupt context.
   9616 		 */
   9617 		wm_rxeof(rxq, UINT_MAX);
   9618 
   9619 		mutex_exit(rxq->rxq_lock);
   9620 		mutex_enter(txq->txq_lock);
   9621 
   9622 		if (txq->txq_stopping) {
   9623 			mutex_exit(txq->txq_lock);
   9624 			break;
   9625 		}
   9626 
   9627 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9628 		if (icr & ICR_TXDW) {
   9629 			DPRINTF(WM_DEBUG_TX,
   9630 			    ("%s: TX: got TXDW interrupt\n",
   9631 				device_xname(sc->sc_dev)));
   9632 			WM_Q_EVCNT_INCR(txq, txdw);
   9633 		}
   9634 #endif
   9635 		wm_txeof(txq, UINT_MAX);
   9636 
   9637 		mutex_exit(txq->txq_lock);
   9638 		WM_CORE_LOCK(sc);
   9639 
   9640 		if (sc->sc_core_stopping) {
   9641 			WM_CORE_UNLOCK(sc);
   9642 			break;
   9643 		}
   9644 
   9645 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9646 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9647 			wm_linkintr(sc, icr);
   9648 		}
   9649 		if ((icr & ICR_GPI(0)) != 0)
   9650 			device_printf(sc->sc_dev, "got module interrupt\n");
   9651 
   9652 		WM_CORE_UNLOCK(sc);
   9653 
   9654 		if (icr & ICR_RXO) {
   9655 #if defined(WM_DEBUG)
   9656 			log(LOG_WARNING, "%s: Receive overrun\n",
   9657 			    device_xname(sc->sc_dev));
   9658 #endif /* defined(WM_DEBUG) */
   9659 		}
   9660 	}
   9661 
   9662 	rnd_add_uint32(&sc->rnd_source, rndval);
   9663 
   9664 	if (handled) {
   9665 		/* Try to get more packets going. */
   9666 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9667 		wm_sched_handle_queue(sc, wmq);
   9668 	}
   9669 
   9670 	return handled;
   9671 }
   9672 
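         /*
          * Mask this queue's Tx/Rx interrupts. The register layout differs by
          * generation: 82574 uses IMC with per-queue ICR bits, 82575 uses EIMC
          * with EITR queue bits, and later MSI-X devices use EIMC with one bit
          * per vector.
          */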
   9673 static inline void
   9674 wm_txrxintr_disable(struct wm_queue *wmq)
   9675 {
   9676 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9677 
   9678 	if (sc->sc_type == WM_T_82574)
   9679 		CSR_WRITE(sc, WMREG_IMC,
   9680 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9681 	else if (sc->sc_type == WM_T_82575)
   9682 		CSR_WRITE(sc, WMREG_EIMC,
   9683 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9684 	else
   9685 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9686 }
   9687 
   9688 static inline void
   9689 wm_txrxintr_enable(struct wm_queue *wmq)
   9690 {
   9691 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9692 
   9693 	wm_itrs_calculate(sc, wmq);
   9694 
   9695 	/*
    9696 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9697 	 * here. It does not matter whether RXQ(0) or RXQ(1) enables
    9698 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9699 	 * while its wm_handle_queue(wmq) is running.
   9700 	 */
   9701 	if (sc->sc_type == WM_T_82574)
   9702 		CSR_WRITE(sc, WMREG_IMS,
   9703 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9704 	else if (sc->sc_type == WM_T_82575)
   9705 		CSR_WRITE(sc, WMREG_EIMS,
   9706 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9707 	else
   9708 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9709 }
   9710 
   9711 static int
   9712 wm_txrxintr_msix(void *arg)
   9713 {
   9714 	struct wm_queue *wmq = arg;
   9715 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9716 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9717 	struct wm_softc *sc = txq->txq_sc;
   9718 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9719 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9720 	bool txmore;
   9721 	bool rxmore;
   9722 
   9723 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9724 
   9725 	DPRINTF(WM_DEBUG_TX,
   9726 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9727 
   9728 	wm_txrxintr_disable(wmq);
   9729 
   9730 	mutex_enter(txq->txq_lock);
   9731 
   9732 	if (txq->txq_stopping) {
   9733 		mutex_exit(txq->txq_lock);
   9734 		return 0;
   9735 	}
   9736 
   9737 	WM_Q_EVCNT_INCR(txq, txdw);
   9738 	txmore = wm_txeof(txq, txlimit);
    9739 	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   9740 	mutex_exit(txq->txq_lock);
   9741 
   9742 	DPRINTF(WM_DEBUG_RX,
   9743 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9744 	mutex_enter(rxq->rxq_lock);
   9745 
   9746 	if (rxq->rxq_stopping) {
   9747 		mutex_exit(rxq->rxq_lock);
   9748 		return 0;
   9749 	}
   9750 
   9751 	WM_Q_EVCNT_INCR(rxq, intr);
   9752 	rxmore = wm_rxeof(rxq, rxlimit);
   9753 	mutex_exit(rxq->rxq_lock);
   9754 
   9755 	wm_itrs_writereg(sc, wmq);
   9756 
   9757 	if (txmore || rxmore) {
   9758 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9759 		wm_sched_handle_queue(sc, wmq);
   9760 	} else
   9761 		wm_txrxintr_enable(wmq);
   9762 
   9763 	return 1;
   9764 }
   9765 
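         /*
          * Deferred (softint or workqueue) handler: continue Tx/Rx processing
          * under the sc_{tx,rx}_process_limit limits, reschedule itself while
          * work remains, and re-enable the queue's interrupts once done.
          */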
   9766 static void
   9767 wm_handle_queue(void *arg)
   9768 {
   9769 	struct wm_queue *wmq = arg;
   9770 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9771 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9772 	struct wm_softc *sc = txq->txq_sc;
   9773 	u_int txlimit = sc->sc_tx_process_limit;
   9774 	u_int rxlimit = sc->sc_rx_process_limit;
   9775 	bool txmore;
   9776 	bool rxmore;
   9777 
   9778 	mutex_enter(txq->txq_lock);
   9779 	if (txq->txq_stopping) {
   9780 		mutex_exit(txq->txq_lock);
   9781 		return;
   9782 	}
   9783 	txmore = wm_txeof(txq, txlimit);
   9784 	wm_deferred_start_locked(txq);
   9785 	mutex_exit(txq->txq_lock);
   9786 
   9787 	mutex_enter(rxq->rxq_lock);
   9788 	if (rxq->rxq_stopping) {
   9789 		mutex_exit(rxq->rxq_lock);
   9790 		return;
   9791 	}
   9792 	WM_Q_EVCNT_INCR(rxq, defer);
   9793 	rxmore = wm_rxeof(rxq, rxlimit);
   9794 	mutex_exit(rxq->rxq_lock);
   9795 
   9796 	if (txmore || rxmore) {
   9797 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9798 		wm_sched_handle_queue(sc, wmq);
   9799 	} else
   9800 		wm_txrxintr_enable(wmq);
   9801 }
   9802 
   9803 static void
   9804 wm_handle_queue_work(struct work *wk, void *context)
   9805 {
   9806 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9807 
   9808 	/*
   9809 	 * "enqueued flag" is not required here.
   9810 	 */
   9811 	wm_handle_queue(wmq);
   9812 }
   9813 
   9814 /*
   9815  * wm_linkintr_msix:
   9816  *
   9817  *	Interrupt service routine for link status change for MSI-X.
   9818  */
   9819 static int
   9820 wm_linkintr_msix(void *arg)
   9821 {
   9822 	struct wm_softc *sc = arg;
   9823 	uint32_t reg;
   9824 	bool has_rxo;
   9825 
   9826 	reg = CSR_READ(sc, WMREG_ICR);
   9827 	WM_CORE_LOCK(sc);
   9828 	DPRINTF(WM_DEBUG_LINK,
   9829 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9830 		device_xname(sc->sc_dev), reg));
   9831 
   9832 	if (sc->sc_core_stopping)
   9833 		goto out;
   9834 
   9835 	if ((reg & ICR_LSC) != 0) {
   9836 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9837 		wm_linkintr(sc, ICR_LSC);
   9838 	}
   9839 	if ((reg & ICR_GPI(0)) != 0)
   9840 		device_printf(sc->sc_dev, "got module interrupt\n");
   9841 
   9842 	/*
   9843 	 * XXX 82574 MSI-X mode workaround
   9844 	 *
    9845 	 * In 82574 MSI-X mode, a receive overrun (RXO) is delivered on the
    9846 	 * ICR_OTHER MSI-X vector; it raises neither the ICR_RXQ(0) nor the
    9847 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9848 	 * interrupts by writing WMREG_ICS to process the received packets.
   9849 	 */
   9850 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9851 #if defined(WM_DEBUG)
   9852 		log(LOG_WARNING, "%s: Receive overrun\n",
   9853 		    device_xname(sc->sc_dev));
   9854 #endif /* defined(WM_DEBUG) */
   9855 
   9856 		has_rxo = true;
   9857 		/*
   9858 		 * The RXO interrupt is very high rate when receive traffic is
   9859 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   9860 		 * interrupts. ICR_OTHER will be enabled at the end of
   9861 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   9862 		 * ICR_RXQ(1) interrupts.
   9863 		 */
   9864 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9865 
   9866 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9867 	}
   9868 
   9871 out:
   9872 	WM_CORE_UNLOCK(sc);
   9873 
   9874 	if (sc->sc_type == WM_T_82574) {
   9875 		if (!has_rxo)
   9876 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9877 		else
   9878 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9879 	} else if (sc->sc_type == WM_T_82575)
   9880 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9881 	else
   9882 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9883 
   9884 	return 1;
   9885 }
   9886 
   9887 /*
   9888  * Media related.
   9889  * GMII, SGMII, TBI (and SERDES)
   9890  */
   9891 
   9892 /* Common */
   9893 
   9894 /*
   9895  * wm_tbi_serdes_set_linkled:
   9896  *
   9897  *	Update the link LED on TBI and SERDES devices.
   9898  */
   9899 static void
   9900 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9901 {
   9902 
   9903 	if (sc->sc_tbi_linkup)
   9904 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9905 	else
   9906 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9907 
   9908 	/* 82540 or newer devices are active low */
   9909 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9910 
   9911 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9912 }
   9913 
   9914 /* GMII related */
   9915 
   9916 /*
   9917  * wm_gmii_reset:
   9918  *
   9919  *	Reset the PHY.
   9920  */
   9921 static void
   9922 wm_gmii_reset(struct wm_softc *sc)
   9923 {
   9924 	uint32_t reg;
   9925 	int rv;
   9926 
   9927 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9928 		device_xname(sc->sc_dev), __func__));
   9929 
   9930 	rv = sc->phy.acquire(sc);
   9931 	if (rv != 0) {
   9932 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9933 		    __func__);
   9934 		return;
   9935 	}
   9936 
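         	/*
         	 * The reset sequence differs by MAC generation: the 82543
         	 * toggles the PHY reset pin via a software-controlled GPIO
         	 * (SWDPIN(4)), while later devices pulse CTRL_PHY_RESET and
         	 * differ only in the required delays.
         	 */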
   9937 	switch (sc->sc_type) {
   9938 	case WM_T_82542_2_0:
   9939 	case WM_T_82542_2_1:
   9940 		/* null */
   9941 		break;
   9942 	case WM_T_82543:
   9943 		/*
   9944 		 * With 82543, we need to force speed and duplex on the MAC
   9945 		 * equal to what the PHY speed and duplex configuration is.
   9946 		 * In addition, we need to perform a hardware reset on the PHY
   9947 		 * to take it out of reset.
   9948 		 */
   9949 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9950 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9951 
   9952 		/* The PHY reset pin is active-low. */
   9953 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9954 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9955 		    CTRL_EXT_SWDPIN(4));
   9956 		reg |= CTRL_EXT_SWDPIO(4);
   9957 
   9958 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9959 		CSR_WRITE_FLUSH(sc);
   9960 		delay(10*1000);
   9961 
   9962 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9963 		CSR_WRITE_FLUSH(sc);
   9964 		delay(150);
   9965 #if 0
   9966 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9967 #endif
   9968 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9969 		break;
   9970 	case WM_T_82544:	/* Reset 10000us */
   9971 	case WM_T_82540:
   9972 	case WM_T_82545:
   9973 	case WM_T_82545_3:
   9974 	case WM_T_82546:
   9975 	case WM_T_82546_3:
   9976 	case WM_T_82541:
   9977 	case WM_T_82541_2:
   9978 	case WM_T_82547:
   9979 	case WM_T_82547_2:
   9980 	case WM_T_82571:	/* Reset 100us */
   9981 	case WM_T_82572:
   9982 	case WM_T_82573:
   9983 	case WM_T_82574:
   9984 	case WM_T_82575:
   9985 	case WM_T_82576:
   9986 	case WM_T_82580:
   9987 	case WM_T_I350:
   9988 	case WM_T_I354:
   9989 	case WM_T_I210:
   9990 	case WM_T_I211:
   9991 	case WM_T_82583:
   9992 	case WM_T_80003:
   9993 		/* Generic reset */
   9994 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9995 		CSR_WRITE_FLUSH(sc);
   9996 		delay(20000);
   9997 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9998 		CSR_WRITE_FLUSH(sc);
   9999 		delay(20000);
   10000 
   10001 		if ((sc->sc_type == WM_T_82541)
   10002 		    || (sc->sc_type == WM_T_82541_2)
   10003 		    || (sc->sc_type == WM_T_82547)
   10004 		    || (sc->sc_type == WM_T_82547_2)) {
    10005 			/* Workarounds for IGP are done in igp_reset() */
   10006 			/* XXX add code to set LED after phy reset */
   10007 		}
   10008 		break;
   10009 	case WM_T_ICH8:
   10010 	case WM_T_ICH9:
   10011 	case WM_T_ICH10:
   10012 	case WM_T_PCH:
   10013 	case WM_T_PCH2:
   10014 	case WM_T_PCH_LPT:
   10015 	case WM_T_PCH_SPT:
   10016 	case WM_T_PCH_CNP:
   10017 		/* Generic reset */
   10018 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10019 		CSR_WRITE_FLUSH(sc);
   10020 		delay(100);
   10021 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10022 		CSR_WRITE_FLUSH(sc);
   10023 		delay(150);
   10024 		break;
   10025 	default:
   10026 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10027 		    __func__);
   10028 		break;
   10029 	}
   10030 
   10031 	sc->phy.release(sc);
   10032 
   10033 	/* get_cfg_done */
   10034 	wm_get_cfg_done(sc);
   10035 
   10036 	/* Extra setup */
   10037 	switch (sc->sc_type) {
   10038 	case WM_T_82542_2_0:
   10039 	case WM_T_82542_2_1:
   10040 	case WM_T_82543:
   10041 	case WM_T_82544:
   10042 	case WM_T_82540:
   10043 	case WM_T_82545:
   10044 	case WM_T_82545_3:
   10045 	case WM_T_82546:
   10046 	case WM_T_82546_3:
   10047 	case WM_T_82541_2:
   10048 	case WM_T_82547_2:
   10049 	case WM_T_82571:
   10050 	case WM_T_82572:
   10051 	case WM_T_82573:
   10052 	case WM_T_82574:
   10053 	case WM_T_82583:
   10054 	case WM_T_82575:
   10055 	case WM_T_82576:
   10056 	case WM_T_82580:
   10057 	case WM_T_I350:
   10058 	case WM_T_I354:
   10059 	case WM_T_I210:
   10060 	case WM_T_I211:
   10061 	case WM_T_80003:
   10062 		/* Null */
   10063 		break;
   10064 	case WM_T_82541:
   10065 	case WM_T_82547:
    10066 		/* XXX Configure LED after PHY reset */
   10067 		break;
   10068 	case WM_T_ICH8:
   10069 	case WM_T_ICH9:
   10070 	case WM_T_ICH10:
   10071 	case WM_T_PCH:
   10072 	case WM_T_PCH2:
   10073 	case WM_T_PCH_LPT:
   10074 	case WM_T_PCH_SPT:
   10075 	case WM_T_PCH_CNP:
   10076 		wm_phy_post_reset(sc);
   10077 		break;
   10078 	default:
   10079 		panic("%s: unknown type\n", __func__);
   10080 		break;
   10081 	}
   10082 }
   10083 
   10084 /*
   10085  * Setup sc_phytype and mii_{read|write}reg.
   10086  *
    10087  *  To identify the PHY type, the correct read/write function must be
    10088  * selected, and to select it, the PCI ID or MAC type is required
    10089  * without accessing any PHY register.
    10090  *
    10091  *  On the first call of this function, the PHY ID is not yet known, so
    10092  * the PCI ID or MAC type is checked instead. The list of PCI IDs may
    10093  * not be complete, so the result might be incorrect.
    10094  *
    10095  *  On the second call, the PHY OUI and model are used to identify the
    10096  * PHY type. This might still be imperfect because of missing entries in
    10097  * the comparison tables, but it is better than the first call.
    10098  *
    10099  *  If the newly detected result differs from the previous assumption,
    10100  * a diagnostic message is printed.
   10101  */
   10102 static void
   10103 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10104     uint16_t phy_model)
   10105 {
   10106 	device_t dev = sc->sc_dev;
   10107 	struct mii_data *mii = &sc->sc_mii;
   10108 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10109 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10110 	mii_readreg_t new_readreg;
   10111 	mii_writereg_t new_writereg;
   10112 	bool dodiag = true;
   10113 
   10114 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10115 		device_xname(sc->sc_dev), __func__));
   10116 
   10117 	/*
    10118 	 * A 1000BASE-T SFP uses SGMII, and the first assumed PHY type is
    10119 	 * always incorrect, so don't print diag output on the second call.
   10120 	 */
   10121 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10122 		dodiag = false;
   10123 
   10124 	if (mii->mii_readreg == NULL) {
   10125 		/*
   10126 		 *  This is the first call of this function. For ICH and PCH
   10127 		 * variants, it's difficult to determine the PHY access method
   10128 		 * by sc_type, so use the PCI product ID for some devices.
   10129 		 */
   10130 
   10131 		switch (sc->sc_pcidevid) {
   10132 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10133 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10134 			/* 82577 */
   10135 			new_phytype = WMPHY_82577;
   10136 			break;
   10137 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10138 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10139 			/* 82578 */
   10140 			new_phytype = WMPHY_82578;
   10141 			break;
   10142 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10143 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10144 			/* 82579 */
   10145 			new_phytype = WMPHY_82579;
   10146 			break;
   10147 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10148 		case PCI_PRODUCT_INTEL_82801I_BM:
   10149 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10150 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10151 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10152 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10153 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10154 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10155 			/* ICH8, 9, 10 with 82567 */
   10156 			new_phytype = WMPHY_BM;
   10157 			break;
   10158 		default:
   10159 			break;
   10160 		}
   10161 	} else {
   10162 		/* It's not the first call. Use PHY OUI and model */
   10163 		switch (phy_oui) {
   10164 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10165 			switch (phy_model) {
   10166 			case 0x0004: /* XXX */
   10167 				new_phytype = WMPHY_82578;
   10168 				break;
   10169 			default:
   10170 				break;
   10171 			}
   10172 			break;
   10173 		case MII_OUI_xxMARVELL:
   10174 			switch (phy_model) {
   10175 			case MII_MODEL_xxMARVELL_I210:
   10176 				new_phytype = WMPHY_I210;
   10177 				break;
   10178 			case MII_MODEL_xxMARVELL_E1011:
   10179 			case MII_MODEL_xxMARVELL_E1000_3:
   10180 			case MII_MODEL_xxMARVELL_E1000_5:
   10181 			case MII_MODEL_xxMARVELL_E1112:
   10182 				new_phytype = WMPHY_M88;
   10183 				break;
   10184 			case MII_MODEL_xxMARVELL_E1149:
   10185 				new_phytype = WMPHY_BM;
   10186 				break;
   10187 			case MII_MODEL_xxMARVELL_E1111:
   10188 			case MII_MODEL_xxMARVELL_I347:
   10189 			case MII_MODEL_xxMARVELL_E1512:
   10190 			case MII_MODEL_xxMARVELL_E1340M:
   10191 			case MII_MODEL_xxMARVELL_E1543:
   10192 				new_phytype = WMPHY_M88;
   10193 				break;
   10194 			case MII_MODEL_xxMARVELL_I82563:
   10195 				new_phytype = WMPHY_GG82563;
   10196 				break;
   10197 			default:
   10198 				break;
   10199 			}
   10200 			break;
   10201 		case MII_OUI_INTEL:
   10202 			switch (phy_model) {
   10203 			case MII_MODEL_INTEL_I82577:
   10204 				new_phytype = WMPHY_82577;
   10205 				break;
   10206 			case MII_MODEL_INTEL_I82579:
   10207 				new_phytype = WMPHY_82579;
   10208 				break;
   10209 			case MII_MODEL_INTEL_I217:
   10210 				new_phytype = WMPHY_I217;
   10211 				break;
   10212 			case MII_MODEL_INTEL_I82580:
   10213 			case MII_MODEL_INTEL_I350:
   10214 				new_phytype = WMPHY_82580;
   10215 				break;
   10216 			default:
   10217 				break;
   10218 			}
   10219 			break;
   10220 		case MII_OUI_yyINTEL:
   10221 			switch (phy_model) {
   10222 			case MII_MODEL_yyINTEL_I82562G:
   10223 			case MII_MODEL_yyINTEL_I82562EM:
   10224 			case MII_MODEL_yyINTEL_I82562ET:
   10225 				new_phytype = WMPHY_IFE;
   10226 				break;
   10227 			case MII_MODEL_yyINTEL_IGP01E1000:
   10228 				new_phytype = WMPHY_IGP;
   10229 				break;
   10230 			case MII_MODEL_yyINTEL_I82566:
   10231 				new_phytype = WMPHY_IGP_3;
   10232 				break;
   10233 			default:
   10234 				break;
   10235 			}
   10236 			break;
   10237 		default:
   10238 			break;
   10239 		}
   10240 
   10241 		if (dodiag) {
   10242 			if (new_phytype == WMPHY_UNKNOWN)
   10243 				aprint_verbose_dev(dev,
   10244 				    "%s: Unknown PHY model. OUI=%06x, "
   10245 				    "model=%04x\n", __func__, phy_oui,
   10246 				    phy_model);
   10247 
   10248 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10249 			    && (sc->sc_phytype != new_phytype)) {
   10250 				aprint_error_dev(dev, "Previously assumed PHY "
   10251 				    "type(%u) was incorrect. PHY type from PHY"
   10252 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10253 			}
   10254 		}
   10255 	}
   10256 
   10257 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10258 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10259 		/* SGMII */
   10260 		new_readreg = wm_sgmii_readreg;
   10261 		new_writereg = wm_sgmii_writereg;
   10262 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10263 		/* BM2 (phyaddr == 1) */
   10264 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10265 		    && (new_phytype != WMPHY_BM)
   10266 		    && (new_phytype != WMPHY_UNKNOWN))
   10267 			doubt_phytype = new_phytype;
   10268 		new_phytype = WMPHY_BM;
   10269 		new_readreg = wm_gmii_bm_readreg;
   10270 		new_writereg = wm_gmii_bm_writereg;
   10271 	} else if (sc->sc_type >= WM_T_PCH) {
   10272 		/* All PCH* use _hv_ */
   10273 		new_readreg = wm_gmii_hv_readreg;
   10274 		new_writereg = wm_gmii_hv_writereg;
   10275 	} else if (sc->sc_type >= WM_T_ICH8) {
   10276 		/* non-82567 ICH8, 9 and 10 */
   10277 		new_readreg = wm_gmii_i82544_readreg;
   10278 		new_writereg = wm_gmii_i82544_writereg;
   10279 	} else if (sc->sc_type >= WM_T_80003) {
   10280 		/* 80003 */
   10281 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10282 		    && (new_phytype != WMPHY_GG82563)
   10283 		    && (new_phytype != WMPHY_UNKNOWN))
   10284 			doubt_phytype = new_phytype;
   10285 		new_phytype = WMPHY_GG82563;
   10286 		new_readreg = wm_gmii_i80003_readreg;
   10287 		new_writereg = wm_gmii_i80003_writereg;
   10288 	} else if (sc->sc_type >= WM_T_I210) {
   10289 		/* I210 and I211 */
   10290 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10291 		    && (new_phytype != WMPHY_I210)
   10292 		    && (new_phytype != WMPHY_UNKNOWN))
   10293 			doubt_phytype = new_phytype;
   10294 		new_phytype = WMPHY_I210;
   10295 		new_readreg = wm_gmii_gs40g_readreg;
   10296 		new_writereg = wm_gmii_gs40g_writereg;
   10297 	} else if (sc->sc_type >= WM_T_82580) {
   10298 		/* 82580, I350 and I354 */
   10299 		new_readreg = wm_gmii_82580_readreg;
   10300 		new_writereg = wm_gmii_82580_writereg;
   10301 	} else if (sc->sc_type >= WM_T_82544) {
    10302 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10303 		new_readreg = wm_gmii_i82544_readreg;
   10304 		new_writereg = wm_gmii_i82544_writereg;
   10305 	} else {
   10306 		new_readreg = wm_gmii_i82543_readreg;
   10307 		new_writereg = wm_gmii_i82543_writereg;
   10308 	}
   10309 
   10310 	if (new_phytype == WMPHY_BM) {
   10311 		/* All BM use _bm_ */
   10312 		new_readreg = wm_gmii_bm_readreg;
   10313 		new_writereg = wm_gmii_bm_writereg;
   10314 	}
   10315 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10316 		/* All PCH* use _hv_ */
   10317 		new_readreg = wm_gmii_hv_readreg;
   10318 		new_writereg = wm_gmii_hv_writereg;
   10319 	}
   10320 
   10321 	/* Diag output */
   10322 	if (dodiag) {
   10323 		if (doubt_phytype != WMPHY_UNKNOWN)
   10324 			aprint_error_dev(dev, "Assumed new PHY type was "
   10325 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10326 			    new_phytype);
   10327 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10328 		    && (sc->sc_phytype != new_phytype))
   10329 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   10330 			    "was incorrect. New PHY type = %u\n",
   10331 			    sc->sc_phytype, new_phytype);
   10332 
   10333 		if ((mii->mii_readreg != NULL) &&
   10334 		    (new_phytype == WMPHY_UNKNOWN))
   10335 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10336 
   10337 		if ((mii->mii_readreg != NULL) &&
   10338 		    (mii->mii_readreg != new_readreg))
   10339 			aprint_error_dev(dev, "Previously assumed PHY "
   10340 			    "read/write function was incorrect.\n");
   10341 	}
   10342 
   10343 	/* Update now */
   10344 	sc->sc_phytype = new_phytype;
   10345 	mii->mii_readreg = new_readreg;
   10346 	mii->mii_writereg = new_writereg;
   10347 	if (new_readreg == wm_gmii_hv_readreg) {
   10348 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10349 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10350 	} else if (new_readreg == wm_sgmii_readreg) {
   10351 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10352 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10353 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10354 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10355 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10356 	}
   10357 }
   10358 
   10359 /*
   10360  * wm_get_phy_id_82575:
   10361  *
   10362  * Return PHY ID. Return -1 if it failed.
   10363  */
   10364 static int
   10365 wm_get_phy_id_82575(struct wm_softc *sc)
   10366 {
   10367 	uint32_t reg;
   10368 	int phyid = -1;
   10369 
   10370 	/* XXX */
   10371 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10372 		return -1;
   10373 
   10374 	if (wm_sgmii_uses_mdio(sc)) {
   10375 		switch (sc->sc_type) {
   10376 		case WM_T_82575:
   10377 		case WM_T_82576:
   10378 			reg = CSR_READ(sc, WMREG_MDIC);
   10379 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10380 			break;
   10381 		case WM_T_82580:
   10382 		case WM_T_I350:
   10383 		case WM_T_I354:
   10384 		case WM_T_I210:
   10385 		case WM_T_I211:
   10386 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10387 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10388 			break;
   10389 		default:
   10390 			return -1;
   10391 		}
   10392 	}
   10393 
   10394 	return phyid;
   10395 }
   10396 
   10397 /*
   10398  * wm_gmii_mediainit:
   10399  *
   10400  *	Initialize media for use on 1000BASE-T devices.
   10401  */
   10402 static void
   10403 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10404 {
   10405 	device_t dev = sc->sc_dev;
   10406 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10407 	struct mii_data *mii = &sc->sc_mii;
   10408 
   10409 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10410 		device_xname(sc->sc_dev), __func__));
   10411 
   10412 	/* We have GMII. */
   10413 	sc->sc_flags |= WM_F_HAS_MII;
   10414 
   10415 	if (sc->sc_type == WM_T_80003)
   10416 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10417 	else
   10418 		sc->sc_tipg = TIPG_1000T_DFLT;
   10419 
   10420 	/*
   10421 	 * Let the chip set speed/duplex on its own based on
   10422 	 * signals from the PHY.
   10423 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10424 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10425 	 */
   10426 	sc->sc_ctrl |= CTRL_SLU;
   10427 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10428 
   10429 	/* Initialize our media structures and probe the GMII. */
   10430 	mii->mii_ifp = ifp;
   10431 
   10432 	mii->mii_statchg = wm_gmii_statchg;
   10433 
   10434 	/* get PHY control from SMBus to PCIe */
   10435 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10436 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10437 	    || (sc->sc_type == WM_T_PCH_CNP))
   10438 		wm_init_phy_workarounds_pchlan(sc);
   10439 
   10440 	wm_gmii_reset(sc);
   10441 
   10442 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10443 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10444 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10445 
   10446 	/* Setup internal SGMII PHY for SFP */
   10447 	wm_sgmii_sfp_preconfig(sc);
   10448 
   10449 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10450 	    || (sc->sc_type == WM_T_82580)
   10451 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10452 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10453 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10454 			/* Attach only one port */
   10455 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10456 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10457 		} else {
   10458 			int i, id;
   10459 			uint32_t ctrl_ext;
   10460 
   10461 			id = wm_get_phy_id_82575(sc);
   10462 			if (id != -1) {
   10463 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10464 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10465 			}
   10466 			if ((id == -1)
   10467 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10468 				/* Power on the SGMII PHY if it is disabled */
   10469 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10470 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10471 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10472 				CSR_WRITE_FLUSH(sc);
   10473 				delay(300*1000); /* XXX too long */
   10474 
   10475 				/*
    10476 				 * Scan PHY addresses 1 through 7.
    10477 				 *
    10478 				 * I2C access can fail with the I2C register's
    10479 				 * ERROR bit set, so suppress error messages
    10480 				 * while scanning.
   10481 				 */
   10482 				sc->phy.no_errprint = true;
   10483 				for (i = 1; i < 8; i++)
   10484 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10485 					    0xffffffff, i, MII_OFFSET_ANY,
   10486 					    MIIF_DOPAUSE);
   10487 				sc->phy.no_errprint = false;
   10488 
   10489 				/* Restore previous sfp cage power state */
   10490 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10491 			}
   10492 		}
   10493 	} else
   10494 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10495 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10496 
   10497 	/*
    10498 	 * If the MAC is PCH2, PCH_LPT, PCH_SPT or PCH_CNP and it failed to
    10499 	 * detect an MII PHY, call wm_set_mdio_slow_mode_hv() and retry.
   10500 	 */
   10501 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10502 		|| (sc->sc_type == WM_T_PCH_SPT)
   10503 		|| (sc->sc_type == WM_T_PCH_CNP))
   10504 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10505 		wm_set_mdio_slow_mode_hv(sc);
   10506 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10507 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10508 	}
   10509 
   10510 	/*
   10511 	 * (For ICH8 variants)
   10512 	 * If PHY detection failed, use BM's r/w function and retry.
   10513 	 */
   10514 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10515 		/* if failed, retry with *_bm_* */
   10516 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10517 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10518 		    sc->sc_phytype);
   10519 		sc->sc_phytype = WMPHY_BM;
   10520 		mii->mii_readreg = wm_gmii_bm_readreg;
   10521 		mii->mii_writereg = wm_gmii_bm_writereg;
   10522 
   10523 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10524 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10525 	}
   10526 
   10527 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10528 		/* No PHY was found */
   10529 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10530 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10531 		sc->sc_phytype = WMPHY_NONE;
   10532 	} else {
   10533 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10534 
   10535 		/*
   10536 		 * PHY Found! Check PHY type again by the second call of
   10537 		 * wm_gmii_setup_phytype.
   10538 		 */
   10539 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10540 		    child->mii_mpd_model);
   10541 
   10542 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10543 	}
   10544 }
   10545 
   10546 /*
   10547  * wm_gmii_mediachange:	[ifmedia interface function]
   10548  *
   10549  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10550  */
   10551 static int
   10552 wm_gmii_mediachange(struct ifnet *ifp)
   10553 {
   10554 	struct wm_softc *sc = ifp->if_softc;
   10555 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10556 	uint32_t reg;
   10557 	int rc;
   10558 
   10559 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10560 		device_xname(sc->sc_dev), __func__));
   10561 	if ((ifp->if_flags & IFF_UP) == 0)
   10562 		return 0;
   10563 
   10564 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10565 	if ((sc->sc_type == WM_T_82580)
   10566 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10567 	    || (sc->sc_type == WM_T_I211)) {
   10568 		reg = CSR_READ(sc, WMREG_PHPM);
   10569 		reg &= ~PHPM_GO_LINK_D;
   10570 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10571 	}
   10572 
   10573 	/* Disable D0 LPLU. */
   10574 	wm_lplu_d0_disable(sc);
   10575 
   10576 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10577 	sc->sc_ctrl |= CTRL_SLU;
   10578 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10579 	    || (sc->sc_type > WM_T_82543)) {
   10580 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10581 	} else {
   10582 		sc->sc_ctrl &= ~CTRL_ASDE;
   10583 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10584 		if (ife->ifm_media & IFM_FDX)
   10585 			sc->sc_ctrl |= CTRL_FD;
   10586 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10587 		case IFM_10_T:
   10588 			sc->sc_ctrl |= CTRL_SPEED_10;
   10589 			break;
   10590 		case IFM_100_TX:
   10591 			sc->sc_ctrl |= CTRL_SPEED_100;
   10592 			break;
   10593 		case IFM_1000_T:
   10594 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10595 			break;
   10596 		case IFM_NONE:
   10597 			/* There is no specific setting for IFM_NONE */
   10598 			break;
   10599 		default:
   10600 			panic("wm_gmii_mediachange: bad media 0x%x",
   10601 			    ife->ifm_media);
   10602 		}
   10603 	}
   10604 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10605 	CSR_WRITE_FLUSH(sc);
   10606 
   10607 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10608 		wm_serdes_mediachange(ifp);
   10609 
   10610 	if (sc->sc_type <= WM_T_82543)
   10611 		wm_gmii_reset(sc);
   10612 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10613 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10614 		/* Allow time for the SFP cage to power up the PHY */
   10615 		delay(300 * 1000);
   10616 		wm_gmii_reset(sc);
   10617 	}
   10618 
   10619 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10620 		return 0;
   10621 	return rc;
   10622 }
   10623 
   10624 /*
   10625  * wm_gmii_mediastatus:	[ifmedia interface function]
   10626  *
   10627  *	Get the current interface media status on a 1000BASE-T device.
   10628  */
   10629 static void
   10630 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10631 {
   10632 	struct wm_softc *sc = ifp->if_softc;
   10633 
   10634 	ether_mediastatus(ifp, ifmr);
   10635 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10636 	    | sc->sc_flowflags;
   10637 }
   10638 
   10639 #define	MDI_IO		CTRL_SWDPIN(2)
   10640 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10641 #define	MDI_CLK		CTRL_SWDPIN(3)
   10642 
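          /*
           * Bit-banged MDIO for the i82543, which has no MDIC controller: the
           * MII management frame is clocked out manually over software-
           * controlled pins (MDI_IO for data, MDI_CLK for the clock), holding
           * each clock phase for about 10us.
           */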
   10643 static void
   10644 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10645 {
   10646 	uint32_t i, v;
   10647 
   10648 	v = CSR_READ(sc, WMREG_CTRL);
   10649 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10650 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10651 
   10652 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10653 		if (data & i)
   10654 			v |= MDI_IO;
   10655 		else
   10656 			v &= ~MDI_IO;
   10657 		CSR_WRITE(sc, WMREG_CTRL, v);
   10658 		CSR_WRITE_FLUSH(sc);
   10659 		delay(10);
   10660 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10661 		CSR_WRITE_FLUSH(sc);
   10662 		delay(10);
   10663 		CSR_WRITE(sc, WMREG_CTRL, v);
   10664 		CSR_WRITE_FLUSH(sc);
   10665 		delay(10);
   10666 	}
   10667 }
   10668 
   10669 static uint16_t
   10670 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10671 {
   10672 	uint32_t v, i;
   10673 	uint16_t data = 0;
   10674 
   10675 	v = CSR_READ(sc, WMREG_CTRL);
   10676 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10677 	v |= CTRL_SWDPIO(3);
   10678 
   10679 	CSR_WRITE(sc, WMREG_CTRL, v);
   10680 	CSR_WRITE_FLUSH(sc);
   10681 	delay(10);
   10682 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10683 	CSR_WRITE_FLUSH(sc);
   10684 	delay(10);
   10685 	CSR_WRITE(sc, WMREG_CTRL, v);
   10686 	CSR_WRITE_FLUSH(sc);
   10687 	delay(10);
   10688 
   10689 	for (i = 0; i < 16; i++) {
   10690 		data <<= 1;
   10691 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10692 		CSR_WRITE_FLUSH(sc);
   10693 		delay(10);
   10694 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10695 			data |= 1;
   10696 		CSR_WRITE(sc, WMREG_CTRL, v);
   10697 		CSR_WRITE_FLUSH(sc);
   10698 		delay(10);
   10699 	}
   10700 
   10701 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10702 	CSR_WRITE_FLUSH(sc);
   10703 	delay(10);
   10704 	CSR_WRITE(sc, WMREG_CTRL, v);
   10705 	CSR_WRITE_FLUSH(sc);
   10706 	delay(10);
   10707 
   10708 	return data;
   10709 }
   10710 
   10711 #undef MDI_IO
   10712 #undef MDI_DIR
   10713 #undef MDI_CLK
   10714 
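          /*
           * The frames below follow IEEE 802.3 clause 22: a 32-bit preamble of
           * ones, then start, opcode, PHY address, register address,
           * turnaround and 16 data bits, most significant bit first.
           */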
   10715 /*
   10716  * wm_gmii_i82543_readreg:	[mii interface function]
   10717  *
   10718  *	Read a PHY register on the GMII (i82543 version).
   10719  */
   10720 static int
   10721 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10722 {
   10723 	struct wm_softc *sc = device_private(dev);
   10724 
   10725 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10726 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10727 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10728 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10729 
   10730 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10731 		device_xname(dev), phy, reg, *val));
   10732 
   10733 	return 0;
   10734 }
   10735 
   10736 /*
   10737  * wm_gmii_i82543_writereg:	[mii interface function]
   10738  *
   10739  *	Write a PHY register on the GMII (i82543 version).
   10740  */
   10741 static int
   10742 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10743 {
   10744 	struct wm_softc *sc = device_private(dev);
   10745 
   10746 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10747 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10748 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10749 	    (MII_COMMAND_START << 30), 32);
   10750 
   10751 	return 0;
   10752 }
   10753 
   10754 /*
   10755  * wm_gmii_mdic_readreg:	[mii interface function]
   10756  *
   10757  *	Read a PHY register on the GMII.
   10758  */
   10759 static int
   10760 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10761 {
   10762 	struct wm_softc *sc = device_private(dev);
   10763 	uint32_t mdic = 0;
   10764 	int i;
   10765 
   10766 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10767 	    && (reg > MII_ADDRMASK)) {
   10768 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10769 		    __func__, sc->sc_phytype, reg);
   10770 		reg &= MII_ADDRMASK;
   10771 	}
   10772 
   10773 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10774 	    MDIC_REGADD(reg));
   10775 
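          	/*
          	 * Poll for completion. With 50us per iteration, the wait is
          	 * bounded by roughly WM_GEN_POLL_TIMEOUT * 3 * 50 microseconds.
          	 */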
   10776 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10777 		delay(50);
   10778 		mdic = CSR_READ(sc, WMREG_MDIC);
   10779 		if (mdic & MDIC_READY)
   10780 			break;
   10781 	}
   10782 
   10783 	if ((mdic & MDIC_READY) == 0) {
   10784 		DPRINTF(WM_DEBUG_GMII,
   10785 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10786 			device_xname(dev), phy, reg));
   10787 		return ETIMEDOUT;
   10788 	} else if (mdic & MDIC_E) {
   10789 		/* This is normal if no PHY is present. */
   10790 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10791 			device_xname(sc->sc_dev), phy, reg));
   10792 		return -1;
   10793 	} else
   10794 		*val = MDIC_DATA(mdic);
   10795 
   10796 	/*
   10797 	 * Allow some time after each MDIC transaction to avoid
   10798 	 * reading duplicate data in the next MDIC transaction.
   10799 	 */
   10800 	if (sc->sc_type == WM_T_PCH2)
   10801 		delay(100);
   10802 
   10803 	return 0;
   10804 }
   10805 
   10806 /*
   10807  * wm_gmii_mdic_writereg:	[mii interface function]
   10808  *
   10809  *	Write a PHY register on the GMII.
   10810  */
   10811 static int
   10812 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10813 {
   10814 	struct wm_softc *sc = device_private(dev);
   10815 	uint32_t mdic = 0;
   10816 	int i;
   10817 
   10818 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10819 	    && (reg > MII_ADDRMASK)) {
   10820 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10821 		    __func__, sc->sc_phytype, reg);
   10822 		reg &= MII_ADDRMASK;
   10823 	}
   10824 
   10825 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10826 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10827 
   10828 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10829 		delay(50);
   10830 		mdic = CSR_READ(sc, WMREG_MDIC);
   10831 		if (mdic & MDIC_READY)
   10832 			break;
   10833 	}
   10834 
   10835 	if ((mdic & MDIC_READY) == 0) {
   10836 		DPRINTF(WM_DEBUG_GMII,
   10837 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10838 			device_xname(dev), phy, reg));
   10839 		return ETIMEDOUT;
   10840 	} else if (mdic & MDIC_E) {
   10841 		DPRINTF(WM_DEBUG_GMII,
   10842 		    ("%s: MDIC write error: phy %d reg %d\n",
   10843 			device_xname(dev), phy, reg));
   10844 		return -1;
   10845 	}
   10846 
   10847 	/*
   10848 	 * Allow some time after each MDIC transaction to avoid
   10849 	 * reading duplicate data in the next MDIC transaction.
   10850 	 */
   10851 	if (sc->sc_type == WM_T_PCH2)
   10852 		delay(100);
   10853 
   10854 	return 0;
   10855 }
   10856 
   10857 /*
   10858  * wm_gmii_i82544_readreg:	[mii interface function]
   10859  *
   10860  *	Read a PHY register on the GMII.
   10861  */
   10862 static int
   10863 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10864 {
   10865 	struct wm_softc *sc = device_private(dev);
   10866 	int rv;
   10867 
   10868 	if (sc->phy.acquire(sc)) {
   10869 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10870 		return -1;
   10871 	}
   10872 
   10873 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10874 
   10875 	sc->phy.release(sc);
   10876 
   10877 	return rv;
   10878 }
   10879 
   10880 static int
   10881 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10882 {
   10883 	struct wm_softc *sc = device_private(dev);
   10884 	int rv;
   10885 
   10886 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10887 		switch (sc->sc_phytype) {
   10888 		case WMPHY_IGP:
   10889 		case WMPHY_IGP_2:
   10890 		case WMPHY_IGP_3:
   10891 			rv = wm_gmii_mdic_writereg(dev, phy,
   10892 			    IGPHY_PAGE_SELECT, reg);
   10893 			if (rv != 0)
   10894 				return rv;
   10895 			break;
   10896 		default:
   10897 #ifdef WM_DEBUG
   10898 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10899 			    __func__, sc->sc_phytype, reg);
   10900 #endif
   10901 			break;
   10902 		}
   10903 	}
   10904 
   10905 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10906 }
   10907 
   10908 /*
   10909  * wm_gmii_i82544_writereg:	[mii interface function]
   10910  *
   10911  *	Write a PHY register on the GMII.
   10912  */
   10913 static int
   10914 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10915 {
   10916 	struct wm_softc *sc = device_private(dev);
   10917 	int rv;
   10918 
   10919 	if (sc->phy.acquire(sc)) {
   10920 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10921 		return -1;
   10922 	}
   10923 
   10924 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10925 	sc->phy.release(sc);
   10926 
   10927 	return rv;
   10928 }
   10929 
   10930 static int
   10931 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10932 {
   10933 	struct wm_softc *sc = device_private(dev);
   10934 	int rv;
   10935 
   10936 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10937 		switch (sc->sc_phytype) {
   10938 		case WMPHY_IGP:
   10939 		case WMPHY_IGP_2:
   10940 		case WMPHY_IGP_3:
   10941 			rv = wm_gmii_mdic_writereg(dev, phy,
   10942 			    IGPHY_PAGE_SELECT, reg);
   10943 			if (rv != 0)
   10944 				return rv;
   10945 			break;
   10946 		default:
   10947 #ifdef WM_DEBUG
   10948 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10949 			    __func__, sc->sc_phytype, reg);
   10950 #endif
   10951 			break;
   10952 		}
   10953 	}
   10954 
   10955 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10956 }
   10957 
   10958 /*
   10959  * wm_gmii_i80003_readreg:	[mii interface function]
   10960  *
    10961  *	Read a PHY register on the Kumeran.
   10962  * This could be handled by the PHY layer if we didn't have to lock the
   10963  * resource ...
   10964  */
   10965 static int
   10966 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10967 {
   10968 	struct wm_softc *sc = device_private(dev);
   10969 	int page_select;
   10970 	uint16_t temp, temp2;
   10971 	int rv = 0;
   10972 
   10973 	if (phy != 1) /* Only one PHY on kumeran bus */
   10974 		return -1;
   10975 
   10976 	if (sc->phy.acquire(sc)) {
   10977 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10978 		return -1;
   10979 	}
   10980 
   10981 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10982 		page_select = GG82563_PHY_PAGE_SELECT;
   10983 	else {
   10984 		/*
   10985 		 * Use Alternative Page Select register to access registers
   10986 		 * 30 and 31.
   10987 		 */
   10988 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10989 	}
   10990 	temp = reg >> GG82563_PAGE_SHIFT;
   10991 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10992 		goto out;
   10993 
   10994 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10995 		/*
    10996 		 * Wait another 200us to work around a bug of the ready bit
    10997 		 * in the MDIC register.
   10998 		 */
   10999 		delay(200);
   11000 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11001 		if ((rv != 0) || (temp2 != temp)) {
   11002 			device_printf(dev, "%s failed\n", __func__);
   11003 			rv = -1;
   11004 			goto out;
   11005 		}
   11006 		delay(200);
   11007 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11008 		delay(200);
   11009 	} else
   11010 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11011 
   11012 out:
   11013 	sc->phy.release(sc);
   11014 	return rv;
   11015 }
   11016 
   11017 /*
   11018  * wm_gmii_i80003_writereg:	[mii interface function]
   11019  *
    11020  *	Write a PHY register on the Kumeran.
   11021  * This could be handled by the PHY layer if we didn't have to lock the
   11022  * resource ...
   11023  */
   11024 static int
   11025 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11026 {
   11027 	struct wm_softc *sc = device_private(dev);
   11028 	int page_select, rv;
   11029 	uint16_t temp, temp2;
   11030 
   11031 	if (phy != 1) /* Only one PHY on kumeran bus */
   11032 		return -1;
   11033 
   11034 	if (sc->phy.acquire(sc)) {
   11035 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11036 		return -1;
   11037 	}
   11038 
   11039 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11040 		page_select = GG82563_PHY_PAGE_SELECT;
   11041 	else {
   11042 		/*
   11043 		 * Use Alternative Page Select register to access registers
   11044 		 * 30 and 31.
   11045 		 */
   11046 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11047 	}
   11048 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11049 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11050 		goto out;
   11051 
   11052 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11053 		/*
    11054 		 * Wait another 200us to work around a bug of the ready bit
    11055 		 * in the MDIC register.
   11056 		 */
   11057 		delay(200);
   11058 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11059 		if ((rv != 0) || (temp2 != temp)) {
   11060 			device_printf(dev, "%s failed\n", __func__);
   11061 			rv = -1;
   11062 			goto out;
   11063 		}
   11064 		delay(200);
   11065 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11066 		delay(200);
   11067 	} else
   11068 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11069 
   11070 out:
   11071 	sc->phy.release(sc);
   11072 	return rv;
   11073 }
   11074 
   11075 /*
   11076  * wm_gmii_bm_readreg:	[mii interface function]
   11077  *
    11078  *	Read a PHY register on the BM PHY (82567 and related).
   11079  * This could be handled by the PHY layer if we didn't have to lock the
   11080  * resource ...
   11081  */
   11082 static int
   11083 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11084 {
   11085 	struct wm_softc *sc = device_private(dev);
   11086 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11087 	int rv;
   11088 
   11089 	if (sc->phy.acquire(sc)) {
   11090 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11091 		return -1;
   11092 	}
   11093 
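          	/*
          	 * On BM PHYs, registers on pages >= 768, register 25 on page 0
          	 * and register 31 are accessed at PHY address 1 (see also
          	 * wm_enable_phy_wakeup_reg_access_bm()).
          	 */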
   11094 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11095 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11096 		    || (reg == 31)) ? 1 : phy;
   11097 	/* Page 800 works differently than the rest so it has its own func */
   11098 	if (page == BM_WUC_PAGE) {
   11099 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11100 		goto release;
   11101 	}
   11102 
   11103 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11104 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11105 		    && (sc->sc_type != WM_T_82583))
   11106 			rv = wm_gmii_mdic_writereg(dev, phy,
   11107 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11108 		else
   11109 			rv = wm_gmii_mdic_writereg(dev, phy,
   11110 			    BME1000_PHY_PAGE_SELECT, page);
   11111 		if (rv != 0)
   11112 			goto release;
   11113 	}
   11114 
   11115 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11116 
   11117 release:
   11118 	sc->phy.release(sc);
   11119 	return rv;
   11120 }
   11121 
   11122 /*
   11123  * wm_gmii_bm_writereg:	[mii interface function]
   11124  *
    11125  *	Write a PHY register on the BM PHY (82567 and related).
   11126  * This could be handled by the PHY layer if we didn't have to lock the
   11127  * resource ...
   11128  */
   11129 static int
   11130 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11131 {
   11132 	struct wm_softc *sc = device_private(dev);
   11133 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11134 	int rv;
   11135 
   11136 	if (sc->phy.acquire(sc)) {
   11137 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11138 		return -1;
   11139 	}
   11140 
   11141 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11142 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11143 		    || (reg == 31)) ? 1 : phy;
   11144 	/* Page 800 works differently than the rest so it has its own func */
   11145 	if (page == BM_WUC_PAGE) {
   11146 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11147 		goto release;
   11148 	}
   11149 
   11150 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11151 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11152 		    && (sc->sc_type != WM_T_82583))
   11153 			rv = wm_gmii_mdic_writereg(dev, phy,
   11154 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11155 		else
   11156 			rv = wm_gmii_mdic_writereg(dev, phy,
   11157 			    BME1000_PHY_PAGE_SELECT, page);
   11158 		if (rv != 0)
   11159 			goto release;
   11160 	}
   11161 
   11162 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11163 
   11164 release:
   11165 	sc->phy.release(sc);
   11166 	return rv;
   11167 }
   11168 
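          #if 0
          /*
           * Illustrative sketch only, not part of the driver: for register
           * numbers 0-31, the "reg" argument of the BM accessors above encodes
           * the page in the bits above BME1000_PAGE_SHIFT, matching the
           * "page = reg >> BME1000_PAGE_SHIFT" decoding.  The helper name is
           * hypothetical.
           */
          static inline int
          wm_example_bm_reg(uint16_t page, uint16_t regnum)
          {
          
          	return (page << BME1000_PAGE_SHIFT) | (regnum & MII_ADDRMASK);
          }
          #endif
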
   11169 /*
   11170  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11171  *  @dev: pointer to the HW structure
    11172  *  @phy_regp: pointer to store original contents of BM_WUC_ENABLE_REG
   11173  *
   11174  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11175  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11176  */
   11177 static int
   11178 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11179 {
   11180 	uint16_t temp;
   11181 	int rv;
   11182 
   11183 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11184 		device_xname(dev), __func__));
   11185 
   11186 	if (!phy_regp)
   11187 		return -1;
   11188 
   11189 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11190 
   11191 	/* Select Port Control Registers page */
   11192 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11193 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11194 	if (rv != 0)
   11195 		return rv;
   11196 
   11197 	/* Read WUCE and save it */
   11198 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11199 	if (rv != 0)
   11200 		return rv;
   11201 
   11202 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11203 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11204 	 */
   11205 	temp = *phy_regp;
   11206 	temp |= BM_WUC_ENABLE_BIT;
   11207 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11208 
   11209 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11210 		return rv;
   11211 
   11212 	/* Select Host Wakeup Registers page - caller now able to write
   11213 	 * registers on the Wakeup registers page
   11214 	 */
   11215 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11216 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11217 }
   11218 
   11219 /*
   11220  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11221  *  @dev: pointer to the HW structure
    11222  *  @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
   11223  *
   11224  *  Restore BM_WUC_ENABLE_REG to its original value.
   11225  *
   11226  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11227  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11228  *  caller.
   11229  */
   11230 static int
   11231 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11232 {
   11233 
   11234 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11235 		device_xname(dev), __func__));
   11236 
   11237 	if (!phy_regp)
   11238 		return -1;
   11239 
   11240 	/* Select Port Control Registers page */
   11241 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11242 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11243 
   11244 	/* Restore 769.17 to its original value */
   11245 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11246 
   11247 	return 0;
   11248 }
   11249 
   11250 /*
   11251  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    11252  *  @dev: pointer to the HW structure
   11253  *  @offset: register offset to be read or written
   11254  *  @val: pointer to the data to read or write
   11255  *  @rd: determines if operation is read or write
   11256  *  @page_set: BM_WUC_PAGE already set and access enabled
   11257  *
    11258  *  Read the PHY register at offset and store the retrieved information
    11259  *  in val, or write the data in val to the PHY register at offset.  Note
    11260  *  that the procedure to access the PHY wakeup registers differs from
    11261  *  that for the other PHY registers.  It works as follows:
   11262  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11263  *  2) Set page to 800 for host (801 if we were manageability)
   11264  *  3) Write the address using the address opcode (0x11)
   11265  *  4) Read or write the data using the data opcode (0x12)
   11266  *  5) Restore 769.17.2 to its original value
   11267  *
   11268  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11269  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11270  *
   11271  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11272  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    11273  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11274  */
   11275 static int
    11276 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11277 	bool page_set)
   11278 {
   11279 	struct wm_softc *sc = device_private(dev);
   11280 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11281 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11282 	uint16_t wuce;
   11283 	int rv = 0;
   11284 
   11285 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11286 		device_xname(dev), __func__));
   11287 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11288 	if ((sc->sc_type == WM_T_PCH)
   11289 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11290 		device_printf(dev,
   11291 		    "Attempting to access page %d while gig enabled.\n", page);
   11292 	}
   11293 
   11294 	if (!page_set) {
   11295 		/* Enable access to PHY wakeup registers */
   11296 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11297 		if (rv != 0) {
   11298 			device_printf(dev,
   11299 			    "%s: Could not enable PHY wakeup reg access\n",
   11300 			    __func__);
   11301 			return rv;
   11302 		}
   11303 	}
   11304 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11305 		device_xname(sc->sc_dev), __func__, page, regnum));
   11306 
   11307 	/*
    11308 	 * Steps 3) and 4): Access the PHY wakeup register.  See the
    11309 	 * procedure in the function-header comment above.
   11310 	 */
   11311 
   11312 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11313 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11314 	if (rv != 0)
   11315 		return rv;
   11316 
   11317 	if (rd) {
   11318 		/* Read the Wakeup register page value using opcode 0x12 */
   11319 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11320 	} else {
   11321 		/* Write the Wakeup register page value using opcode 0x12 */
   11322 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11323 	}
   11324 	if (rv != 0)
   11325 		return rv;
   11326 
   11327 	if (!page_set)
   11328 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11329 
   11330 	return rv;
   11331 }
   11332 
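          #if 0
          /*
           * Illustrative sketch only, not part of the driver: reading one BM
           * wakeup register with the PHY semaphore already held.  The offset
           * argument is expected to encode BM_WUC_PAGE, as the callers above
           * pass it.  With page_set == false the function brackets the access
           * with the enable/disable helpers itself.
           */
          static int
          wm_example_read_wakeup_reg(device_t dev, int offset, uint16_t *val)
          {
          
          	return wm_access_phy_wakeup_reg_bm(dev, offset, val, true, false);
          }
          #endif
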
   11333 /*
   11334  * wm_gmii_hv_readreg:	[mii interface function]
   11335  *
    11336  *	Read a PHY register on the HV PHY (PCH and newer).
   11337  * This could be handled by the PHY layer if we didn't have to lock the
   11338  * resource ...
   11339  */
   11340 static int
   11341 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11342 {
   11343 	struct wm_softc *sc = device_private(dev);
   11344 	int rv;
   11345 
   11346 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11347 		device_xname(dev), __func__));
   11348 	if (sc->phy.acquire(sc)) {
   11349 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11350 		return -1;
   11351 	}
   11352 
   11353 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11354 	sc->phy.release(sc);
   11355 	return rv;
   11356 }
   11357 
   11358 static int
   11359 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11360 {
   11361 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11362 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11363 	int rv;
   11364 
   11365 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11366 
   11367 	/* Page 800 works differently than the rest so it has its own func */
   11368 	if (page == BM_WUC_PAGE)
   11369 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11370 
   11371 	/*
    11372 	 * Pages 1 through 767 work differently than the rest and are
    11373 	 * not supported here.
   11374 	 */
   11375 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11376 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11377 		return -1;
   11378 	}
   11379 
   11380 	/*
   11381 	 * XXX I21[789] documents say that the SMBus Address register is at
   11382 	 * PHY address 01, Page 0 (not 768), Register 26.
   11383 	 */
   11384 	if (page == HV_INTC_FC_PAGE_START)
   11385 		page = 0;
   11386 
   11387 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11388 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11389 		    page << BME1000_PAGE_SHIFT);
   11390 		if (rv != 0)
   11391 			return rv;
   11392 	}
   11393 
   11394 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11395 }
   11396 
   11397 /*
   11398  * wm_gmii_hv_writereg:	[mii interface function]
   11399  *
    11400  *	Write a PHY register on the HV PHY (PCH and newer).
   11401  * This could be handled by the PHY layer if we didn't have to lock the
   11402  * resource ...
   11403  */
   11404 static int
   11405 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11406 {
   11407 	struct wm_softc *sc = device_private(dev);
   11408 	int rv;
   11409 
   11410 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11411 		device_xname(dev), __func__));
   11412 
   11413 	if (sc->phy.acquire(sc)) {
   11414 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11415 		return -1;
   11416 	}
   11417 
   11418 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11419 	sc->phy.release(sc);
   11420 
   11421 	return rv;
   11422 }
   11423 
   11424 static int
   11425 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11426 {
   11427 	struct wm_softc *sc = device_private(dev);
   11428 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11429 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11430 	int rv;
   11431 
   11432 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11433 
   11434 	/* Page 800 works differently than the rest so it has its own func */
   11435 	if (page == BM_WUC_PAGE)
   11436 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11437 		    false);
   11438 
   11439 	/*
    11440 	 * Pages 1 through 767 work differently than the rest and are
    11441 	 * not supported here.
   11442 	 */
   11443 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11444 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11445 		return -1;
   11446 	}
   11447 
   11448 	{
   11449 		/*
   11450 		 * XXX I21[789] documents say that the SMBus Address register
   11451 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11452 		 */
   11453 		if (page == HV_INTC_FC_PAGE_START)
   11454 			page = 0;
   11455 
   11456 		/*
   11457 		 * XXX Workaround MDIO accesses being disabled after entering
   11458 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11459 		 * register is set)
   11460 		 */
   11461 		if (sc->sc_phytype == WMPHY_82578) {
   11462 			struct mii_softc *child;
   11463 
   11464 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11465 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11466 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11467 			    && ((val & (1 << 11)) != 0)) {
   11468 				device_printf(dev, "XXX need workaround\n");
   11469 			}
   11470 		}
   11471 
   11472 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11473 			rv = wm_gmii_mdic_writereg(dev, 1,
   11474 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11475 			if (rv != 0)
   11476 				return rv;
   11477 		}
   11478 	}
   11479 
   11480 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11481 }
   11482 
   11483 /*
   11484  * wm_gmii_82580_readreg:	[mii interface function]
   11485  *
   11486  *	Read a PHY register on the 82580 and I350.
   11487  * This could be handled by the PHY layer if we didn't have to lock the
   11488  * resource ...
   11489  */
   11490 static int
   11491 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11492 {
   11493 	struct wm_softc *sc = device_private(dev);
   11494 	int rv;
   11495 
   11496 	if (sc->phy.acquire(sc) != 0) {
   11497 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11498 		return -1;
   11499 	}
   11500 
   11501 #ifdef DIAGNOSTIC
   11502 	if (reg > MII_ADDRMASK) {
   11503 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11504 		    __func__, sc->sc_phytype, reg);
   11505 		reg &= MII_ADDRMASK;
   11506 	}
   11507 #endif
   11508 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11509 
   11510 	sc->phy.release(sc);
   11511 	return rv;
   11512 }
   11513 
   11514 /*
   11515  * wm_gmii_82580_writereg:	[mii interface function]
   11516  *
   11517  *	Write a PHY register on the 82580 and I350.
   11518  * This could be handled by the PHY layer if we didn't have to lock the
   11519  * resource ...
   11520  */
   11521 static int
   11522 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11523 {
   11524 	struct wm_softc *sc = device_private(dev);
   11525 	int rv;
   11526 
   11527 	if (sc->phy.acquire(sc) != 0) {
   11528 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11529 		return -1;
   11530 	}
   11531 
   11532 #ifdef DIAGNOSTIC
   11533 	if (reg > MII_ADDRMASK) {
   11534 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11535 		    __func__, sc->sc_phytype, reg);
   11536 		reg &= MII_ADDRMASK;
   11537 	}
   11538 #endif
   11539 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11540 
   11541 	sc->phy.release(sc);
   11542 	return rv;
   11543 }
   11544 
   11545 /*
   11546  * wm_gmii_gs40g_readreg:	[mii interface function]
   11547  *
    11548  *	Read a PHY register on the I210 and I211.
   11549  * This could be handled by the PHY layer if we didn't have to lock the
   11550  * resource ...
   11551  */
   11552 static int
   11553 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11554 {
   11555 	struct wm_softc *sc = device_private(dev);
   11556 	int page, offset;
   11557 	int rv;
   11558 
   11559 	/* Acquire semaphore */
   11560 	if (sc->phy.acquire(sc)) {
   11561 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11562 		return -1;
   11563 	}
   11564 
   11565 	/* Page select */
   11566 	page = reg >> GS40G_PAGE_SHIFT;
   11567 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11568 	if (rv != 0)
   11569 		goto release;
   11570 
   11571 	/* Read reg */
   11572 	offset = reg & GS40G_OFFSET_MASK;
   11573 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11574 
   11575 release:
   11576 	sc->phy.release(sc);
   11577 	return rv;
   11578 }
   11579 
   11580 /*
   11581  * wm_gmii_gs40g_writereg:	[mii interface function]
   11582  *
   11583  *	Write a PHY register on the I210 and I211.
   11584  * This could be handled by the PHY layer if we didn't have to lock the
   11585  * resource ...
   11586  */
   11587 static int
   11588 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11589 {
   11590 	struct wm_softc *sc = device_private(dev);
   11591 	uint16_t page;
   11592 	int offset, rv;
   11593 
   11594 	/* Acquire semaphore */
   11595 	if (sc->phy.acquire(sc)) {
   11596 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11597 		return -1;
   11598 	}
   11599 
   11600 	/* Page select */
   11601 	page = reg >> GS40G_PAGE_SHIFT;
   11602 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11603 	if (rv != 0)
   11604 		goto release;
   11605 
   11606 	/* Write reg */
   11607 	offset = reg & GS40G_OFFSET_MASK;
   11608 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11609 
   11610 release:
   11611 	/* Release semaphore */
   11612 	sc->phy.release(sc);
   11613 	return rv;
   11614 }
   11615 
   11616 /*
   11617  * wm_gmii_statchg:	[mii interface function]
   11618  *
   11619  *	Callback from MII layer when media changes.
   11620  */
   11621 static void
   11622 wm_gmii_statchg(struct ifnet *ifp)
   11623 {
   11624 	struct wm_softc *sc = ifp->if_softc;
   11625 	struct mii_data *mii = &sc->sc_mii;
   11626 
   11627 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11628 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11629 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11630 
   11631 	/* Get flow control negotiation result. */
   11632 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11633 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11634 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11635 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11636 	}
   11637 
   11638 	if (sc->sc_flowflags & IFM_FLOW) {
   11639 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11640 			sc->sc_ctrl |= CTRL_TFCE;
   11641 			sc->sc_fcrtl |= FCRTL_XONE;
   11642 		}
   11643 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11644 			sc->sc_ctrl |= CTRL_RFCE;
   11645 	}
   11646 
   11647 	if (mii->mii_media_active & IFM_FDX) {
   11648 		DPRINTF(WM_DEBUG_LINK,
   11649 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11650 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11651 	} else {
   11652 		DPRINTF(WM_DEBUG_LINK,
   11653 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11654 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11655 	}
   11656 
   11657 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11658 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11659 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11660 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11661 	if (sc->sc_type == WM_T_80003) {
   11662 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11663 		case IFM_1000_T:
   11664 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11665 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11666 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11667 			break;
   11668 		default:
   11669 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11670 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11671 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11672 			break;
   11673 		}
   11674 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11675 	}
   11676 }
   11677 
   11678 /* kumeran related (80003, ICH* and PCH*) */
   11679 
   11680 /*
   11681  * wm_kmrn_readreg:
   11682  *
   11683  *	Read a kumeran register
   11684  */
   11685 static int
   11686 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11687 {
   11688 	int rv;
   11689 
   11690 	if (sc->sc_type == WM_T_80003)
   11691 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11692 	else
   11693 		rv = sc->phy.acquire(sc);
   11694 	if (rv != 0) {
   11695 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11696 		    __func__);
   11697 		return rv;
   11698 	}
   11699 
   11700 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11701 
   11702 	if (sc->sc_type == WM_T_80003)
   11703 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11704 	else
   11705 		sc->phy.release(sc);
   11706 
   11707 	return rv;
   11708 }
   11709 
   11710 static int
   11711 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11712 {
   11713 
   11714 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11715 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11716 	    KUMCTRLSTA_REN);
   11717 	CSR_WRITE_FLUSH(sc);
   11718 	delay(2);
   11719 
   11720 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11721 
   11722 	return 0;
   11723 }
   11724 
   11725 /*
   11726  * wm_kmrn_writereg:
   11727  *
   11728  *	Write a kumeran register
   11729  */
   11730 static int
   11731 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11732 {
   11733 	int rv;
   11734 
   11735 	if (sc->sc_type == WM_T_80003)
   11736 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11737 	else
   11738 		rv = sc->phy.acquire(sc);
   11739 	if (rv != 0) {
   11740 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11741 		    __func__);
   11742 		return rv;
   11743 	}
   11744 
   11745 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11746 
   11747 	if (sc->sc_type == WM_T_80003)
   11748 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11749 	else
   11750 		sc->phy.release(sc);
   11751 
   11752 	return rv;
   11753 }
   11754 
   11755 static int
   11756 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11757 {
   11758 
   11759 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11760 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11761 
   11762 	return 0;
   11763 }
   11764 
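          #if 0
          /*
           * Illustrative sketch only, not part of the driver: a
           * read-modify-write of a kumeran register using the accessors above,
           * which acquire and release the semaphore themselves.
           * KUMCTRLSTA_OFFSET_HD_CTRL is just an example offset.
           */
          static int
          wm_example_kmrn_rmw(struct wm_softc *sc, uint16_t setbits)
          {
          	uint16_t val;
          	int rv;
          
          	rv = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &val);
          	if (rv != 0)
          		return rv;
          	return wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
          	    val | setbits);
          }
          #endif
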
   11765 /*
    11766  * EMI register related (82579 and WMPHY_I217 (PCH2 and newer)).
   11767  * This access method is different from IEEE MMD.
   11768  */
   11769 static int
   11770 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11771 {
   11772 	struct wm_softc *sc = device_private(dev);
   11773 	int rv;
   11774 
   11775 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11776 	if (rv != 0)
   11777 		return rv;
   11778 
   11779 	if (rd)
   11780 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11781 	else
   11782 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11783 	return rv;
   11784 }
   11785 
   11786 static int
   11787 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11788 {
   11789 
   11790 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11791 }
   11792 
   11793 static int
   11794 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11795 {
   11796 
   11797 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11798 }
   11799 
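          #if 0
          /*
           * Illustrative sketch only, not part of the driver: the "_locked"
           * EMI accessors above assume the PHY semaphore is held, so an
           * unlocked caller would bracket them with sc->phy.acquire/release.
           */
          static int
          wm_example_read_emi(struct wm_softc *sc, int reg, uint16_t *val)
          {
          	int rv;
          
          	if (sc->phy.acquire(sc) != 0)
          		return -1;
          	rv = wm_read_emi_reg_locked(sc->sc_dev, reg, val);
          	sc->phy.release(sc);
          	return rv;
          }
          #endif
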
   11800 /* SGMII related */
   11801 
   11802 /*
   11803  * wm_sgmii_uses_mdio
   11804  *
   11805  * Check whether the transaction is to the internal PHY or the external
   11806  * MDIO interface. Return true if it's MDIO.
   11807  */
   11808 static bool
   11809 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11810 {
   11811 	uint32_t reg;
   11812 	bool ismdio = false;
   11813 
   11814 	switch (sc->sc_type) {
   11815 	case WM_T_82575:
   11816 	case WM_T_82576:
   11817 		reg = CSR_READ(sc, WMREG_MDIC);
   11818 		ismdio = ((reg & MDIC_DEST) != 0);
   11819 		break;
   11820 	case WM_T_82580:
   11821 	case WM_T_I350:
   11822 	case WM_T_I354:
   11823 	case WM_T_I210:
   11824 	case WM_T_I211:
   11825 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11826 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11827 		break;
   11828 	default:
   11829 		break;
   11830 	}
   11831 
   11832 	return ismdio;
   11833 }
   11834 
   11835 /* Setup internal SGMII PHY for SFP */
   11836 static void
   11837 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   11838 {
   11839 	uint16_t id1, id2, phyreg;
   11840 	int i, rv;
   11841 
   11842 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   11843 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   11844 		return;
   11845 
   11846 	for (i = 0; i < MII_NPHY; i++) {
   11847 		sc->phy.no_errprint = true;
   11848 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   11849 		if (rv != 0)
   11850 			continue;
   11851 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   11852 		if (rv != 0)
   11853 			continue;
   11854 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   11855 			continue;
   11856 		sc->phy.no_errprint = false;
   11857 
   11858 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   11859 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   11860 		phyreg |= ESSR_SGMII_WOC_COPPER;
   11861 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   11862 		break;
   11863 	}
   11865 }
   11866 
   11867 /*
   11868  * wm_sgmii_readreg:	[mii interface function]
   11869  *
   11870  *	Read a PHY register on the SGMII
   11871  * This could be handled by the PHY layer if we didn't have to lock the
   11872  * resource ...
   11873  */
   11874 static int
   11875 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11876 {
   11877 	struct wm_softc *sc = device_private(dev);
   11878 	int rv;
   11879 
   11880 	if (sc->phy.acquire(sc)) {
   11881 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11882 		return -1;
   11883 	}
   11884 
   11885 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11886 
   11887 	sc->phy.release(sc);
   11888 	return rv;
   11889 }
   11890 
   11891 static int
   11892 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11893 {
   11894 	struct wm_softc *sc = device_private(dev);
   11895 	uint32_t i2ccmd;
   11896 	int i, rv = 0;
   11897 
   11898 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11899 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11900 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11901 
   11902 	/* Poll the ready bit */
   11903 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11904 		delay(50);
   11905 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11906 		if (i2ccmd & I2CCMD_READY)
   11907 			break;
   11908 	}
   11909 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11910 		device_printf(dev, "I2CCMD Read did not complete\n");
   11911 		rv = ETIMEDOUT;
   11912 	}
   11913 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11914 		if (!sc->phy.no_errprint)
   11915 			device_printf(dev, "I2CCMD Error bit set\n");
   11916 		rv = EIO;
   11917 	}
   11918 
   11919 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11920 
   11921 	return rv;
   11922 }
   11923 
   11924 /*
   11925  * wm_sgmii_writereg:	[mii interface function]
   11926  *
   11927  *	Write a PHY register on the SGMII.
   11928  * This could be handled by the PHY layer if we didn't have to lock the
   11929  * resource ...
   11930  */
   11931 static int
   11932 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11933 {
   11934 	struct wm_softc *sc = device_private(dev);
   11935 	int rv;
   11936 
   11937 	if (sc->phy.acquire(sc) != 0) {
   11938 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11939 		return -1;
   11940 	}
   11941 
   11942 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11943 
   11944 	sc->phy.release(sc);
   11945 
   11946 	return rv;
   11947 }
   11948 
   11949 static int
   11950 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11951 {
   11952 	struct wm_softc *sc = device_private(dev);
   11953 	uint32_t i2ccmd;
   11954 	uint16_t swapdata;
   11955 	int rv = 0;
   11956 	int i;
   11957 
   11958 	/* Swap the data bytes for the I2C interface */
   11959 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11960 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11961 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11962 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11963 
   11964 	/* Poll the ready bit */
   11965 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11966 		delay(50);
   11967 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11968 		if (i2ccmd & I2CCMD_READY)
   11969 			break;
   11970 	}
   11971 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11972 		device_printf(dev, "I2CCMD Write did not complete\n");
   11973 		rv = ETIMEDOUT;
   11974 	}
   11975 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11976 		device_printf(dev, "I2CCMD Error bit set\n");
   11977 		rv = EIO;
   11978 	}
   11979 
   11980 	return rv;
   11981 }
   11982 
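          #if 0
          /*
           * Illustrative sketch only, not part of the driver: the I2C data
           * byte swap used by the SGMII accessors above is its own inverse,
           * so a value written via wm_sgmii_writereg_locked() reads back
           * unchanged via wm_sgmii_readreg_locked().
           */
          static inline uint16_t
          wm_example_i2c_swap(uint16_t val)
          {
          
          	return ((val >> 8) & 0x00ff) | ((val << 8) & 0xff00);
          }
          #endif
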
   11983 /* TBI related */
   11984 
   11985 static bool
   11986 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11987 {
   11988 	bool sig;
   11989 
   11990 	sig = ctrl & CTRL_SWDPIN(1);
   11991 
   11992 	/*
   11993 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11994 	 * detect a signal, 1 if they don't.
   11995 	 */
   11996 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11997 		sig = !sig;
   11998 
   11999 	return sig;
   12000 }
   12001 
   12002 /*
   12003  * wm_tbi_mediainit:
   12004  *
   12005  *	Initialize media for use on 1000BASE-X devices.
   12006  */
   12007 static void
   12008 wm_tbi_mediainit(struct wm_softc *sc)
   12009 {
   12010 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12011 	const char *sep = "";
   12012 
   12013 	if (sc->sc_type < WM_T_82543)
   12014 		sc->sc_tipg = TIPG_WM_DFLT;
   12015 	else
   12016 		sc->sc_tipg = TIPG_LG_DFLT;
   12017 
   12018 	sc->sc_tbi_serdes_anegticks = 5;
   12019 
   12020 	/* Initialize our media structures */
   12021 	sc->sc_mii.mii_ifp = ifp;
   12022 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12023 
   12024 	ifp->if_baudrate = IF_Gbps(1);
   12025 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12026 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12027 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12028 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12029 		    sc->sc_core_lock);
   12030 	} else {
   12031 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12032 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12033 	}
   12034 
   12035 	/*
   12036 	 * SWD Pins:
   12037 	 *
   12038 	 *	0 = Link LED (output)
   12039 	 *	1 = Loss Of Signal (input)
   12040 	 */
   12041 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12042 
   12043 	/* XXX Perhaps this is only for TBI */
   12044 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12045 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12046 
   12047 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12048 		sc->sc_ctrl &= ~CTRL_LRST;
   12049 
   12050 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12051 
   12052 #define	ADD(ss, mm, dd)							\
   12053 do {									\
   12054 	aprint_normal("%s%s", sep, ss);					\
   12055 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12056 	sep = ", ";							\
   12057 } while (/*CONSTCOND*/0)
   12058 
   12059 	aprint_normal_dev(sc->sc_dev, "");
   12060 
   12061 	if (sc->sc_type == WM_T_I354) {
   12062 		uint32_t status;
   12063 
   12064 		status = CSR_READ(sc, WMREG_STATUS);
   12065 		if (((status & STATUS_2P5_SKU) != 0)
   12066 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12067 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12068 		} else
   12069 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12070 	} else if (sc->sc_type == WM_T_82545) {
   12071 		/* Only 82545 is LX (XXX except SFP) */
   12072 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12073 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12074 	} else if (sc->sc_sfptype != 0) {
   12075 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12076 		switch (sc->sc_sfptype) {
   12077 		default:
   12078 		case SFF_SFP_ETH_FLAGS_1000SX:
   12079 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12080 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12081 			break;
   12082 		case SFF_SFP_ETH_FLAGS_1000LX:
   12083 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12084 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12085 			break;
   12086 		case SFF_SFP_ETH_FLAGS_1000CX:
   12087 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12088 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12089 			break;
   12090 		case SFF_SFP_ETH_FLAGS_1000T:
   12091 			ADD("1000baseT", IFM_1000_T, 0);
   12092 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12093 			break;
   12094 		case SFF_SFP_ETH_FLAGS_100FX:
   12095 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12096 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12097 			break;
   12098 		}
   12099 	} else {
   12100 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12101 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12102 	}
   12103 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12104 	aprint_normal("\n");
   12105 
   12106 #undef ADD
   12107 
   12108 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12109 }
   12110 
   12111 /*
   12112  * wm_tbi_mediachange:	[ifmedia interface function]
   12113  *
   12114  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12115  */
   12116 static int
   12117 wm_tbi_mediachange(struct ifnet *ifp)
   12118 {
   12119 	struct wm_softc *sc = ifp->if_softc;
   12120 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12121 	uint32_t status, ctrl;
   12122 	bool signal;
   12123 	int i;
   12124 
   12125 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12126 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12127 		/* XXX need some work for >= 82571 and < 82575 */
   12128 		if (sc->sc_type < WM_T_82575)
   12129 			return 0;
   12130 	}
   12131 
   12132 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12133 	    || (sc->sc_type >= WM_T_82575))
   12134 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12135 
   12136 	sc->sc_ctrl &= ~CTRL_LRST;
   12137 	sc->sc_txcw = TXCW_ANE;
   12138 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12139 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12140 	else if (ife->ifm_media & IFM_FDX)
   12141 		sc->sc_txcw |= TXCW_FD;
   12142 	else
   12143 		sc->sc_txcw |= TXCW_HD;
   12144 
   12145 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12146 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12147 
    12148 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12149 		device_xname(sc->sc_dev), sc->sc_txcw));
   12150 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12151 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12152 	CSR_WRITE_FLUSH(sc);
   12153 	delay(1000);
   12154 
   12155 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12156 	signal = wm_tbi_havesignal(sc, ctrl);
   12157 
   12158 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12159 		signal));
   12160 
   12161 	if (signal) {
   12162 		/* Have signal; wait for the link to come up. */
   12163 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12164 			delay(10000);
   12165 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12166 				break;
   12167 		}
   12168 
    12169 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   12170 			device_xname(sc->sc_dev), i));
   12171 
   12172 		status = CSR_READ(sc, WMREG_STATUS);
   12173 		DPRINTF(WM_DEBUG_LINK,
   12174 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12175 			device_xname(sc->sc_dev), status, STATUS_LU));
   12176 		if (status & STATUS_LU) {
   12177 			/* Link is up. */
   12178 			DPRINTF(WM_DEBUG_LINK,
   12179 			    ("%s: LINK: set media -> link up %s\n",
   12180 				device_xname(sc->sc_dev),
   12181 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12182 
   12183 			/*
    12184 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    12185 			 * automatically, so we should update sc->sc_ctrl.
   12186 			 */
   12187 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12188 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12189 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12190 			if (status & STATUS_FD)
   12191 				sc->sc_tctl |=
   12192 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12193 			else
   12194 				sc->sc_tctl |=
   12195 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12196 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12197 				sc->sc_fcrtl |= FCRTL_XONE;
   12198 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12199 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12200 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12201 			sc->sc_tbi_linkup = 1;
   12202 		} else {
   12203 			if (i == WM_LINKUP_TIMEOUT)
   12204 				wm_check_for_link(sc);
   12205 			/* Link is down. */
   12206 			DPRINTF(WM_DEBUG_LINK,
   12207 			    ("%s: LINK: set media -> link down\n",
   12208 				device_xname(sc->sc_dev)));
   12209 			sc->sc_tbi_linkup = 0;
   12210 		}
   12211 	} else {
   12212 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12213 			device_xname(sc->sc_dev)));
   12214 		sc->sc_tbi_linkup = 0;
   12215 	}
   12216 
   12217 	wm_tbi_serdes_set_linkled(sc);
   12218 
   12219 	return 0;
   12220 }
   12221 
   12222 /*
   12223  * wm_tbi_mediastatus:	[ifmedia interface function]
   12224  *
   12225  *	Get the current interface media status on a 1000BASE-X device.
   12226  */
   12227 static void
   12228 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12229 {
   12230 	struct wm_softc *sc = ifp->if_softc;
   12231 	uint32_t ctrl, status;
   12232 
   12233 	ifmr->ifm_status = IFM_AVALID;
   12234 	ifmr->ifm_active = IFM_ETHER;
   12235 
   12236 	status = CSR_READ(sc, WMREG_STATUS);
   12237 	if ((status & STATUS_LU) == 0) {
   12238 		ifmr->ifm_active |= IFM_NONE;
   12239 		return;
   12240 	}
   12241 
   12242 	ifmr->ifm_status |= IFM_ACTIVE;
   12243 	/* Only 82545 is LX */
   12244 	if (sc->sc_type == WM_T_82545)
   12245 		ifmr->ifm_active |= IFM_1000_LX;
   12246 	else
   12247 		ifmr->ifm_active |= IFM_1000_SX;
   12248 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12249 		ifmr->ifm_active |= IFM_FDX;
   12250 	else
   12251 		ifmr->ifm_active |= IFM_HDX;
   12252 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12253 	if (ctrl & CTRL_RFCE)
   12254 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12255 	if (ctrl & CTRL_TFCE)
   12256 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12257 }
   12258 
   12259 /* XXX TBI only */
   12260 static int
   12261 wm_check_for_link(struct wm_softc *sc)
   12262 {
   12263 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12264 	uint32_t rxcw;
   12265 	uint32_t ctrl;
   12266 	uint32_t status;
   12267 	bool signal;
   12268 
   12269 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12270 		device_xname(sc->sc_dev), __func__));
   12271 
   12272 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12273 		/* XXX need some work for >= 82571 */
   12274 		if (sc->sc_type >= WM_T_82571) {
   12275 			sc->sc_tbi_linkup = 1;
   12276 			return 0;
   12277 		}
   12278 	}
   12279 
   12280 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12281 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12282 	status = CSR_READ(sc, WMREG_STATUS);
   12283 	signal = wm_tbi_havesignal(sc, ctrl);
   12284 
   12285 	DPRINTF(WM_DEBUG_LINK,
   12286 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12287 		device_xname(sc->sc_dev), __func__, signal,
   12288 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12289 
   12290 	/*
   12291 	 * SWDPIN   LU RXCW
   12292 	 *	0    0	  0
   12293 	 *	0    0	  1	(should not happen)
   12294 	 *	0    1	  0	(should not happen)
   12295 	 *	0    1	  1	(should not happen)
   12296 	 *	1    0	  0	Disable autonego and force linkup
   12297 	 *	1    0	  1	got /C/ but not linkup yet
   12298 	 *	1    1	  0	(linkup)
   12299 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12300 	 *
   12301 	 */
   12302 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12303 		DPRINTF(WM_DEBUG_LINK,
   12304 		    ("%s: %s: force linkup and fullduplex\n",
   12305 			device_xname(sc->sc_dev), __func__));
   12306 		sc->sc_tbi_linkup = 0;
   12307 		/* Disable auto-negotiation in the TXCW register */
   12308 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12309 
   12310 		/*
   12311 		 * Force link-up and also force full-duplex.
   12312 		 *
    12313 		 * NOTE: The hardware updates TFCE and RFCE in CTRL
    12314 		 * automatically, so we should update sc->sc_ctrl.
   12315 		 */
   12316 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12317 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12318 	} else if (((status & STATUS_LU) != 0)
   12319 	    && ((rxcw & RXCW_C) != 0)
   12320 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12321 		sc->sc_tbi_linkup = 1;
   12322 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12323 			device_xname(sc->sc_dev),
   12324 			__func__));
   12325 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12326 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12327 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    12328 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12329 			device_xname(sc->sc_dev), __func__));
   12330 	} else {
   12331 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12332 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12333 			status));
   12334 	}
   12335 
   12336 	return 0;
   12337 }
   12338 
   12339 /*
   12340  * wm_tbi_tick:
   12341  *
   12342  *	Check the link on TBI devices.
   12343  *	This function acts as mii_tick().
   12344  */
   12345 static void
   12346 wm_tbi_tick(struct wm_softc *sc)
   12347 {
   12348 	struct mii_data *mii = &sc->sc_mii;
   12349 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12350 	uint32_t status;
   12351 
   12352 	KASSERT(WM_CORE_LOCKED(sc));
   12353 
   12354 	status = CSR_READ(sc, WMREG_STATUS);
   12355 
   12356 	/* XXX is this needed? */
   12357 	(void)CSR_READ(sc, WMREG_RXCW);
   12358 	(void)CSR_READ(sc, WMREG_CTRL);
   12359 
    12360 	/* Set link status */
   12361 	if ((status & STATUS_LU) == 0) {
   12362 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12363 			device_xname(sc->sc_dev)));
   12364 		sc->sc_tbi_linkup = 0;
   12365 	} else if (sc->sc_tbi_linkup == 0) {
   12366 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12367 			device_xname(sc->sc_dev),
   12368 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12369 		sc->sc_tbi_linkup = 1;
   12370 		sc->sc_tbi_serdes_ticks = 0;
   12371 	}
   12372 
   12373 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12374 		goto setled;
   12375 
   12376 	if ((status & STATUS_LU) == 0) {
   12377 		sc->sc_tbi_linkup = 0;
   12378 		/* If the timer expired, retry autonegotiation */
   12379 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12380 		    && (++sc->sc_tbi_serdes_ticks
   12381 			>= sc->sc_tbi_serdes_anegticks)) {
   12382 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12383 				device_xname(sc->sc_dev), __func__));
   12384 			sc->sc_tbi_serdes_ticks = 0;
   12385 			/*
   12386 			 * Reset the link, and let autonegotiation do
   12387 			 * its thing
   12388 			 */
   12389 			sc->sc_ctrl |= CTRL_LRST;
   12390 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12391 			CSR_WRITE_FLUSH(sc);
   12392 			delay(1000);
   12393 			sc->sc_ctrl &= ~CTRL_LRST;
   12394 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12395 			CSR_WRITE_FLUSH(sc);
   12396 			delay(1000);
   12397 			CSR_WRITE(sc, WMREG_TXCW,
   12398 			    sc->sc_txcw & ~TXCW_ANE);
   12399 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12400 		}
   12401 	}
   12402 
   12403 setled:
   12404 	wm_tbi_serdes_set_linkled(sc);
   12405 }
   12406 
   12407 /* SERDES related */
   12408 static void
   12409 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12410 {
   12411 	uint32_t reg;
   12412 
   12413 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12414 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12415 		return;
   12416 
   12417 	/* Enable PCS to turn on link */
   12418 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12419 	reg |= PCS_CFG_PCS_EN;
   12420 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12421 
   12422 	/* Power up the laser */
   12423 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12424 	reg &= ~CTRL_EXT_SWDPIN(3);
   12425 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12426 
   12427 	/* Flush the write to verify completion */
   12428 	CSR_WRITE_FLUSH(sc);
   12429 	delay(1000);
   12430 }
   12431 
   12432 static int
   12433 wm_serdes_mediachange(struct ifnet *ifp)
   12434 {
   12435 	struct wm_softc *sc = ifp->if_softc;
   12436 	bool pcs_autoneg = true; /* XXX */
   12437 	uint32_t ctrl_ext, pcs_lctl, reg;
   12438 
   12439 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12440 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12441 		return 0;
   12442 
   12443 	/* XXX Currently, this function is not called on 8257[12] */
   12444 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12445 	    || (sc->sc_type >= WM_T_82575))
   12446 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12447 
    12448 	/* Power on the SFP cage if present */
   12449 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12450 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12451 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12452 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12453 
   12454 	sc->sc_ctrl |= CTRL_SLU;
   12455 
   12456 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12457 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12458 
   12459 		reg = CSR_READ(sc, WMREG_CONNSW);
   12460 		reg |= CONNSW_ENRGSRC;
   12461 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12462 	}
   12463 
   12464 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12465 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12466 	case CTRL_EXT_LINK_MODE_SGMII:
   12467 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12468 		pcs_autoneg = true;
   12469 		/* Autoneg time out should be disabled for SGMII mode */
   12470 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12471 		break;
   12472 	case CTRL_EXT_LINK_MODE_1000KX:
   12473 		pcs_autoneg = false;
   12474 		/* FALLTHROUGH */
   12475 	default:
   12476 		if ((sc->sc_type == WM_T_82575)
   12477 		    || (sc->sc_type == WM_T_82576)) {
   12478 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12479 				pcs_autoneg = false;
   12480 		}
   12481 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12482 		    | CTRL_FRCFDX;
   12483 
   12484 		/* Set speed of 1000/Full if speed/duplex is forced */
   12485 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12486 	}
   12487 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12488 
   12489 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12490 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12491 
   12492 	if (pcs_autoneg) {
   12493 		/* Set PCS register for autoneg */
   12494 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12495 
   12496 		/* Disable force flow control for autoneg */
   12497 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12498 
   12499 		/* Configure flow control advertisement for autoneg */
   12500 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12501 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12502 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12503 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12504 	} else
   12505 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12506 
   12507 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12508 
   12509 	return 0;
   12510 }
   12511 
   12512 static void
   12513 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12514 {
   12515 	struct wm_softc *sc = ifp->if_softc;
   12516 	struct mii_data *mii = &sc->sc_mii;
   12517 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12518 	uint32_t pcs_adv, pcs_lpab, reg;
   12519 
   12520 	ifmr->ifm_status = IFM_AVALID;
   12521 	ifmr->ifm_active = IFM_ETHER;
   12522 
   12523 	/* Check PCS */
   12524 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12525 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12526 		ifmr->ifm_active |= IFM_NONE;
   12527 		sc->sc_tbi_linkup = 0;
   12528 		goto setled;
   12529 	}
   12530 
   12531 	sc->sc_tbi_linkup = 1;
   12532 	ifmr->ifm_status |= IFM_ACTIVE;
   12533 	if (sc->sc_type == WM_T_I354) {
   12534 		uint32_t status;
   12535 
   12536 		status = CSR_READ(sc, WMREG_STATUS);
   12537 		if (((status & STATUS_2P5_SKU) != 0)
   12538 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12539 			ifmr->ifm_active |= IFM_2500_KX;
   12540 		} else
   12541 			ifmr->ifm_active |= IFM_1000_KX;
   12542 	} else {
   12543 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12544 		case PCS_LSTS_SPEED_10:
   12545 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12546 			break;
   12547 		case PCS_LSTS_SPEED_100:
   12548 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12549 			break;
   12550 		case PCS_LSTS_SPEED_1000:
   12551 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12552 			break;
   12553 		default:
   12554 			device_printf(sc->sc_dev, "Unknown speed\n");
   12555 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12556 			break;
   12557 		}
   12558 	}
   12559 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12560 	if ((reg & PCS_LSTS_FDX) != 0)
   12561 		ifmr->ifm_active |= IFM_FDX;
   12562 	else
   12563 		ifmr->ifm_active |= IFM_HDX;
   12564 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12565 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12566 		/* Check flow */
   12567 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12568 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12569 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12570 			goto setled;
   12571 		}
   12572 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12573 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12574 		DPRINTF(WM_DEBUG_LINK,
   12575 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12576 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12577 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12578 			mii->mii_media_active |= IFM_FLOW
   12579 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12580 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12581 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12582 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12583 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12584 			mii->mii_media_active |= IFM_FLOW
   12585 			    | IFM_ETH_TXPAUSE;
   12586 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12587 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12588 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12589 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12590 			mii->mii_media_active |= IFM_FLOW
   12591 			    | IFM_ETH_RXPAUSE;
   12592 		}
   12593 	}
   12594 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12595 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12596 setled:
   12597 	wm_tbi_serdes_set_linkled(sc);
   12598 }
   12599 
   12600 /*
   12601  * wm_serdes_tick:
   12602  *
   12603  *	Check the link on serdes devices.
   12604  */
   12605 static void
   12606 wm_serdes_tick(struct wm_softc *sc)
   12607 {
   12608 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12609 	struct mii_data *mii = &sc->sc_mii;
   12610 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12611 	uint32_t reg;
   12612 
   12613 	KASSERT(WM_CORE_LOCKED(sc));
   12614 
   12615 	mii->mii_media_status = IFM_AVALID;
   12616 	mii->mii_media_active = IFM_ETHER;
   12617 
   12618 	/* Check PCS */
   12619 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12620 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12621 		mii->mii_media_status |= IFM_ACTIVE;
   12622 		sc->sc_tbi_linkup = 1;
   12623 		sc->sc_tbi_serdes_ticks = 0;
   12624 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12625 		if ((reg & PCS_LSTS_FDX) != 0)
   12626 			mii->mii_media_active |= IFM_FDX;
   12627 		else
   12628 			mii->mii_media_active |= IFM_HDX;
   12629 	} else {
    12630 		mii->mii_media_active |= IFM_NONE;
   12631 		sc->sc_tbi_linkup = 0;
   12632 		/* If the timer expired, retry autonegotiation */
   12633 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12634 		    && (++sc->sc_tbi_serdes_ticks
   12635 			>= sc->sc_tbi_serdes_anegticks)) {
   12636 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12637 				device_xname(sc->sc_dev), __func__));
   12638 			sc->sc_tbi_serdes_ticks = 0;
   12639 			/* XXX */
   12640 			wm_serdes_mediachange(ifp);
   12641 		}
   12642 	}
   12643 
   12644 	wm_tbi_serdes_set_linkled(sc);
   12645 }
   12646 
   12647 /* SFP related */
   12648 
   12649 static int
   12650 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12651 {
   12652 	uint32_t i2ccmd;
   12653 	int i;
   12654 
   12655 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12656 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12657 
   12658 	/* Poll the ready bit */
   12659 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12660 		delay(50);
   12661 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12662 		if (i2ccmd & I2CCMD_READY)
   12663 			break;
   12664 	}
   12665 	if ((i2ccmd & I2CCMD_READY) == 0)
   12666 		return -1;
   12667 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12668 		return -1;
   12669 
   12670 	*data = i2ccmd & 0x00ff;
   12671 
   12672 	return 0;
   12673 }
   12674 
   12675 static uint32_t
   12676 wm_sfp_get_media_type(struct wm_softc *sc)
   12677 {
   12678 	uint32_t ctrl_ext;
   12679 	uint8_t val = 0;
   12680 	int timeout = 3;
   12681 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12682 	int rv = -1;
   12683 
   12684 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12685 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12686 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12687 	CSR_WRITE_FLUSH(sc);
   12688 
   12689 	/* Read SFP module data */
   12690 	while (timeout) {
   12691 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12692 		if (rv == 0)
   12693 			break;
   12694 		delay(100*1000); /* XXX too big */
   12695 		timeout--;
   12696 	}
   12697 	if (rv != 0)
   12698 		goto out;
   12699 
   12700 	switch (val) {
   12701 	case SFF_SFP_ID_SFF:
   12702 		aprint_normal_dev(sc->sc_dev,
   12703 		    "Module/Connector soldered to board\n");
   12704 		break;
   12705 	case SFF_SFP_ID_SFP:
   12706 		sc->sc_flags |= WM_F_SFP;
   12707 		break;
   12708 	case SFF_SFP_ID_UNKNOWN:
   12709 		goto out;
   12710 	default:
   12711 		break;
   12712 	}
   12713 
   12714 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12715 	if (rv != 0)
   12716 		goto out;
   12717 
   12718 	sc->sc_sfptype = val;
   12719 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12720 		mediatype = WM_MEDIATYPE_SERDES;
   12721 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12722 		sc->sc_flags |= WM_F_SGMII;
   12723 		mediatype = WM_MEDIATYPE_COPPER;
   12724 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12725 		sc->sc_flags |= WM_F_SGMII;
   12726 		mediatype = WM_MEDIATYPE_SERDES;
   12727 	} else {
   12728 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12729 		    __func__, sc->sc_sfptype);
   12730 		sc->sc_sfptype = 0; /* XXX unknown */
   12731 	}
   12732 
   12733 out:
   12734 	/* Restore I2C interface setting */
   12735 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12736 
   12737 	return mediatype;
   12738 }
   12739 
   12740 /*
   12741  * NVM related.
    12742  * Microwire, SPI (with or without EERD) and Flash.
   12743  */
   12744 
   12745 /* Both spi and uwire */
   12746 
   12747 /*
   12748  * wm_eeprom_sendbits:
   12749  *
   12750  *	Send a series of bits to the EEPROM.
   12751  */
   12752 static void
   12753 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12754 {
   12755 	uint32_t reg;
   12756 	int x;
   12757 
   12758 	reg = CSR_READ(sc, WMREG_EECD);
   12759 
   12760 	for (x = nbits; x > 0; x--) {
   12761 		if (bits & (1U << (x - 1)))
   12762 			reg |= EECD_DI;
   12763 		else
   12764 			reg &= ~EECD_DI;
   12765 		CSR_WRITE(sc, WMREG_EECD, reg);
   12766 		CSR_WRITE_FLUSH(sc);
   12767 		delay(2);
   12768 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12769 		CSR_WRITE_FLUSH(sc);
   12770 		delay(2);
   12771 		CSR_WRITE(sc, WMREG_EECD, reg);
   12772 		CSR_WRITE_FLUSH(sc);
   12773 		delay(2);
   12774 	}
   12775 }
   12776 
   12777 /*
   12778  * wm_eeprom_recvbits:
   12779  *
   12780  *	Receive a series of bits from the EEPROM.
   12781  */
   12782 static void
   12783 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12784 {
   12785 	uint32_t reg, val;
   12786 	int x;
   12787 
   12788 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12789 
   12790 	val = 0;
   12791 	for (x = nbits; x > 0; x--) {
   12792 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12793 		CSR_WRITE_FLUSH(sc);
   12794 		delay(2);
   12795 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12796 			val |= (1U << (x - 1));
   12797 		CSR_WRITE(sc, WMREG_EECD, reg);
   12798 		CSR_WRITE_FLUSH(sc);
   12799 		delay(2);
   12800 	}
   12801 	*valp = val;
   12802 }
   12803 
   12804 /* Microwire */
   12805 
   12806 /*
   12807  * wm_nvm_read_uwire:
   12808  *
   12809  *	Read a word from the EEPROM using the MicroWire protocol.
   12810  */
   12811 static int
   12812 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12813 {
   12814 	uint32_t reg, val;
   12815 	int i;
   12816 
   12817 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12818 		device_xname(sc->sc_dev), __func__));
   12819 
   12820 	if (sc->nvm.acquire(sc) != 0)
   12821 		return -1;
   12822 
   12823 	for (i = 0; i < wordcnt; i++) {
   12824 		/* Clear SK and DI. */
   12825 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12826 		CSR_WRITE(sc, WMREG_EECD, reg);
   12827 
   12828 		/*
   12829 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12830 		 * and Xen.
   12831 		 *
    12832 		 * We use this workaround only for the 82540 because
    12833 		 * qemu's e1000 acts as an 82540.
   12834 		 */
   12835 		if (sc->sc_type == WM_T_82540) {
   12836 			reg |= EECD_SK;
   12837 			CSR_WRITE(sc, WMREG_EECD, reg);
   12838 			reg &= ~EECD_SK;
   12839 			CSR_WRITE(sc, WMREG_EECD, reg);
   12840 			CSR_WRITE_FLUSH(sc);
   12841 			delay(2);
   12842 		}
   12843 		/* XXX: end of workaround */
   12844 
   12845 		/* Set CHIP SELECT. */
   12846 		reg |= EECD_CS;
   12847 		CSR_WRITE(sc, WMREG_EECD, reg);
   12848 		CSR_WRITE_FLUSH(sc);
   12849 		delay(2);
   12850 
   12851 		/* Shift in the READ command. */
   12852 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12853 
   12854 		/* Shift in address. */
   12855 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12856 
   12857 		/* Shift out the data. */
   12858 		wm_eeprom_recvbits(sc, &val, 16);
   12859 		data[i] = val & 0xffff;
   12860 
   12861 		/* Clear CHIP SELECT. */
   12862 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12863 		CSR_WRITE(sc, WMREG_EECD, reg);
   12864 		CSR_WRITE_FLUSH(sc);
   12865 		delay(2);
   12866 	}
   12867 
   12868 	sc->nvm.release(sc);
   12869 	return 0;
   12870 }
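
/*
 * Illustrative sketch (not part of the driver, compiled out): one
 * Microwire READ transaction above shifts out a 3-bit opcode and an
 * sc_nvm_addrbits-bit address, then shifts in 16 data bits, so a
 * hypothetical per-word clock count is:
 */
#if 0
static int
uwire_clocks_per_word(const struct wm_softc *sc)
{

	return 3 + sc->sc_nvm_addrbits + 16;
}
#endif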
   12871 
   12872 /* SPI */
   12873 
   12874 /*
   12875  * Set SPI and FLASH related information from the EECD register.
   12876  * For 82541 and 82547, the word size is taken from EEPROM.
   12877  */
   12878 static int
   12879 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12880 {
   12881 	int size;
   12882 	uint32_t reg;
   12883 	uint16_t data;
   12884 
   12885 	reg = CSR_READ(sc, WMREG_EECD);
   12886 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12887 
   12888 	/* Read the size of NVM from EECD by default */
   12889 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12890 	switch (sc->sc_type) {
   12891 	case WM_T_82541:
   12892 	case WM_T_82541_2:
   12893 	case WM_T_82547:
   12894 	case WM_T_82547_2:
    12895 		/* Set a dummy size so we can read the real size from EEPROM */
   12896 		sc->sc_nvm_wordsize = 64;
   12897 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12898 			aprint_error_dev(sc->sc_dev,
   12899 			    "%s: failed to read EEPROM size\n", __func__);
   12900 		}
   12901 		reg = data;
   12902 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12903 		if (size == 0)
    12904 			size = 6; /* 64 words */
   12905 		else
   12906 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12907 		break;
   12908 	case WM_T_80003:
   12909 	case WM_T_82571:
   12910 	case WM_T_82572:
   12911 	case WM_T_82573: /* SPI case */
   12912 	case WM_T_82574: /* SPI case */
   12913 	case WM_T_82583: /* SPI case */
   12914 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12915 		if (size > 14)
   12916 			size = 14;
   12917 		break;
   12918 	case WM_T_82575:
   12919 	case WM_T_82576:
   12920 	case WM_T_82580:
   12921 	case WM_T_I350:
   12922 	case WM_T_I354:
   12923 	case WM_T_I210:
   12924 	case WM_T_I211:
   12925 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12926 		if (size > 15)
   12927 			size = 15;
   12928 		break;
   12929 	default:
   12930 		aprint_error_dev(sc->sc_dev,
   12931 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12932 		return -1;
   12934 	}
   12935 
   12936 	sc->sc_nvm_wordsize = 1 << size;
   12937 
   12938 	return 0;
   12939 }
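
/*
 * Illustrative sketch (not part of the driver, compiled out): how the
 * EECD size field becomes a word count above.  The concrete value of
 * NVM_WORD_SIZE_BASE_SHIFT (6 here) is an assumption borrowed from
 * Intel's shared code; the driver itself only relies on
 * sc_nvm_wordsize = 1 << size.
 */
#if 0
static int
eecd_field_to_wordsize(int field)
{

	/* e.g. field 4 -> 1 << (4 + 6) = 1024 16-bit words */
	return 1 << (field + 6 /* assumed NVM_WORD_SIZE_BASE_SHIFT */);
}
#endif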
   12940 
   12941 /*
   12942  * wm_nvm_ready_spi:
   12943  *
   12944  *	Wait for a SPI EEPROM to be ready for commands.
   12945  */
   12946 static int
   12947 wm_nvm_ready_spi(struct wm_softc *sc)
   12948 {
   12949 	uint32_t val;
   12950 	int usec;
   12951 
   12952 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12953 		device_xname(sc->sc_dev), __func__));
   12954 
   12955 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12956 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12957 		wm_eeprom_recvbits(sc, &val, 8);
   12958 		if ((val & SPI_SR_RDY) == 0)
   12959 			break;
   12960 	}
   12961 	if (usec >= SPI_MAX_RETRIES) {
    12962 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   12963 		return -1;
   12964 	}
   12965 	return 0;
   12966 }
   12967 
   12968 /*
   12969  * wm_nvm_read_spi:
   12970  *
    12971  *	Read a word from the EEPROM using the SPI protocol.
   12972  */
   12973 static int
   12974 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12975 {
   12976 	uint32_t reg, val;
   12977 	int i;
   12978 	uint8_t opc;
   12979 	int rv = 0;
   12980 
   12981 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12982 		device_xname(sc->sc_dev), __func__));
   12983 
   12984 	if (sc->nvm.acquire(sc) != 0)
   12985 		return -1;
   12986 
   12987 	/* Clear SK and CS. */
   12988 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12989 	CSR_WRITE(sc, WMREG_EECD, reg);
   12990 	CSR_WRITE_FLUSH(sc);
   12991 	delay(2);
   12992 
   12993 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12994 		goto out;
   12995 
   12996 	/* Toggle CS to flush commands. */
   12997 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12998 	CSR_WRITE_FLUSH(sc);
   12999 	delay(2);
   13000 	CSR_WRITE(sc, WMREG_EECD, reg);
   13001 	CSR_WRITE_FLUSH(sc);
   13002 	delay(2);
   13003 
   13004 	opc = SPI_OPC_READ;
   13005 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13006 		opc |= SPI_OPC_A8;
   13007 
   13008 	wm_eeprom_sendbits(sc, opc, 8);
   13009 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13010 
   13011 	for (i = 0; i < wordcnt; i++) {
   13012 		wm_eeprom_recvbits(sc, &val, 16);
   13013 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13014 	}
   13015 
   13016 	/* Raise CS and clear SK. */
   13017 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13018 	CSR_WRITE(sc, WMREG_EECD, reg);
   13019 	CSR_WRITE_FLUSH(sc);
   13020 	delay(2);
   13021 
   13022 out:
   13023 	sc->nvm.release(sc);
   13024 	return rv;
   13025 }
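
/*
 * Illustrative sketch (not part of the driver, compiled out): the SPI
 * path above addresses the part in bytes (hence "word << 1") and
 * clocks in the two bytes of each word in address order.  NVM words
 * are stored little-endian, so the first byte received (which lands
 * in the high bits of val) is the word's low byte, hence the swap.
 */
#if 0
static uint16_t
spi_word_from_wire(uint32_t val)
{

	return (uint16_t)(((val >> 8) & 0xff) | ((val & 0xff) << 8));
}
#endif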
   13026 
    13027 /* Reading via the EERD (EEPROM Read) register */
   13028 
   13029 static int
   13030 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13031 {
   13032 	uint32_t attempts = 100000;
   13033 	uint32_t i, reg = 0;
   13034 	int32_t done = -1;
   13035 
   13036 	for (i = 0; i < attempts; i++) {
   13037 		reg = CSR_READ(sc, rw);
   13038 
   13039 		if (reg & EERD_DONE) {
   13040 			done = 0;
   13041 			break;
   13042 		}
   13043 		delay(5);
   13044 	}
   13045 
   13046 	return done;
   13047 }
   13048 
   13049 static int
   13050 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13051 {
   13052 	int i, eerd = 0;
   13053 	int rv = 0;
   13054 
   13055 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13056 		device_xname(sc->sc_dev), __func__));
   13057 
   13058 	if (sc->nvm.acquire(sc) != 0)
   13059 		return -1;
   13060 
   13061 	for (i = 0; i < wordcnt; i++) {
   13062 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13063 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13064 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13065 		if (rv != 0) {
   13066 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13067 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13068 			break;
   13069 		}
   13070 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13071 	}
   13072 
   13073 	sc->nvm.release(sc);
   13074 	return rv;
   13075 }
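
/*
 * Illustrative sketch (not part of the driver, compiled out): the EERD
 * command layout used above.  A read starts by writing the word
 * address plus EERD_START; the result is taken from the data field
 * once EERD_DONE has been polled by wm_poll_eerd_eewr_done().
 */
#if 0
static uint32_t
eerd_read_cmd(int word)
{

	return ((uint32_t)word << EERD_ADDR_SHIFT) | EERD_START;
}
#endif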
   13076 
   13077 /* Flash */
   13078 
   13079 static int
   13080 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13081 {
   13082 	uint32_t eecd;
   13083 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13084 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13085 	uint32_t nvm_dword = 0;
   13086 	uint8_t sig_byte = 0;
   13087 	int rv;
   13088 
   13089 	switch (sc->sc_type) {
   13090 	case WM_T_PCH_SPT:
   13091 	case WM_T_PCH_CNP:
   13092 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13093 		act_offset = ICH_NVM_SIG_WORD * 2;
   13094 
   13095 		/* Set bank to 0 in case flash read fails. */
   13096 		*bank = 0;
   13097 
   13098 		/* Check bank 0 */
   13099 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13100 		if (rv != 0)
   13101 			return rv;
   13102 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13103 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13104 			*bank = 0;
   13105 			return 0;
   13106 		}
   13107 
   13108 		/* Check bank 1 */
    13109 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13110 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    13111 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13112 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13113 			*bank = 1;
   13114 			return 0;
   13115 		}
   13116 		aprint_error_dev(sc->sc_dev,
   13117 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13118 		return -1;
   13119 	case WM_T_ICH8:
   13120 	case WM_T_ICH9:
   13121 		eecd = CSR_READ(sc, WMREG_EECD);
   13122 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13123 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13124 			return 0;
   13125 		}
   13126 		/* FALLTHROUGH */
   13127 	default:
   13128 		/* Default to 0 */
   13129 		*bank = 0;
   13130 
   13131 		/* Check bank 0 */
   13132 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13133 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13134 			*bank = 0;
   13135 			return 0;
   13136 		}
   13137 
   13138 		/* Check bank 1 */
   13139 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13140 		    &sig_byte);
   13141 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13142 			*bank = 1;
   13143 			return 0;
   13144 		}
   13145 	}
   13146 
   13147 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13148 		device_xname(sc->sc_dev)));
   13149 	return -1;
   13150 }
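
/*
 * Illustrative sketch (not part of the driver, compiled out): the bank
 * signature test used above.  Each flash bank carries a signature
 * byte, bank 1 lives one flash-bank-size past bank 0, and a bank is
 * considered valid when the masked signature matches
 * ICH_NVM_SIG_VALUE.
 */
#if 0
static bool
ich_bank_sig_valid(uint8_t sig_byte)
{

	return (sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE;
}
#endif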
   13151 
   13152 /******************************************************************************
   13153  * This function does initial flash setup so that a new read/write/erase cycle
   13154  * can be started.
   13155  *
   13156  * sc - The pointer to the hw structure
   13157  ****************************************************************************/
   13158 static int32_t
   13159 wm_ich8_cycle_init(struct wm_softc *sc)
   13160 {
   13161 	uint16_t hsfsts;
   13162 	int32_t error = 1;
   13163 	int32_t i     = 0;
   13164 
   13165 	if (sc->sc_type >= WM_T_PCH_SPT)
   13166 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13167 	else
   13168 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13169 
    13170 	/* Check the Flash Descriptor Valid bit in the Hw status */
   13171 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13172 		return error;
   13173 
   13174 	/* Clear FCERR in Hw status by writing 1 */
   13175 	/* Clear DAEL in Hw status by writing a 1 */
   13176 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13177 
   13178 	if (sc->sc_type >= WM_T_PCH_SPT)
   13179 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13180 	else
   13181 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13182 
    13183 	/*
    13184 	 * Either we should have a hardware SPI cycle-in-progress bit to
    13185 	 * check against in order to start a new cycle, or the FDONE bit
    13186 	 * should read as 1 after a hardware reset so that it can indicate
    13187 	 * whether a cycle is in progress or has completed.  We should
    13188 	 * also have a software semaphore mechanism guarding FDONE or the
    13189 	 * cycle-in-progress bit so that accesses by two threads are
    13190 	 * serialized, or some other way to keep two threads from starting
    13191 	 * a cycle at the same time.
    13192 	 */
   13193 
   13194 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13195 		/*
   13196 		 * There is no cycle running at present, so we can start a
   13197 		 * cycle
   13198 		 */
   13199 
   13200 		/* Begin by setting Flash Cycle Done. */
   13201 		hsfsts |= HSFSTS_DONE;
   13202 		if (sc->sc_type >= WM_T_PCH_SPT)
   13203 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13204 			    hsfsts & 0xffffUL);
   13205 		else
   13206 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13207 		error = 0;
   13208 	} else {
   13209 		/*
    13210 		 * Otherwise poll for some time so the current cycle has a
    13211 		 * chance to end before giving up.
   13212 		 */
   13213 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13214 			if (sc->sc_type >= WM_T_PCH_SPT)
   13215 				hsfsts = ICH8_FLASH_READ32(sc,
   13216 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13217 			else
   13218 				hsfsts = ICH8_FLASH_READ16(sc,
   13219 				    ICH_FLASH_HSFSTS);
   13220 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13221 				error = 0;
   13222 				break;
   13223 			}
   13224 			delay(1);
   13225 		}
   13226 		if (error == 0) {
   13227 			/*
    13228 			 * The previous cycle completed within the timeout,
    13229 			 * so set the Flash Cycle Done bit.
   13230 			 */
   13231 			hsfsts |= HSFSTS_DONE;
   13232 			if (sc->sc_type >= WM_T_PCH_SPT)
   13233 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13234 				    hsfsts & 0xffffUL);
   13235 			else
   13236 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13237 				    hsfsts);
   13238 		}
   13239 	}
   13240 	return error;
   13241 }
   13242 
   13243 /******************************************************************************
   13244  * This function starts a flash cycle and waits for its completion
   13245  *
   13246  * sc - The pointer to the hw structure
   13247  ****************************************************************************/
   13248 static int32_t
   13249 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13250 {
   13251 	uint16_t hsflctl;
   13252 	uint16_t hsfsts;
   13253 	int32_t error = 1;
   13254 	uint32_t i = 0;
   13255 
   13256 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13257 	if (sc->sc_type >= WM_T_PCH_SPT)
   13258 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13259 	else
   13260 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13261 	hsflctl |= HSFCTL_GO;
   13262 	if (sc->sc_type >= WM_T_PCH_SPT)
   13263 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13264 		    (uint32_t)hsflctl << 16);
   13265 	else
   13266 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13267 
   13268 	/* Wait till FDONE bit is set to 1 */
   13269 	do {
   13270 		if (sc->sc_type >= WM_T_PCH_SPT)
   13271 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13272 			    & 0xffffUL;
   13273 		else
   13274 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13275 		if (hsfsts & HSFSTS_DONE)
   13276 			break;
   13277 		delay(1);
   13278 		i++;
   13279 	} while (i < timeout);
    13280 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   13281 		error = 0;
   13282 
   13283 	return error;
   13284 }
   13285 
   13286 /******************************************************************************
   13287  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13288  *
   13289  * sc - The pointer to the hw structure
   13290  * index - The index of the byte or word to read.
   13291  * size - Size of data to read, 1=byte 2=word, 4=dword
   13292  * data - Pointer to the word to store the value read.
   13293  *****************************************************************************/
   13294 static int32_t
   13295 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13296     uint32_t size, uint32_t *data)
   13297 {
   13298 	uint16_t hsfsts;
   13299 	uint16_t hsflctl;
   13300 	uint32_t flash_linear_address;
   13301 	uint32_t flash_data = 0;
   13302 	int32_t error = 1;
   13303 	int32_t count = 0;
   13304 
    13305 	if (size < 1 || size > 4 || data == NULL ||
   13306 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13307 		return error;
   13308 
   13309 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13310 	    sc->sc_ich8_flash_base;
   13311 
   13312 	do {
   13313 		delay(1);
   13314 		/* Steps */
   13315 		error = wm_ich8_cycle_init(sc);
   13316 		if (error)
   13317 			break;
   13318 
   13319 		if (sc->sc_type >= WM_T_PCH_SPT)
   13320 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13321 			    >> 16;
   13322 		else
   13323 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13324 		/* The BCOUNT field holds size - 1: 0 means one byte. */
   13325 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13326 		    & HSFCTL_BCOUNT_MASK;
   13327 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13328 		if (sc->sc_type >= WM_T_PCH_SPT) {
    13329 			/*
    13330 			 * In SPT, this register is in the LAN memory space,
    13331 			 * not flash, so only 32 bit access is supported.
    13332 			 */
   13333 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13334 			    (uint32_t)hsflctl << 16);
   13335 		} else
   13336 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13337 
   13338 		/*
   13339 		 * Write the last 24 bits of index into Flash Linear address
   13340 		 * field in Flash Address
   13341 		 */
    13342 		/* TODO: maybe check the index against the size of flash */
   13343 
   13344 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13345 
   13346 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13347 
    13348 		/*
    13349 		 * If FCERR is set, clear it and retry the whole sequence
    13350 		 * a few more times; otherwise read in (shift in) the
    13351 		 * Flash Data0 register, which returns the data least
    13352 		 * significant byte first.
    13353 		 */
   13354 		if (error == 0) {
   13355 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13356 			if (size == 1)
   13357 				*data = (uint8_t)(flash_data & 0x000000FF);
   13358 			else if (size == 2)
   13359 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13360 			else if (size == 4)
   13361 				*data = (uint32_t)flash_data;
   13362 			break;
   13363 		} else {
   13364 			/*
   13365 			 * If we've gotten here, then things are probably
   13366 			 * completely hosed, but if the error condition is
   13367 			 * detected, it won't hurt to give it another try...
   13368 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13369 			 */
   13370 			if (sc->sc_type >= WM_T_PCH_SPT)
   13371 				hsfsts = ICH8_FLASH_READ32(sc,
   13372 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13373 			else
   13374 				hsfsts = ICH8_FLASH_READ16(sc,
   13375 				    ICH_FLASH_HSFSTS);
   13376 
   13377 			if (hsfsts & HSFSTS_ERR) {
   13378 				/* Repeat for some time before giving up. */
   13379 				continue;
   13380 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13381 				break;
   13382 		}
   13383 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13384 
   13385 	return error;
   13386 }
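
/*
 * Illustrative sketch (not part of the driver, compiled out): how the
 * FDATA0 contents are narrowed to the requested size in the success
 * path of wm_read_ich8_data() above.
 */
#if 0
static uint32_t
fdata0_extract(uint32_t flash_data, uint32_t size)
{

	switch (size) {
	case 1:
		return flash_data & 0x000000FF;	/* single byte */
	case 2:
		return flash_data & 0x0000FFFF;	/* 16-bit word */
	default:
		return flash_data;		/* full dword */
	}
}
#endif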
   13387 
   13388 /******************************************************************************
   13389  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13390  *
    13391  * sc - pointer to the wm_softc structure
   13392  * index - The index of the byte to read.
   13393  * data - Pointer to a byte to store the value read.
   13394  *****************************************************************************/
   13395 static int32_t
   13396 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13397 {
   13398 	int32_t status;
   13399 	uint32_t word = 0;
   13400 
   13401 	status = wm_read_ich8_data(sc, index, 1, &word);
   13402 	if (status == 0)
   13403 		*data = (uint8_t)word;
   13404 	else
   13405 		*data = 0;
   13406 
   13407 	return status;
   13408 }
   13409 
   13410 /******************************************************************************
   13411  * Reads a word from the NVM using the ICH8 flash access registers.
   13412  *
    13413  * sc - pointer to the wm_softc structure
   13414  * index - The starting byte index of the word to read.
   13415  * data - Pointer to a word to store the value read.
   13416  *****************************************************************************/
   13417 static int32_t
   13418 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13419 {
   13420 	int32_t status;
   13421 	uint32_t word = 0;
   13422 
   13423 	status = wm_read_ich8_data(sc, index, 2, &word);
   13424 	if (status == 0)
   13425 		*data = (uint16_t)word;
   13426 	else
   13427 		*data = 0;
   13428 
   13429 	return status;
   13430 }
   13431 
   13432 /******************************************************************************
   13433  * Reads a dword from the NVM using the ICH8 flash access registers.
   13434  *
    13435  * sc - pointer to the wm_softc structure
    13436  * index - The starting byte index of the dword to read.
    13437  * data - Pointer to a dword to store the value read.
   13438  *****************************************************************************/
   13439 static int32_t
   13440 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13441 {
   13442 	int32_t status;
   13443 
   13444 	status = wm_read_ich8_data(sc, index, 4, data);
   13445 	return status;
   13446 }
   13447 
   13448 /******************************************************************************
   13449  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13450  * register.
   13451  *
   13452  * sc - Struct containing variables accessed by shared code
   13453  * offset - offset of word in the EEPROM to read
   13454  * data - word read from the EEPROM
   13455  * words - number of words to read
   13456  *****************************************************************************/
   13457 static int
   13458 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13459 {
   13460 	int32_t	 rv = 0;
   13461 	uint32_t flash_bank = 0;
   13462 	uint32_t act_offset = 0;
   13463 	uint32_t bank_offset = 0;
   13464 	uint16_t word = 0;
   13465 	uint16_t i = 0;
   13466 
   13467 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13468 		device_xname(sc->sc_dev), __func__));
   13469 
   13470 	if (sc->nvm.acquire(sc) != 0)
   13471 		return -1;
   13472 
   13473 	/*
   13474 	 * We need to know which is the valid flash bank.  In the event
   13475 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13476 	 * managing flash_bank. So it cannot be trusted and needs
   13477 	 * to be updated with each read.
   13478 	 */
   13479 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13480 	if (rv) {
   13481 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13482 			device_xname(sc->sc_dev)));
   13483 		flash_bank = 0;
   13484 	}
   13485 
   13486 	/*
   13487 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13488 	 * size
   13489 	 */
   13490 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13491 
   13492 	for (i = 0; i < words; i++) {
   13493 		/* The NVM part needs a byte offset, hence * 2 */
   13494 		act_offset = bank_offset + ((offset + i) * 2);
   13495 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13496 		if (rv) {
   13497 			aprint_error_dev(sc->sc_dev,
   13498 			    "%s: failed to read NVM\n", __func__);
   13499 			break;
   13500 		}
   13501 		data[i] = word;
   13502 	}
   13503 
   13504 	sc->nvm.release(sc);
   13505 	return rv;
   13506 }
   13507 
   13508 /******************************************************************************
   13509  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13510  * register.
   13511  *
   13512  * sc - Struct containing variables accessed by shared code
   13513  * offset - offset of word in the EEPROM to read
   13514  * data - word read from the EEPROM
   13515  * words - number of words to read
   13516  *****************************************************************************/
   13517 static int
   13518 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13519 {
   13520 	int32_t	 rv = 0;
   13521 	uint32_t flash_bank = 0;
   13522 	uint32_t act_offset = 0;
   13523 	uint32_t bank_offset = 0;
   13524 	uint32_t dword = 0;
   13525 	uint16_t i = 0;
   13526 
   13527 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13528 		device_xname(sc->sc_dev), __func__));
   13529 
   13530 	if (sc->nvm.acquire(sc) != 0)
   13531 		return -1;
   13532 
   13533 	/*
   13534 	 * We need to know which is the valid flash bank.  In the event
   13535 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13536 	 * managing flash_bank. So it cannot be trusted and needs
   13537 	 * to be updated with each read.
   13538 	 */
   13539 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13540 	if (rv) {
   13541 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13542 			device_xname(sc->sc_dev)));
   13543 		flash_bank = 0;
   13544 	}
   13545 
   13546 	/*
   13547 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13548 	 * size
   13549 	 */
   13550 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13551 
   13552 	for (i = 0; i < words; i++) {
   13553 		/* The NVM part needs a byte offset, hence * 2 */
   13554 		act_offset = bank_offset + ((offset + i) * 2);
   13555 		/* but we must read dword aligned, so mask ... */
   13556 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13557 		if (rv) {
   13558 			aprint_error_dev(sc->sc_dev,
   13559 			    "%s: failed to read NVM\n", __func__);
   13560 			break;
   13561 		}
   13562 		/* ... and pick out low or high word */
   13563 		if ((act_offset & 0x2) == 0)
   13564 			data[i] = (uint16_t)(dword & 0xFFFF);
   13565 		else
   13566 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13567 	}
   13568 
   13569 	sc->nvm.release(sc);
   13570 	return rv;
   13571 }
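
/*
 * Illustrative sketch (not part of the driver, compiled out): the
 * dword-aligned access used by wm_nvm_read_spt() above.  SPT-class
 * flash reads must be 32 bits wide, so a 16-bit word at byte offset
 * "byte_off" is fetched by reading the containing dword
 * (byte_off & ~0x3) and then picking one half of it.
 */
#if 0
static uint16_t
spt_pick_word(uint32_t dword, uint32_t byte_off)
{

	if ((byte_off & 0x2) == 0)
		return (uint16_t)(dword & 0xFFFF);	/* low half */
	return (uint16_t)((dword >> 16) & 0xFFFF);	/* high half */
}
#endif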
   13572 
   13573 /* iNVM */
   13574 
   13575 static int
   13576 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13577 {
    13578 	int32_t	 rv = -1;
   13579 	uint32_t invm_dword;
   13580 	uint16_t i;
   13581 	uint8_t record_type, word_address;
   13582 
   13583 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13584 		device_xname(sc->sc_dev), __func__));
   13585 
   13586 	for (i = 0; i < INVM_SIZE; i++) {
   13587 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13588 		/* Get record type */
   13589 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13590 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13591 			break;
   13592 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13593 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13594 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13595 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13596 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13597 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13598 			if (word_address == address) {
   13599 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13600 				rv = 0;
   13601 				break;
   13602 			}
   13603 		}
   13604 	}
   13605 
   13606 	return rv;
   13607 }
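
/*
 * Illustrative sketch (not part of the driver, compiled out): the iNVM
 * walk above.  The I210/I211 iNVM is a sequence of dword records; the
 * record type decides how many dwords to skip, and a word-autoload
 * record carries the (word address, data) pair the loop matches
 * against the requested address.
 */
#if 0
static bool
invm_record_matches(uint32_t invm_dword, uint16_t address, uint16_t *data)
{

	if (INVM_DWORD_TO_RECORD_TYPE(invm_dword) !=
	    INVM_WORD_AUTOLOAD_STRUCTURE)
		return false;
	if (INVM_DWORD_TO_WORD_ADDRESS(invm_dword) != address)
		return false;
	*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
	return true;
}
#endif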
   13608 
   13609 static int
   13610 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13611 {
   13612 	int rv = 0;
   13613 	int i;
   13614 
   13615 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13616 		device_xname(sc->sc_dev), __func__));
   13617 
   13618 	if (sc->nvm.acquire(sc) != 0)
   13619 		return -1;
   13620 
   13621 	for (i = 0; i < words; i++) {
   13622 		switch (offset + i) {
   13623 		case NVM_OFF_MACADDR:
   13624 		case NVM_OFF_MACADDR1:
   13625 		case NVM_OFF_MACADDR2:
   13626 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13627 			if (rv != 0) {
   13628 				data[i] = 0xffff;
   13629 				rv = -1;
   13630 			}
   13631 			break;
   13632 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13633 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13634 			if (rv != 0) {
   13635 				*data = INVM_DEFAULT_AL;
   13636 				rv = 0;
   13637 			}
   13638 			break;
   13639 		case NVM_OFF_CFG2:
   13640 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13641 			if (rv != 0) {
   13642 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13643 				rv = 0;
   13644 			}
   13645 			break;
   13646 		case NVM_OFF_CFG4:
   13647 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13648 			if (rv != 0) {
   13649 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13650 				rv = 0;
   13651 			}
   13652 			break;
   13653 		case NVM_OFF_LED_1_CFG:
   13654 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13655 			if (rv != 0) {
   13656 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13657 				rv = 0;
   13658 			}
   13659 			break;
   13660 		case NVM_OFF_LED_0_2_CFG:
   13661 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13662 			if (rv != 0) {
   13663 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13664 				rv = 0;
   13665 			}
   13666 			break;
   13667 		case NVM_OFF_ID_LED_SETTINGS:
   13668 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13669 			if (rv != 0) {
   13670 				*data = ID_LED_RESERVED_FFFF;
   13671 				rv = 0;
   13672 			}
   13673 			break;
   13674 		default:
   13675 			DPRINTF(WM_DEBUG_NVM,
   13676 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13677 			*data = NVM_RESERVED_WORD;
   13678 			break;
   13679 		}
   13680 	}
   13681 
   13682 	sc->nvm.release(sc);
   13683 	return rv;
   13684 }
   13685 
   13686 /* Lock, detecting NVM type, validate checksum, version and read */
   13687 
   13688 static int
   13689 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13690 {
   13691 	uint32_t eecd = 0;
   13692 
   13693 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13694 	    || sc->sc_type == WM_T_82583) {
   13695 		eecd = CSR_READ(sc, WMREG_EECD);
   13696 
   13697 		/* Isolate bits 15 & 16 */
   13698 		eecd = ((eecd >> 15) & 0x03);
   13699 
   13700 		/* If both bits are set, device is Flash type */
   13701 		if (eecd == 0x03)
   13702 			return 0;
   13703 	}
   13704 	return 1;
   13705 }
   13706 
   13707 static int
   13708 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13709 {
   13710 	uint32_t eec;
   13711 
   13712 	eec = CSR_READ(sc, WMREG_EEC);
   13713 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13714 		return 1;
   13715 
   13716 	return 0;
   13717 }
   13718 
   13719 /*
   13720  * wm_nvm_validate_checksum
   13721  *
    13722  * The checksum is defined as the sum of the first 64 (16 bit) words,
           * which must equal NVM_CHECKSUM.
   13723  */
   13724 static int
   13725 wm_nvm_validate_checksum(struct wm_softc *sc)
   13726 {
   13727 	uint16_t checksum;
   13728 	uint16_t eeprom_data;
   13729 #ifdef WM_DEBUG
   13730 	uint16_t csum_wordaddr, valid_checksum;
   13731 #endif
   13732 	int i;
   13733 
   13734 	checksum = 0;
   13735 
   13736 	/* Don't check for I211 */
   13737 	if (sc->sc_type == WM_T_I211)
   13738 		return 0;
   13739 
   13740 #ifdef WM_DEBUG
   13741 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13742 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13743 		csum_wordaddr = NVM_OFF_COMPAT;
   13744 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13745 	} else {
   13746 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13747 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13748 	}
   13749 
   13750 	/* Dump EEPROM image for debug */
   13751 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13752 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13753 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13754 		/* XXX PCH_SPT? */
   13755 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13756 		if ((eeprom_data & valid_checksum) == 0)
   13757 			DPRINTF(WM_DEBUG_NVM,
    13758 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   13759 				device_xname(sc->sc_dev), eeprom_data,
   13760 				    valid_checksum));
   13761 	}
   13762 
   13763 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13764 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13765 		for (i = 0; i < NVM_SIZE; i++) {
   13766 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13767 				printf("XXXX ");
   13768 			else
   13769 				printf("%04hx ", eeprom_data);
   13770 			if (i % 8 == 7)
   13771 				printf("\n");
   13772 		}
   13773 	}
   13774 
   13775 #endif /* WM_DEBUG */
   13776 
   13777 	for (i = 0; i < NVM_SIZE; i++) {
   13778 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13779 			return 1;
   13780 		checksum += eeprom_data;
   13781 	}
   13782 
   13783 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13784 #ifdef WM_DEBUG
   13785 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13786 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13787 #endif
   13788 	}
   13789 
   13790 	return 0;
   13791 }
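
/*
 * Illustrative sketch (not part of the driver, compiled out): the rule
 * checked above is that the 16-bit sum of the first NVM_SIZE words
 * equals NVM_CHECKSUM.  An image is therefore made valid by storing,
 * in the last of those words, NVM_CHECKSUM minus the sum of the
 * preceding words (all arithmetic mod 2^16).
 */
#if 0
static uint16_t
nvm_checksum_word(const uint16_t *image)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_SIZE - 1; i++)
		sum += image[i];
	return (uint16_t)(NVM_CHECKSUM - sum);
}
#endif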
   13792 
   13793 static void
   13794 wm_nvm_version_invm(struct wm_softc *sc)
   13795 {
   13796 	uint32_t dword;
   13797 
   13798 	/*
    13799 	 * Linux's code to decode the version is very strange, so we
    13800 	 * don't follow that algorithm and just use word 61 as the
    13801 	 * documentation describes.  Perhaps it's not perfect though...
   13802 	 *
   13803 	 * Example:
   13804 	 *
   13805 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13806 	 */
   13807 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13808 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13809 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13810 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13811 }
   13812 
   13813 static void
   13814 wm_nvm_version(struct wm_softc *sc)
   13815 {
   13816 	uint16_t major, minor, build, patch;
   13817 	uint16_t uid0, uid1;
   13818 	uint16_t nvm_data;
   13819 	uint16_t off;
   13820 	bool check_version = false;
   13821 	bool check_optionrom = false;
   13822 	bool have_build = false;
   13823 	bool have_uid = true;
   13824 
   13825 	/*
   13826 	 * Version format:
   13827 	 *
   13828 	 * XYYZ
   13829 	 * X0YZ
   13830 	 * X0YY
   13831 	 *
   13832 	 * Example:
   13833 	 *
   13834 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13835 	 *	82571	0x50a6	5.10.6?
   13836 	 *	82572	0x506a	5.6.10?
   13837 	 *	82572EI	0x5069	5.6.9?
   13838 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13839 	 *		0x2013	2.1.3?
   13840 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13841 	 * ICH8+82567	0x0040	0.4.0?
   13842 	 * ICH9+82566	0x1040	1.4.0?
   13843 	 *ICH10+82567	0x0043	0.4.3?
   13844 	 *  PCH+82577	0x00c1	0.12.1?
   13845 	 * PCH2+82579	0x00d3	0.13.3?
   13846 	 *		0x00d4	0.13.4?
   13847 	 *  LPT+I218	0x0023	0.2.3?
   13848 	 *  SPT+I219	0x0084	0.8.4?
   13849 	 *  CNP+I219	0x0054	0.5.4?
   13850 	 */
   13851 
   13852 	/*
   13853 	 * XXX
   13854 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    13855 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13856 	 */
   13857 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13858 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13859 		have_uid = false;
   13860 
   13861 	switch (sc->sc_type) {
   13862 	case WM_T_82571:
   13863 	case WM_T_82572:
   13864 	case WM_T_82574:
   13865 	case WM_T_82583:
   13866 		check_version = true;
   13867 		check_optionrom = true;
   13868 		have_build = true;
   13869 		break;
   13870 	case WM_T_ICH8:
   13871 	case WM_T_ICH9:
   13872 	case WM_T_ICH10:
   13873 	case WM_T_PCH:
   13874 	case WM_T_PCH2:
   13875 	case WM_T_PCH_LPT:
   13876 	case WM_T_PCH_SPT:
   13877 	case WM_T_PCH_CNP:
   13878 		check_version = true;
   13879 		have_build = true;
   13880 		have_uid = false;
   13881 		break;
   13882 	case WM_T_82575:
   13883 	case WM_T_82576:
   13884 	case WM_T_82580:
   13885 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13886 			check_version = true;
   13887 		break;
   13888 	case WM_T_I211:
   13889 		wm_nvm_version_invm(sc);
   13890 		have_uid = false;
   13891 		goto printver;
   13892 	case WM_T_I210:
   13893 		if (!wm_nvm_flash_presence_i210(sc)) {
   13894 			wm_nvm_version_invm(sc);
   13895 			have_uid = false;
   13896 			goto printver;
   13897 		}
   13898 		/* FALLTHROUGH */
   13899 	case WM_T_I350:
   13900 	case WM_T_I354:
   13901 		check_version = true;
   13902 		check_optionrom = true;
   13903 		break;
   13904 	default:
   13905 		return;
   13906 	}
   13907 	if (check_version
   13908 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13909 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13910 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13911 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13912 			build = nvm_data & NVM_BUILD_MASK;
   13913 			have_build = true;
   13914 		} else
   13915 			minor = nvm_data & 0x00ff;
   13916 
    13917 		/* Convert the BCD-encoded minor to decimal */
   13918 		minor = (minor / 16) * 10 + (minor % 16);
   13919 		sc->sc_nvm_ver_major = major;
   13920 		sc->sc_nvm_ver_minor = minor;
   13921 
   13922 printver:
   13923 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13924 		    sc->sc_nvm_ver_minor);
   13925 		if (have_build) {
   13926 			sc->sc_nvm_ver_build = build;
   13927 			aprint_verbose(".%d", build);
   13928 		}
   13929 	}
   13930 
    13931 	/* Assume the Option ROM area is above NVM_SIZE */
   13932 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13933 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13934 		/* Option ROM Version */
   13935 		if ((off != 0x0000) && (off != 0xffff)) {
   13936 			int rv;
   13937 
   13938 			off += NVM_COMBO_VER_OFF;
   13939 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13940 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13941 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13942 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13943 				/* 16bits */
   13944 				major = uid0 >> 8;
   13945 				build = (uid0 << 8) | (uid1 >> 8);
   13946 				patch = uid1 & 0x00ff;
   13947 				aprint_verbose(", option ROM Version %d.%d.%d",
   13948 				    major, build, patch);
   13949 			}
   13950 		}
   13951 	}
   13952 
   13953 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13954 		aprint_verbose(", Image Unique ID %08x",
   13955 		    ((uint32_t)uid1 << 16) | uid0);
   13956 }
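
/*
 * Illustrative sketch (not part of the driver, compiled out): decoding
 * a version word in the XYYZ format from the table above.  The 4/8/4
 * bit split is an assumption about the NVM_MAJOR/MINOR/BUILD masks;
 * 0x50a2 then yields major 5, minor 0x0a and build 2, and converting
 * the BCD-ish minor as the code does gives "5.10.2".
 */
#if 0
static void
nvm_decode_xyyz(uint16_t w, int *major, int *minor, int *build)
{

	*major = (w >> 12) & 0xf;
	*minor = (w >> 4) & 0xff;
	*minor = (*minor / 16) * 10 + (*minor % 16);	/* BCD to decimal */
	*build = w & 0xf;
}
#endif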
   13957 
   13958 /*
   13959  * wm_nvm_read:
   13960  *
   13961  *	Read data from the serial EEPROM.
   13962  */
   13963 static int
   13964 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13965 {
   13966 	int rv;
   13967 
   13968 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13969 		device_xname(sc->sc_dev), __func__));
   13970 
   13971 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13972 		return -1;
   13973 
   13974 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13975 
   13976 	return rv;
   13977 }
   13978 
   13979 /*
   13980  * Hardware semaphores.
    13981  * Very complex...
   13982  */
   13983 
   13984 static int
   13985 wm_get_null(struct wm_softc *sc)
   13986 {
   13987 
   13988 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13989 		device_xname(sc->sc_dev), __func__));
   13990 	return 0;
   13991 }
   13992 
   13993 static void
   13994 wm_put_null(struct wm_softc *sc)
   13995 {
   13996 
   13997 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13998 		device_xname(sc->sc_dev), __func__));
   13999 	return;
   14000 }
   14001 
   14002 static int
   14003 wm_get_eecd(struct wm_softc *sc)
   14004 {
   14005 	uint32_t reg;
   14006 	int x;
   14007 
   14008 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14009 		device_xname(sc->sc_dev), __func__));
   14010 
   14011 	reg = CSR_READ(sc, WMREG_EECD);
   14012 
   14013 	/* Request EEPROM access. */
   14014 	reg |= EECD_EE_REQ;
   14015 	CSR_WRITE(sc, WMREG_EECD, reg);
   14016 
    14017 	/* ...and wait for it to be granted. */
   14018 	for (x = 0; x < 1000; x++) {
   14019 		reg = CSR_READ(sc, WMREG_EECD);
   14020 		if (reg & EECD_EE_GNT)
   14021 			break;
   14022 		delay(5);
   14023 	}
   14024 	if ((reg & EECD_EE_GNT) == 0) {
   14025 		aprint_error_dev(sc->sc_dev,
   14026 		    "could not acquire EEPROM GNT\n");
   14027 		reg &= ~EECD_EE_REQ;
   14028 		CSR_WRITE(sc, WMREG_EECD, reg);
   14029 		return -1;
   14030 	}
   14031 
   14032 	return 0;
   14033 }
   14034 
   14035 static void
   14036 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14037 {
   14038 
   14039 	*eecd |= EECD_SK;
   14040 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14041 	CSR_WRITE_FLUSH(sc);
   14042 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14043 		delay(1);
   14044 	else
   14045 		delay(50);
   14046 }
   14047 
   14048 static void
   14049 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14050 {
   14051 
   14052 	*eecd &= ~EECD_SK;
   14053 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14054 	CSR_WRITE_FLUSH(sc);
   14055 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14056 		delay(1);
   14057 	else
   14058 		delay(50);
   14059 }
   14060 
   14061 static void
   14062 wm_put_eecd(struct wm_softc *sc)
   14063 {
   14064 	uint32_t reg;
   14065 
   14066 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14067 		device_xname(sc->sc_dev), __func__));
   14068 
   14069 	/* Stop nvm */
   14070 	reg = CSR_READ(sc, WMREG_EECD);
   14071 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14072 		/* Pull CS high */
   14073 		reg |= EECD_CS;
   14074 		wm_nvm_eec_clock_lower(sc, &reg);
   14075 	} else {
   14076 		/* CS on Microwire is active-high */
   14077 		reg &= ~(EECD_CS | EECD_DI);
   14078 		CSR_WRITE(sc, WMREG_EECD, reg);
   14079 		wm_nvm_eec_clock_raise(sc, &reg);
   14080 		wm_nvm_eec_clock_lower(sc, &reg);
   14081 	}
   14082 
   14083 	reg = CSR_READ(sc, WMREG_EECD);
   14084 	reg &= ~EECD_EE_REQ;
   14085 	CSR_WRITE(sc, WMREG_EECD, reg);
   14086 
   14087 	return;
   14088 }
   14089 
   14090 /*
   14091  * Get hardware semaphore.
   14092  * Same as e1000_get_hw_semaphore_generic()
   14093  */
   14094 static int
   14095 wm_get_swsm_semaphore(struct wm_softc *sc)
   14096 {
   14097 	int32_t timeout;
   14098 	uint32_t swsm;
   14099 
   14100 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14101 		device_xname(sc->sc_dev), __func__));
   14102 	KASSERT(sc->sc_nvm_wordsize > 0);
   14103 
   14104 retry:
   14105 	/* Get the SW semaphore. */
   14106 	timeout = sc->sc_nvm_wordsize + 1;
   14107 	while (timeout) {
   14108 		swsm = CSR_READ(sc, WMREG_SWSM);
   14109 
   14110 		if ((swsm & SWSM_SMBI) == 0)
   14111 			break;
   14112 
   14113 		delay(50);
   14114 		timeout--;
   14115 	}
   14116 
   14117 	if (timeout == 0) {
   14118 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14119 			/*
   14120 			 * In rare circumstances, the SW semaphore may already
   14121 			 * be held unintentionally. Clear the semaphore once
   14122 			 * before giving up.
   14123 			 */
   14124 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14125 			wm_put_swsm_semaphore(sc);
   14126 			goto retry;
   14127 		}
   14128 		aprint_error_dev(sc->sc_dev,
   14129 		    "could not acquire SWSM SMBI\n");
   14130 		return 1;
   14131 	}
   14132 
   14133 	/* Get the FW semaphore. */
   14134 	timeout = sc->sc_nvm_wordsize + 1;
   14135 	while (timeout) {
   14136 		swsm = CSR_READ(sc, WMREG_SWSM);
   14137 		swsm |= SWSM_SWESMBI;
   14138 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14139 		/* If we managed to set the bit we got the semaphore. */
   14140 		swsm = CSR_READ(sc, WMREG_SWSM);
   14141 		if (swsm & SWSM_SWESMBI)
   14142 			break;
   14143 
   14144 		delay(50);
   14145 		timeout--;
   14146 	}
   14147 
   14148 	if (timeout == 0) {
   14149 		aprint_error_dev(sc->sc_dev,
   14150 		    "could not acquire SWSM SWESMBI\n");
   14151 		/* Release semaphores */
   14152 		wm_put_swsm_semaphore(sc);
   14153 		return 1;
   14154 	}
   14155 	return 0;
   14156 }
   14157 
   14158 /*
   14159  * Put hardware semaphore.
   14160  * Same as e1000_put_hw_semaphore_generic()
   14161  */
   14162 static void
   14163 wm_put_swsm_semaphore(struct wm_softc *sc)
   14164 {
   14165 	uint32_t swsm;
   14166 
   14167 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14168 		device_xname(sc->sc_dev), __func__));
   14169 
   14170 	swsm = CSR_READ(sc, WMREG_SWSM);
   14171 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14172 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14173 }
   14174 
   14175 /*
   14176  * Get SW/FW semaphore.
   14177  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14178  */
   14179 static int
   14180 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14181 {
   14182 	uint32_t swfw_sync;
   14183 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14184 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14185 	int timeout;
   14186 
   14187 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14188 		device_xname(sc->sc_dev), __func__));
   14189 
   14190 	if (sc->sc_type == WM_T_80003)
   14191 		timeout = 50;
   14192 	else
   14193 		timeout = 200;
   14194 
   14195 	while (timeout) {
   14196 		if (wm_get_swsm_semaphore(sc)) {
   14197 			aprint_error_dev(sc->sc_dev,
   14198 			    "%s: failed to get semaphore\n",
   14199 			    __func__);
   14200 			return 1;
   14201 		}
   14202 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14203 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14204 			swfw_sync |= swmask;
   14205 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14206 			wm_put_swsm_semaphore(sc);
   14207 			return 0;
   14208 		}
   14209 		wm_put_swsm_semaphore(sc);
   14210 		delay(5000);
   14211 		timeout--;
   14212 	}
   14213 	device_printf(sc->sc_dev,
   14214 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14215 	    mask, swfw_sync);
   14216 	return 1;
   14217 }
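
/*
 * Illustrative sketch (not part of the driver, compiled out): the
 * SW_FW_SYNC bit layout used above.  Each resource has a software
 * ownership bit and a firmware ownership bit; the resource mask is
 * shifted by SWFW_SOFT_SHIFT or SWFW_FIRM_SHIFT, and both halves must
 * be clear before the software bit may be set.
 */
#if 0
static bool
swfw_sync_free(uint32_t swfw_sync, uint16_t mask)
{

	return (swfw_sync &
	    (((uint32_t)mask << SWFW_SOFT_SHIFT) |
	     ((uint32_t)mask << SWFW_FIRM_SHIFT))) == 0;
}
#endif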
   14218 
   14219 static void
   14220 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14221 {
   14222 	uint32_t swfw_sync;
   14223 
   14224 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14225 		device_xname(sc->sc_dev), __func__));
   14226 
   14227 	while (wm_get_swsm_semaphore(sc) != 0)
   14228 		continue;
   14229 
   14230 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14231 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14232 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14233 
   14234 	wm_put_swsm_semaphore(sc);
   14235 }
   14236 
   14237 static int
   14238 wm_get_nvm_80003(struct wm_softc *sc)
   14239 {
   14240 	int rv;
   14241 
   14242 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14243 		device_xname(sc->sc_dev), __func__));
   14244 
   14245 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14246 		aprint_error_dev(sc->sc_dev,
   14247 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14248 		return rv;
   14249 	}
   14250 
   14251 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14252 	    && (rv = wm_get_eecd(sc)) != 0) {
   14253 		aprint_error_dev(sc->sc_dev,
   14254 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14255 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14256 		return rv;
   14257 	}
   14258 
   14259 	return 0;
   14260 }
   14261 
   14262 static void
   14263 wm_put_nvm_80003(struct wm_softc *sc)
   14264 {
   14265 
   14266 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14267 		device_xname(sc->sc_dev), __func__));
   14268 
   14269 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14270 		wm_put_eecd(sc);
   14271 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14272 }
   14273 
   14274 static int
   14275 wm_get_nvm_82571(struct wm_softc *sc)
   14276 {
   14277 	int rv;
   14278 
   14279 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14280 		device_xname(sc->sc_dev), __func__));
   14281 
   14282 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14283 		return rv;
   14284 
   14285 	switch (sc->sc_type) {
   14286 	case WM_T_82573:
   14287 		break;
   14288 	default:
   14289 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14290 			rv = wm_get_eecd(sc);
   14291 		break;
   14292 	}
   14293 
   14294 	if (rv != 0) {
   14295 		aprint_error_dev(sc->sc_dev,
   14296 		    "%s: failed to get semaphore\n",
   14297 		    __func__);
   14298 		wm_put_swsm_semaphore(sc);
   14299 	}
   14300 
   14301 	return rv;
   14302 }
   14303 
   14304 static void
   14305 wm_put_nvm_82571(struct wm_softc *sc)
   14306 {
   14307 
   14308 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14309 		device_xname(sc->sc_dev), __func__));
   14310 
   14311 	switch (sc->sc_type) {
   14312 	case WM_T_82573:
   14313 		break;
   14314 	default:
   14315 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14316 			wm_put_eecd(sc);
   14317 		break;
   14318 	}
   14319 
   14320 	wm_put_swsm_semaphore(sc);
   14321 }
   14322 
   14323 static int
   14324 wm_get_phy_82575(struct wm_softc *sc)
   14325 {
   14326 
   14327 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14328 		device_xname(sc->sc_dev), __func__));
   14329 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14330 }
   14331 
   14332 static void
   14333 wm_put_phy_82575(struct wm_softc *sc)
   14334 {
   14335 
   14336 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14337 		device_xname(sc->sc_dev), __func__));
   14338 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14339 }
   14340 
   14341 static int
   14342 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14343 {
   14344 	uint32_t ext_ctrl;
   14345 	int timeout = 200;
   14346 
   14347 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14348 		device_xname(sc->sc_dev), __func__));
   14349 
   14350 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14351 	for (timeout = 0; timeout < 200; timeout++) {
   14352 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14353 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14354 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14355 
   14356 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14357 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14358 			return 0;
   14359 		delay(5000);
   14360 	}
   14361 	device_printf(sc->sc_dev,
   14362 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14363 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14364 	return 1;
   14365 }
   14366 
   14367 static void
   14368 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14369 {
   14370 	uint32_t ext_ctrl;
   14371 
   14372 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14373 		device_xname(sc->sc_dev), __func__));
   14374 
   14375 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14376 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14377 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14378 
   14379 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14380 }
   14381 
   14382 static int
   14383 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14384 {
   14385 	uint32_t ext_ctrl;
   14386 	int timeout;
   14387 
   14388 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14389 		device_xname(sc->sc_dev), __func__));
   14390 	mutex_enter(sc->sc_ich_phymtx);
   14391 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14392 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14393 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14394 			break;
   14395 		delay(1000);
   14396 	}
   14397 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14398 		device_printf(sc->sc_dev,
   14399 		    "SW has already locked the resource\n");
   14400 		goto out;
   14401 	}
   14402 
   14403 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14404 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14405 	for (timeout = 0; timeout < 1000; timeout++) {
   14406 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14407 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14408 			break;
   14409 		delay(1000);
   14410 	}
   14411 	if (timeout >= 1000) {
   14412 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14413 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14414 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14415 		goto out;
   14416 	}
   14417 	return 0;
   14418 
   14419 out:
   14420 	mutex_exit(sc->sc_ich_phymtx);
   14421 	return 1;
   14422 }
   14423 
   14424 static void
   14425 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14426 {
   14427 	uint32_t ext_ctrl;
   14428 
   14429 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14430 		device_xname(sc->sc_dev), __func__));
   14431 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14432 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14433 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14434 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14435 	} else {
   14436 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14437 	}
   14438 
   14439 	mutex_exit(sc->sc_ich_phymtx);
   14440 }
   14441 
   14442 static int
   14443 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14444 {
   14445 
   14446 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14447 		device_xname(sc->sc_dev), __func__));
   14448 	mutex_enter(sc->sc_ich_nvmmtx);
   14449 
   14450 	return 0;
   14451 }
   14452 
   14453 static void
   14454 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14455 {
   14456 
   14457 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14458 		device_xname(sc->sc_dev), __func__));
   14459 	mutex_exit(sc->sc_ich_nvmmtx);
   14460 }
   14461 
   14462 static int
   14463 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14464 {
   14465 	int i = 0;
   14466 	uint32_t reg;
   14467 
   14468 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14469 		device_xname(sc->sc_dev), __func__));
   14470 
   14471 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14472 	do {
   14473 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14474 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14475 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14476 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14477 			break;
   14478 		delay(2*1000);
   14479 		i++;
   14480 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14481 
   14482 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14483 		wm_put_hw_semaphore_82573(sc);
   14484 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14485 		    device_xname(sc->sc_dev));
   14486 		return -1;
   14487 	}
   14488 
   14489 	return 0;
   14490 }
   14491 
   14492 static void
   14493 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14494 {
   14495 	uint32_t reg;
   14496 
   14497 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14498 		device_xname(sc->sc_dev), __func__));
   14499 
   14500 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14501 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14502 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14503 }
   14504 
   14505 /*
   14506  * Management mode and power management related subroutines.
   14507  * BMC, AMT, suspend/resume and EEE.
   14508  */
   14509 
   14510 #ifdef WM_WOL
   14511 static int
   14512 wm_check_mng_mode(struct wm_softc *sc)
   14513 {
   14514 	int rv;
   14515 
   14516 	switch (sc->sc_type) {
   14517 	case WM_T_ICH8:
   14518 	case WM_T_ICH9:
   14519 	case WM_T_ICH10:
   14520 	case WM_T_PCH:
   14521 	case WM_T_PCH2:
   14522 	case WM_T_PCH_LPT:
   14523 	case WM_T_PCH_SPT:
   14524 	case WM_T_PCH_CNP:
   14525 		rv = wm_check_mng_mode_ich8lan(sc);
   14526 		break;
   14527 	case WM_T_82574:
   14528 	case WM_T_82583:
   14529 		rv = wm_check_mng_mode_82574(sc);
   14530 		break;
   14531 	case WM_T_82571:
   14532 	case WM_T_82572:
   14533 	case WM_T_82573:
   14534 	case WM_T_80003:
   14535 		rv = wm_check_mng_mode_generic(sc);
   14536 		break;
   14537 	default:
    14538 		/* Nothing to do */
   14539 		rv = 0;
   14540 		break;
   14541 	}
   14542 
   14543 	return rv;
   14544 }
   14545 
   14546 static int
   14547 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14548 {
   14549 	uint32_t fwsm;
   14550 
   14551 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14552 
   14553 	if (((fwsm & FWSM_FW_VALID) != 0)
   14554 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14555 		return 1;
   14556 
   14557 	return 0;
   14558 }
   14559 
   14560 static int
   14561 wm_check_mng_mode_82574(struct wm_softc *sc)
   14562 {
   14563 	uint16_t data;
   14564 
   14565 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14566 
   14567 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14568 		return 1;
   14569 
   14570 	return 0;
   14571 }
   14572 
   14573 static int
   14574 wm_check_mng_mode_generic(struct wm_softc *sc)
   14575 {
   14576 	uint32_t fwsm;
   14577 
   14578 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14579 
   14580 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14581 		return 1;
   14582 
   14583 	return 0;
   14584 }
   14585 #endif /* WM_WOL */
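
          /*
           * All three managed-mode checks above decode the same FWSM.MODE
           * field via __SHIFTOUT().  As defined in <sys/cdefs.h>,
           * __SHIFTOUT(x, mask) masks out the field and right-justifies it
           * by dividing by the mask's lowest set bit.  An equivalent
           * open-coded sketch ("mode" is just an illustrative local):
           */
          #if 0
          	mode = (fwsm & FWSM_MODE) / __LOWEST_SET_BIT(FWSM_MODE);
          #endif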
   14586 
   14587 static int
   14588 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14589 {
   14590 	uint32_t manc, fwsm, factps;
   14591 
   14592 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14593 		return 0;
   14594 
   14595 	manc = CSR_READ(sc, WMREG_MANC);
   14596 
   14597 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14598 		device_xname(sc->sc_dev), manc));
   14599 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14600 		return 0;
   14601 
   14602 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14603 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14604 		factps = CSR_READ(sc, WMREG_FACTPS);
   14605 		if (((factps & FACTPS_MNGCG) == 0)
   14606 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14607 			return 1;
   14608 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14609 		uint16_t data;
   14610 
   14611 		factps = CSR_READ(sc, WMREG_FACTPS);
   14612 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14613 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14614 			device_xname(sc->sc_dev), factps, data));
   14615 		if (((factps & FACTPS_MNGCG) == 0)
   14616 		    && ((data & NVM_CFG2_MNGM_MASK)
   14617 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14618 			return 1;
   14619 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14620 	    && ((manc & MANC_ASF_EN) == 0))
   14621 		return 1;
   14622 
   14623 	return 0;
   14624 }
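
          /*
           * To summarize the checks above: management pass-through is
           * reported only when ASF/iAMT firmware is present, TCO receive is
           * enabled in MANC, and either the firmware/NVM indicates iAMT or
           * pass-through mode, or (on parts without those hints) SMBus
           * management is enabled while ASF is not.
           */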
   14625 
   14626 static bool
   14627 wm_phy_resetisblocked(struct wm_softc *sc)
   14628 {
   14629 	bool blocked = false;
   14630 	uint32_t reg;
   14631 	int i = 0;
   14632 
   14633 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14634 		device_xname(sc->sc_dev), __func__));
   14635 
   14636 	switch (sc->sc_type) {
   14637 	case WM_T_ICH8:
   14638 	case WM_T_ICH9:
   14639 	case WM_T_ICH10:
   14640 	case WM_T_PCH:
   14641 	case WM_T_PCH2:
   14642 	case WM_T_PCH_LPT:
   14643 	case WM_T_PCH_SPT:
   14644 	case WM_T_PCH_CNP:
   14645 		do {
   14646 			reg = CSR_READ(sc, WMREG_FWSM);
   14647 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14648 				blocked = true;
   14649 				delay(10*1000);
   14650 				continue;
   14651 			}
   14652 			blocked = false;
   14653 		} while (blocked && (i++ < 30));
   14654 		return blocked;
   14656 	case WM_T_82571:
   14657 	case WM_T_82572:
   14658 	case WM_T_82573:
   14659 	case WM_T_82574:
   14660 	case WM_T_82583:
   14661 	case WM_T_80003:
   14662 		reg = CSR_READ(sc, WMREG_MANC);
   14663 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14664 			return true;
   14665 		else
   14666 			return false;
   14668 	default:
   14669 		/* No problem */
   14670 		break;
   14671 	}
   14672 
   14673 	return false;
   14674 }
   14675 
   14676 static void
   14677 wm_get_hw_control(struct wm_softc *sc)
   14678 {
   14679 	uint32_t reg;
   14680 
   14681 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14682 		device_xname(sc->sc_dev), __func__));
   14683 
   14684 	if (sc->sc_type == WM_T_82573) {
   14685 		reg = CSR_READ(sc, WMREG_SWSM);
   14686 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14687 	} else if (sc->sc_type >= WM_T_82571) {
   14688 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14689 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14690 	}
   14691 }
   14692 
   14693 static void
   14694 wm_release_hw_control(struct wm_softc *sc)
   14695 {
   14696 	uint32_t reg;
   14697 
   14698 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14699 		device_xname(sc->sc_dev), __func__));
   14700 
   14701 	if (sc->sc_type == WM_T_82573) {
   14702 		reg = CSR_READ(sc, WMREG_SWSM);
   14703 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14704 	} else if (sc->sc_type >= WM_T_82571) {
   14705 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14706 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14707 	}
   14708 }
   14709 
   14710 static void
   14711 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14712 {
   14713 	uint32_t reg;
   14714 
   14715 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14716 		device_xname(sc->sc_dev), __func__));
   14717 
   14718 	if (sc->sc_type < WM_T_PCH2)
   14719 		return;
   14720 
   14721 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14722 
   14723 	if (gate)
   14724 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14725 	else
   14726 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14727 
   14728 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14729 }
   14730 
   14731 static int
   14732 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14733 {
   14734 	uint32_t fwsm, reg;
   14735 	int rv = 0;
   14736 
   14737 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14738 		device_xname(sc->sc_dev), __func__));
   14739 
   14740 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14741 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14742 
   14743 	/* Disable ULP */
   14744 	wm_ulp_disable(sc);
   14745 
   14746 	/* Acquire PHY semaphore */
   14747 	rv = sc->phy.acquire(sc);
   14748 	if (rv != 0) {
   14749 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
    14750 			device_xname(sc->sc_dev), __func__));
   14751 		return -1;
   14752 	}
   14753 
   14754 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14755 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14756 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14757 	 */
   14758 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14759 	switch (sc->sc_type) {
   14760 	case WM_T_PCH_LPT:
   14761 	case WM_T_PCH_SPT:
   14762 	case WM_T_PCH_CNP:
   14763 		if (wm_phy_is_accessible_pchlan(sc))
   14764 			break;
   14765 
   14766 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14767 		 * forcing MAC to SMBus mode first.
   14768 		 */
   14769 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14770 		reg |= CTRL_EXT_FORCE_SMBUS;
   14771 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14772 #if 0
   14773 		/* XXX Isn't this required??? */
   14774 		CSR_WRITE_FLUSH(sc);
   14775 #endif
   14776 		/* Wait 50 milliseconds for MAC to finish any retries
   14777 		 * that it might be trying to perform from previous
   14778 		 * attempts to acknowledge any phy read requests.
   14779 		 */
   14780 		delay(50 * 1000);
   14781 		/* FALLTHROUGH */
   14782 	case WM_T_PCH2:
   14783 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14784 			break;
   14785 		/* FALLTHROUGH */
   14786 	case WM_T_PCH:
   14787 		if (sc->sc_type == WM_T_PCH)
   14788 			if ((fwsm & FWSM_FW_VALID) != 0)
   14789 				break;
   14790 
   14791 		if (wm_phy_resetisblocked(sc) == true) {
   14792 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14793 			break;
   14794 		}
   14795 
   14796 		/* Toggle LANPHYPC Value bit */
   14797 		wm_toggle_lanphypc_pch_lpt(sc);
   14798 
   14799 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14800 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14801 				break;
   14802 
   14803 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14804 			 * so ensure that the MAC is also out of SMBus mode
   14805 			 */
   14806 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14807 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14808 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14809 
   14810 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14811 				break;
   14812 			rv = -1;
   14813 		}
   14814 		break;
   14815 	default:
   14816 		break;
   14817 	}
   14818 
   14819 	/* Release semaphore */
   14820 	sc->phy.release(sc);
   14821 
   14822 	if (rv == 0) {
   14823 		/* Check to see if able to reset PHY.  Print error if not */
   14824 		if (wm_phy_resetisblocked(sc)) {
   14825 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14826 			goto out;
   14827 		}
   14828 
    14829 		/* Reset the PHY before any access to it.  Doing so ensures
   14830 		 * that the PHY is in a known good state before we read/write
   14831 		 * PHY registers.  The generic reset is sufficient here,
   14832 		 * because we haven't determined the PHY type yet.
   14833 		 */
   14834 		if (wm_reset_phy(sc) != 0)
   14835 			goto out;
   14836 
   14837 		/* On a successful reset, possibly need to wait for the PHY
   14838 		 * to quiesce to an accessible state before returning control
   14839 		 * to the calling function.  If the PHY does not quiesce, then
   14840 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    14841 		 * the PHY is in.
   14842 		 */
   14843 		if (wm_phy_resetisblocked(sc))
    14844 			device_printf(sc->sc_dev, "XXX reset is blocked(5)\n");
   14845 	}
   14846 
   14847 out:
   14848 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14849 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14850 		delay(10*1000);
   14851 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14852 	}
   14853 
    14854 	return rv;
   14855 }
   14856 
   14857 static void
   14858 wm_init_manageability(struct wm_softc *sc)
   14859 {
   14860 
   14861 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14862 		device_xname(sc->sc_dev), __func__));
   14863 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14864 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14865 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14866 
   14867 		/* Disable hardware interception of ARP */
   14868 		manc &= ~MANC_ARP_EN;
   14869 
   14870 		/* Enable receiving management packets to the host */
   14871 		if (sc->sc_type >= WM_T_82571) {
   14872 			manc |= MANC_EN_MNG2HOST;
   14873 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14874 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14875 		}
   14876 
   14877 		CSR_WRITE(sc, WMREG_MANC, manc);
   14878 	}
   14879 }
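
          /*
           * The MANC2H bits set above select which management ports'
           * traffic is forwarded to the host when MANC_EN_MNG2HOST is set;
           * port 623 is the standard ASF/RMCP management port.
           */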
   14880 
   14881 static void
   14882 wm_release_manageability(struct wm_softc *sc)
   14883 {
   14884 
   14885 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14886 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14887 
   14888 		manc |= MANC_ARP_EN;
   14889 		if (sc->sc_type >= WM_T_82571)
   14890 			manc &= ~MANC_EN_MNG2HOST;
   14891 
   14892 		CSR_WRITE(sc, WMREG_MANC, manc);
   14893 	}
   14894 }
   14895 
   14896 static void
   14897 wm_get_wakeup(struct wm_softc *sc)
   14898 {
   14899 
   14900 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14901 	switch (sc->sc_type) {
   14902 	case WM_T_82573:
   14903 	case WM_T_82583:
   14904 		sc->sc_flags |= WM_F_HAS_AMT;
   14905 		/* FALLTHROUGH */
   14906 	case WM_T_80003:
   14907 	case WM_T_82575:
   14908 	case WM_T_82576:
   14909 	case WM_T_82580:
   14910 	case WM_T_I350:
   14911 	case WM_T_I354:
   14912 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14913 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14914 		/* FALLTHROUGH */
   14915 	case WM_T_82541:
   14916 	case WM_T_82541_2:
   14917 	case WM_T_82547:
   14918 	case WM_T_82547_2:
   14919 	case WM_T_82571:
   14920 	case WM_T_82572:
   14921 	case WM_T_82574:
   14922 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14923 		break;
   14924 	case WM_T_ICH8:
   14925 	case WM_T_ICH9:
   14926 	case WM_T_ICH10:
   14927 	case WM_T_PCH:
   14928 	case WM_T_PCH2:
   14929 	case WM_T_PCH_LPT:
   14930 	case WM_T_PCH_SPT:
   14931 	case WM_T_PCH_CNP:
   14932 		sc->sc_flags |= WM_F_HAS_AMT;
   14933 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14934 		break;
   14935 	default:
   14936 		break;
   14937 	}
   14938 
   14939 	/* 1: HAS_MANAGE */
   14940 	if (wm_enable_mng_pass_thru(sc) != 0)
   14941 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14942 
    14943 	/*
    14944 	 * Note that the WOL flags are set after the EEPROM-related reset
    14945 	 * is done.
    14946 	 */
   14947 }
   14948 
   14949 /*
   14950  * Unconfigure Ultra Low Power mode.
   14951  * Only for I217 and newer (see below).
   14952  */
   14953 static int
   14954 wm_ulp_disable(struct wm_softc *sc)
   14955 {
   14956 	uint32_t reg;
   14957 	uint16_t phyreg;
   14958 	int i = 0, rv = 0;
   14959 
   14960 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14961 		device_xname(sc->sc_dev), __func__));
   14962 	/* Exclude old devices */
   14963 	if ((sc->sc_type < WM_T_PCH_LPT)
   14964 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14965 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14966 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14967 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14968 		return 0;
   14969 
   14970 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14971 		/* Request ME un-configure ULP mode in the PHY */
   14972 		reg = CSR_READ(sc, WMREG_H2ME);
   14973 		reg &= ~H2ME_ULP;
   14974 		reg |= H2ME_ENFORCE_SETTINGS;
   14975 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14976 
   14977 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14978 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14979 			if (i++ == 30) {
   14980 				device_printf(sc->sc_dev, "%s timed out\n",
   14981 				    __func__);
   14982 				return -1;
   14983 			}
   14984 			delay(10 * 1000);
   14985 		}
   14986 		reg = CSR_READ(sc, WMREG_H2ME);
   14987 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14988 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14989 
   14990 		return 0;
   14991 	}
   14992 
   14993 	/* Acquire semaphore */
   14994 	rv = sc->phy.acquire(sc);
   14995 	if (rv != 0) {
   14996 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
    14997 			device_xname(sc->sc_dev), __func__));
   14998 		return -1;
   14999 	}
   15000 
   15001 	/* Toggle LANPHYPC */
   15002 	wm_toggle_lanphypc_pch_lpt(sc);
   15003 
   15004 	/* Unforce SMBus mode in PHY */
   15005 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15006 	if (rv != 0) {
   15007 		uint32_t reg2;
   15008 
   15009 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15010 			__func__);
   15011 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15012 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15013 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15014 		delay(50 * 1000);
   15015 
   15016 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15017 		    &phyreg);
   15018 		if (rv != 0)
   15019 			goto release;
   15020 	}
   15021 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15022 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15023 
   15024 	/* Unforce SMBus mode in MAC */
   15025 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15026 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15027 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15028 
   15029 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15030 	if (rv != 0)
   15031 		goto release;
   15032 	phyreg |= HV_PM_CTRL_K1_ENA;
   15033 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15034 
   15035 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15036 		&phyreg);
   15037 	if (rv != 0)
   15038 		goto release;
   15039 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15040 	    | I218_ULP_CONFIG1_STICKY_ULP
   15041 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15042 	    | I218_ULP_CONFIG1_WOL_HOST
   15043 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15044 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15045 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15046 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15047 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15048 	phyreg |= I218_ULP_CONFIG1_START;
   15049 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15050 
   15051 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15052 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15053 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15054 
   15055 release:
   15056 	/* Release semaphore */
   15057 	sc->phy.release(sc);
   15058 	wm_gmii_reset(sc);
   15059 	delay(50 * 1000);
   15060 
   15061 	return rv;
   15062 }
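
          /*
           * ULP exit thus takes one of two routes: when valid ME firmware
           * is present (FWSM_FW_VALID), the H2ME handshake above asks the
           * ME to undo ULP and the driver merely polls FWSM_ULP_CFG_DONE;
           * otherwise the driver itself toggles LANPHYPC and unwinds the
           * SMBus/ULP PHY configuration under the PHY semaphore.
           */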
   15063 
   15064 /* WOL in the newer chipset interfaces (pchlan) */
   15065 static int
   15066 wm_enable_phy_wakeup(struct wm_softc *sc)
   15067 {
   15068 	device_t dev = sc->sc_dev;
   15069 	uint32_t mreg, moff;
   15070 	uint16_t wuce, wuc, wufc, preg;
   15071 	int i, rv;
   15072 
   15073 	KASSERT(sc->sc_type >= WM_T_PCH);
   15074 
   15075 	/* Copy MAC RARs to PHY RARs */
   15076 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15077 
   15078 	/* Activate PHY wakeup */
   15079 	rv = sc->phy.acquire(sc);
   15080 	if (rv != 0) {
   15081 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15082 		    __func__);
   15083 		return rv;
   15084 	}
   15085 
   15086 	/*
   15087 	 * Enable access to PHY wakeup registers.
   15088 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15089 	 */
   15090 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15091 	if (rv != 0) {
   15092 		device_printf(dev,
   15093 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15094 		goto release;
   15095 	}
   15096 
   15097 	/* Copy MAC MTA to PHY MTA */
   15098 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15099 		uint16_t lo, hi;
   15100 
   15101 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15102 		lo = (uint16_t)(mreg & 0xffff);
   15103 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15104 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15105 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15106 	}
   15107 
   15108 	/* Configure PHY Rx Control register */
   15109 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15110 	mreg = CSR_READ(sc, WMREG_RCTL);
   15111 	if (mreg & RCTL_UPE)
   15112 		preg |= BM_RCTL_UPE;
   15113 	if (mreg & RCTL_MPE)
   15114 		preg |= BM_RCTL_MPE;
   15115 	preg &= ~(BM_RCTL_MO_MASK);
   15116 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15117 	if (moff != 0)
   15118 		preg |= moff << BM_RCTL_MO_SHIFT;
   15119 	if (mreg & RCTL_BAM)
   15120 		preg |= BM_RCTL_BAM;
   15121 	if (mreg & RCTL_PMCF)
   15122 		preg |= BM_RCTL_PMCF;
   15123 	mreg = CSR_READ(sc, WMREG_CTRL);
   15124 	if (mreg & CTRL_RFCE)
   15125 		preg |= BM_RCTL_RFCE;
   15126 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15127 
   15128 	wuc = WUC_APME | WUC_PME_EN;
   15129 	wufc = WUFC_MAG;
   15130 	/* Enable PHY wakeup in MAC register */
   15131 	CSR_WRITE(sc, WMREG_WUC,
   15132 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15133 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15134 
   15135 	/* Configure and enable PHY wakeup in PHY registers */
   15136 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15137 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15138 
   15139 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15140 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15141 
   15142 release:
   15143 	sc->phy.release(sc);
   15144 
    15145 	return rv;
   15146 }
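
          /*
           * In short, PHY-based wakeup mirrors the relevant MAC state into
           * the PHY (receive address registers, MTA, Rx control and
           * WUC/WUFC) so that the PHY can recognize wake packets by itself
           * while the rest of the MAC is powered down.
           */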
   15147 
   15148 /* Power down workaround on D3 */
   15149 static void
   15150 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15151 {
   15152 	uint32_t reg;
   15153 	uint16_t phyreg;
   15154 	int i;
   15155 
   15156 	for (i = 0; i < 2; i++) {
   15157 		/* Disable link */
   15158 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15159 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15160 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15161 
   15162 		/*
   15163 		 * Call gig speed drop workaround on Gig disable before
   15164 		 * accessing any PHY registers
   15165 		 */
   15166 		if (sc->sc_type == WM_T_ICH8)
   15167 			wm_gig_downshift_workaround_ich8lan(sc);
   15168 
   15169 		/* Write VR power-down enable */
   15170 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15171 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15172 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15173 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15174 
   15175 		/* Read it back and test */
   15176 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15177 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15178 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15179 			break;
   15180 
   15181 		/* Issue PHY reset and repeat at most one more time */
   15182 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15183 	}
   15184 }
   15185 
   15186 /*
   15187  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15188  *  @sc: pointer to the HW structure
   15189  *
   15190  *  During S0 to Sx transition, it is possible the link remains at gig
   15191  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15192  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15193  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15194  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15195  *  needs to be written.
   15196  *  Parts that support (and are linked to a partner which support) EEE in
   15197  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15198  *  than 10Mbps w/o EEE.
   15199  */
   15200 static void
   15201 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15202 {
   15203 	device_t dev = sc->sc_dev;
   15204 	struct ethercom *ec = &sc->sc_ethercom;
   15205 	uint32_t phy_ctrl;
   15206 	int rv;
   15207 
   15208 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15209 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15210 
   15211 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15212 
   15213 	if (sc->sc_phytype == WMPHY_I217) {
   15214 		uint16_t devid = sc->sc_pcidevid;
   15215 
   15216 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15217 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15218 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15219 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15220 		    (sc->sc_type >= WM_T_PCH_SPT))
   15221 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15222 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15223 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15224 
   15225 		if (sc->phy.acquire(sc) != 0)
   15226 			goto out;
   15227 
   15228 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15229 			uint16_t eee_advert;
   15230 
   15231 			rv = wm_read_emi_reg_locked(dev,
   15232 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15233 			if (rv)
   15234 				goto release;
   15235 
   15236 			/*
   15237 			 * Disable LPLU if both link partners support 100BaseT
   15238 			 * EEE and 100Full is advertised on both ends of the
   15239 			 * link, and enable Auto Enable LPI since there will
   15240 			 * be no driver to enable LPI while in Sx.
   15241 			 */
   15242 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15243 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15244 				uint16_t anar, phy_reg;
   15245 
   15246 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15247 				    &anar);
   15248 				if (anar & ANAR_TX_FD) {
   15249 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15250 					    PHY_CTRL_NOND0A_LPLU);
   15251 
   15252 					/* Set Auto Enable LPI after link up */
   15253 					sc->phy.readreg_locked(dev, 2,
   15254 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15255 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15256 					sc->phy.writereg_locked(dev, 2,
   15257 					    I217_LPI_GPIO_CTRL, phy_reg);
   15258 				}
   15259 			}
   15260 		}
   15261 
   15262 		/*
   15263 		 * For i217 Intel Rapid Start Technology support,
   15264 		 * when the system is going into Sx and no manageability engine
   15265 		 * is present, the driver must configure proxy to reset only on
    15266 		 * power good.  LPI (Low Power Idle) state must also reset only
   15267 		 * on power good, as well as the MTA (Multicast table array).
   15268 		 * The SMBus release must also be disabled on LCD reset.
   15269 		 */
   15270 
   15271 		/*
   15272 		 * Enable MTA to reset for Intel Rapid Start Technology
   15273 		 * Support
   15274 		 */
   15275 
   15276 release:
   15277 		sc->phy.release(sc);
   15278 	}
   15279 out:
   15280 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15281 
   15282 	if (sc->sc_type == WM_T_ICH8)
   15283 		wm_gig_downshift_workaround_ich8lan(sc);
   15284 
   15285 	if (sc->sc_type >= WM_T_PCH) {
   15286 		wm_oem_bits_config_ich8lan(sc, false);
   15287 
   15288 		/* Reset PHY to activate OEM bits on 82577/8 */
   15289 		if (sc->sc_type == WM_T_PCH)
   15290 			wm_reset_phy(sc);
   15291 
   15292 		if (sc->phy.acquire(sc) != 0)
   15293 			return;
   15294 		wm_write_smbus_addr(sc);
   15295 		sc->phy.release(sc);
   15296 	}
   15297 }
   15298 
   15299 /*
   15300  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15301  *  @sc: pointer to the HW structure
   15302  *
   15303  *  During Sx to S0 transitions on non-managed devices or managed devices
   15304  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15305  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15306  *  the PHY.
   15307  *  On i217, setup Intel Rapid Start Technology.
   15308  */
   15309 static int
   15310 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15311 {
   15312 	device_t dev = sc->sc_dev;
   15313 	int rv;
   15314 
   15315 	if (sc->sc_type < WM_T_PCH2)
   15316 		return 0;
   15317 
   15318 	rv = wm_init_phy_workarounds_pchlan(sc);
   15319 	if (rv != 0)
   15320 		return -1;
   15321 
   15322 	/* For i217 Intel Rapid Start Technology support when the system
   15323 	 * is transitioning from Sx and no manageability engine is present
   15324 	 * configure SMBus to restore on reset, disable proxy, and enable
   15325 	 * the reset on MTA (Multicast table array).
   15326 	 */
   15327 	if (sc->sc_phytype == WMPHY_I217) {
   15328 		uint16_t phy_reg;
   15329 
   15330 		if (sc->phy.acquire(sc) != 0)
   15331 			return -1;
   15332 
   15333 		/* Clear Auto Enable LPI after link up */
   15334 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15335 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15336 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15337 
   15338 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15339 			/* Restore clear on SMB if no manageability engine
   15340 			 * is present
   15341 			 */
   15342 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15343 			    &phy_reg);
   15344 			if (rv != 0)
   15345 				goto release;
   15346 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15347 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15348 
   15349 			/* Disable Proxy */
   15350 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15351 		}
   15352 		/* Enable reset on MTA */
    15353 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15354 		if (rv != 0)
   15355 			goto release;
   15356 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15357 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15358 
   15359 release:
   15360 		sc->phy.release(sc);
   15361 		return rv;
   15362 	}
   15363 
   15364 	return 0;
   15365 }
   15366 
   15367 static void
   15368 wm_enable_wakeup(struct wm_softc *sc)
   15369 {
   15370 	uint32_t reg, pmreg;
   15371 	pcireg_t pmode;
   15372 	int rv = 0;
   15373 
   15374 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15375 		device_xname(sc->sc_dev), __func__));
   15376 
   15377 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15378 	    &pmreg, NULL) == 0)
   15379 		return;
   15380 
   15381 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15382 		goto pme;
   15383 
   15384 	/* Advertise the wakeup capability */
   15385 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15386 	    | CTRL_SWDPIN(3));
   15387 
   15388 	/* Keep the laser running on fiber adapters */
   15389 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15390 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15391 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15392 		reg |= CTRL_EXT_SWDPIN(3);
   15393 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15394 	}
   15395 
   15396 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15397 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15398 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15399 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15400 		wm_suspend_workarounds_ich8lan(sc);
   15401 
   15402 #if 0	/* For the multicast packet */
   15403 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15404 	reg |= WUFC_MC;
   15405 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15406 #endif
   15407 
   15408 	if (sc->sc_type >= WM_T_PCH) {
   15409 		rv = wm_enable_phy_wakeup(sc);
   15410 		if (rv != 0)
   15411 			goto pme;
   15412 	} else {
   15413 		/* Enable wakeup by the MAC */
   15414 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15415 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15416 	}
   15417 
   15418 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15419 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15420 		|| (sc->sc_type == WM_T_PCH2))
   15421 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15422 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15423 
   15424 pme:
   15425 	/* Request PME */
   15426 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15427 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15428 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15429 		/* For WOL */
   15430 		pmode |= PCI_PMCSR_PME_EN;
   15431 	} else {
   15432 		/* Disable WOL */
   15433 		pmode &= ~PCI_PMCSR_PME_EN;
   15434 	}
   15435 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15436 }
   15437 
    15438 /* Disable ASPM L0s and/or L1 as an errata workaround */
   15439 static void
   15440 wm_disable_aspm(struct wm_softc *sc)
   15441 {
   15442 	pcireg_t reg, mask = 0;
    15443 	const char *str = "";
   15444 
    15445 	/*
    15446 	 * Only for PCIe devices which have the PCIe capability in their
    15447 	 * PCI config space.
    15448 	 */
   15449 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15450 		return;
   15451 
   15452 	switch (sc->sc_type) {
   15453 	case WM_T_82571:
   15454 	case WM_T_82572:
   15455 		/*
   15456 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15457 		 * State Power management L1 State (ASPM L1).
   15458 		 */
   15459 		mask = PCIE_LCSR_ASPM_L1;
   15460 		str = "L1 is";
   15461 		break;
   15462 	case WM_T_82573:
   15463 	case WM_T_82574:
   15464 	case WM_T_82583:
   15465 		/*
   15466 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15467 		 *
    15468 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15469 		 * some chipsets.  The 82574 and 82583 documents say that
    15470 		 * disabling L0s on those specific chipsets is sufficient,
    15471 		 * but we follow what the Intel em driver does.
   15472 		 *
   15473 		 * References:
   15474 		 * Errata 8 of the Specification Update of i82573.
   15475 		 * Errata 20 of the Specification Update of i82574.
   15476 		 * Errata 9 of the Specification Update of i82583.
   15477 		 */
   15478 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15479 		str = "L0s and L1 are";
   15480 		break;
   15481 	default:
   15482 		return;
   15483 	}
   15484 
   15485 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15486 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15487 	reg &= ~mask;
   15488 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15489 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15490 
   15491 	/* Print only in wm_attach() */
   15492 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15493 		aprint_verbose_dev(sc->sc_dev,
    15494 		    "ASPM %s disabled to work around the errata.\n", str);
   15495 }
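
          /*
           * The configuration write above clears the ASPM control bits in
           * the PCIe Link Control register, keeping the link out of the
           * L0s and/or L1 low-power states that trigger the errata listed
           * per chip type.
           */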
   15496 
   15497 /* LPLU */
   15498 
   15499 static void
   15500 wm_lplu_d0_disable(struct wm_softc *sc)
   15501 {
   15502 	struct mii_data *mii = &sc->sc_mii;
   15503 	uint32_t reg;
   15504 	uint16_t phyval;
   15505 
   15506 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15507 		device_xname(sc->sc_dev), __func__));
   15508 
   15509 	if (sc->sc_phytype == WMPHY_IFE)
   15510 		return;
   15511 
   15512 	switch (sc->sc_type) {
   15513 	case WM_T_82571:
   15514 	case WM_T_82572:
   15515 	case WM_T_82573:
   15516 	case WM_T_82575:
   15517 	case WM_T_82576:
   15518 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15519 		phyval &= ~PMR_D0_LPLU;
   15520 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15521 		break;
   15522 	case WM_T_82580:
   15523 	case WM_T_I350:
   15524 	case WM_T_I210:
   15525 	case WM_T_I211:
   15526 		reg = CSR_READ(sc, WMREG_PHPM);
   15527 		reg &= ~PHPM_D0A_LPLU;
   15528 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15529 		break;
   15530 	case WM_T_82574:
   15531 	case WM_T_82583:
   15532 	case WM_T_ICH8:
   15533 	case WM_T_ICH9:
   15534 	case WM_T_ICH10:
   15535 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15536 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15537 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15538 		CSR_WRITE_FLUSH(sc);
   15539 		break;
   15540 	case WM_T_PCH:
   15541 	case WM_T_PCH2:
   15542 	case WM_T_PCH_LPT:
   15543 	case WM_T_PCH_SPT:
   15544 	case WM_T_PCH_CNP:
   15545 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15546 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15547 		if (wm_phy_resetisblocked(sc) == false)
   15548 			phyval |= HV_OEM_BITS_ANEGNOW;
   15549 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15550 		break;
   15551 	default:
   15552 		break;
   15553 	}
   15554 }
   15555 
   15556 /* EEE */
   15557 
   15558 static int
   15559 wm_set_eee_i350(struct wm_softc *sc)
   15560 {
   15561 	struct ethercom *ec = &sc->sc_ethercom;
   15562 	uint32_t ipcnfg, eeer;
   15563 	uint32_t ipcnfg_mask
   15564 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15565 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15566 
   15567 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15568 
   15569 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15570 	eeer = CSR_READ(sc, WMREG_EEER);
   15571 
   15572 	/* Enable or disable per user setting */
   15573 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15574 		ipcnfg |= ipcnfg_mask;
   15575 		eeer |= eeer_mask;
   15576 	} else {
   15577 		ipcnfg &= ~ipcnfg_mask;
   15578 		eeer &= ~eeer_mask;
   15579 	}
   15580 
   15581 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15582 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15583 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15584 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15585 
   15586 	return 0;
   15587 }
   15588 
   15589 static int
   15590 wm_set_eee_pchlan(struct wm_softc *sc)
   15591 {
   15592 	device_t dev = sc->sc_dev;
   15593 	struct ethercom *ec = &sc->sc_ethercom;
   15594 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15595 	int rv = 0;
   15596 
   15597 	switch (sc->sc_phytype) {
   15598 	case WMPHY_82579:
   15599 		lpa = I82579_EEE_LP_ABILITY;
   15600 		pcs_status = I82579_EEE_PCS_STATUS;
   15601 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15602 		break;
   15603 	case WMPHY_I217:
   15604 		lpa = I217_EEE_LP_ABILITY;
   15605 		pcs_status = I217_EEE_PCS_STATUS;
   15606 		adv_addr = I217_EEE_ADVERTISEMENT;
   15607 		break;
   15608 	default:
   15609 		return 0;
   15610 	}
   15611 
   15612 	if (sc->phy.acquire(sc)) {
   15613 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15614 		return 0;
   15615 	}
   15616 
   15617 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15618 	if (rv != 0)
   15619 		goto release;
   15620 
   15621 	/* Clear bits that enable EEE in various speeds */
   15622 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15623 
   15624 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15625 		/* Save off link partner's EEE ability */
   15626 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15627 		if (rv != 0)
   15628 			goto release;
   15629 
   15630 		/* Read EEE advertisement */
   15631 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15632 			goto release;
   15633 
   15634 		/*
   15635 		 * Enable EEE only for speeds in which the link partner is
   15636 		 * EEE capable and for which we advertise EEE.
   15637 		 */
   15638 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15639 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15640 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15641 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15642 			if ((data & ANLPAR_TX_FD) != 0)
   15643 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15644 			else {
   15645 				/*
   15646 				 * EEE is not supported in 100Half, so ignore
   15647 				 * partner's EEE in 100 ability if full-duplex
   15648 				 * is not advertised.
   15649 				 */
   15650 				sc->eee_lp_ability
   15651 				    &= ~AN_EEEADVERT_100_TX;
   15652 			}
   15653 		}
   15654 	}
   15655 
   15656 	if (sc->sc_phytype == WMPHY_82579) {
   15657 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15658 		if (rv != 0)
   15659 			goto release;
   15660 
   15661 		data &= ~I82579_LPI_PLL_SHUT_100;
   15662 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15663 	}
   15664 
   15665 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15666 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15667 		goto release;
   15668 
   15669 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15670 release:
   15671 	sc->phy.release(sc);
   15672 
   15673 	return rv;
   15674 }
   15675 
   15676 static int
   15677 wm_set_eee(struct wm_softc *sc)
   15678 {
   15679 	struct ethercom *ec = &sc->sc_ethercom;
   15680 
   15681 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15682 		return 0;
   15683 
   15684 	if (sc->sc_type == WM_T_I354) {
   15685 		/* I354 uses an external PHY */
   15686 		return 0; /* not yet */
   15687 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15688 		return wm_set_eee_i350(sc);
   15689 	else if (sc->sc_type >= WM_T_PCH2)
   15690 		return wm_set_eee_pchlan(sc);
   15691 
   15692 	return 0;
   15693 }
   15694 
   15695 /*
   15696  * Workarounds (mainly PHY related).
    15697  * Basically, PHY workarounds live in the PHY drivers.
   15698  */
   15699 
   15700 /* Work-around for 82566 Kumeran PCS lock loss */
   15701 static int
   15702 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15703 {
   15704 	struct mii_data *mii = &sc->sc_mii;
   15705 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15706 	int i, reg, rv;
   15707 	uint16_t phyreg;
   15708 
   15709 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15710 		device_xname(sc->sc_dev), __func__));
   15711 
   15712 	/* If the link is not up, do nothing */
   15713 	if ((status & STATUS_LU) == 0)
   15714 		return 0;
   15715 
    15716 	/* Nothing to do if the link speed is other than 1Gbps */
   15717 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15718 		return 0;
   15719 
   15720 	for (i = 0; i < 10; i++) {
   15721 		/* read twice */
   15722 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15723 		if (rv != 0)
   15724 			return rv;
   15725 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15726 		if (rv != 0)
   15727 			return rv;
   15728 
   15729 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15730 			goto out;	/* GOOD! */
   15731 
   15732 		/* Reset the PHY */
   15733 		wm_reset_phy(sc);
   15734 		delay(5*1000);
   15735 	}
   15736 
   15737 	/* Disable GigE link negotiation */
   15738 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15739 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15740 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15741 
   15742 	/*
   15743 	 * Call gig speed drop workaround on Gig disable before accessing
   15744 	 * any PHY registers.
   15745 	 */
   15746 	wm_gig_downshift_workaround_ich8lan(sc);
   15747 
   15748 out:
   15749 	return 0;
   15750 }
   15751 
   15752 /*
   15753  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15754  *  @sc: pointer to the HW structure
   15755  *
    15756  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15757  *  LPLU, Gig disable, MDIC PHY reset):
   15758  *    1) Set Kumeran Near-end loopback
   15759  *    2) Clear Kumeran Near-end loopback
   15760  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15761  */
   15762 static void
   15763 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15764 {
   15765 	uint16_t kmreg;
   15766 
   15767 	/* Only for igp3 */
   15768 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15769 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15770 			return;
   15771 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15772 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15773 			return;
   15774 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15775 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15776 	}
   15777 }
   15778 
   15779 /*
   15780  * Workaround for pch's PHYs
   15781  * XXX should be moved to new PHY driver?
   15782  */
   15783 static int
   15784 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15785 {
   15786 	device_t dev = sc->sc_dev;
   15787 	struct mii_data *mii = &sc->sc_mii;
   15788 	struct mii_softc *child;
   15789 	uint16_t phy_data, phyrev = 0;
   15790 	int phytype = sc->sc_phytype;
   15791 	int rv;
   15792 
   15793 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15794 		device_xname(dev), __func__));
   15795 	KASSERT(sc->sc_type == WM_T_PCH);
   15796 
   15797 	/* Set MDIO slow mode before any other MDIO access */
   15798 	if (phytype == WMPHY_82577)
   15799 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15800 			return rv;
   15801 
   15802 	child = LIST_FIRST(&mii->mii_phys);
   15803 	if (child != NULL)
   15804 		phyrev = child->mii_mpd_rev;
   15805 
    15806 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15807 	if ((child != NULL) &&
   15808 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15809 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15810 		/* Disable generation of early preamble (0x4431) */
   15811 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15812 		    &phy_data);
   15813 		if (rv != 0)
   15814 			return rv;
   15815 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15816 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15817 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15818 		    phy_data);
   15819 		if (rv != 0)
   15820 			return rv;
   15821 
   15822 		/* Preamble tuning for SSC */
   15823 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15824 		if (rv != 0)
   15825 			return rv;
   15826 	}
   15827 
   15828 	/* 82578 */
   15829 	if (phytype == WMPHY_82578) {
   15830 		/*
   15831 		 * Return registers to default by doing a soft reset then
   15832 		 * writing 0x3140 to the control register
   15833 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15834 		 */
   15835 		if ((child != NULL) && (phyrev < 2)) {
   15836 			PHY_RESET(child);
   15837 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15838 			if (rv != 0)
   15839 				return rv;
   15840 		}
   15841 	}
   15842 
   15843 	/* Select page 0 */
   15844 	if ((rv = sc->phy.acquire(sc)) != 0)
   15845 		return rv;
   15846 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   15847 	sc->phy.release(sc);
   15848 	if (rv != 0)
   15849 		return rv;
   15850 
    15851 	/*
    15852 	 * Configure the K1 Si workaround during PHY reset, assuming there
    15853 	 * is link, so that it disables K1 if the link is at 1Gbps.
    15854 	 */
   15855 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15856 		return rv;
   15857 
   15858 	/* Workaround for link disconnects on a busy hub in half duplex */
   15859 	rv = sc->phy.acquire(sc);
   15860 	if (rv)
   15861 		return rv;
   15862 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15863 	if (rv)
   15864 		goto release;
   15865 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15866 	    phy_data & 0x00ff);
   15867 	if (rv)
   15868 		goto release;
   15869 
   15870 	/* Set MSE higher to enable link to stay up when noise is high */
   15871 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15872 release:
   15873 	sc->phy.release(sc);
   15874 
   15875 	return rv;
   15876 }
   15877 
   15878 /*
   15879  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15880  *  @sc:   pointer to the HW structure
   15881  */
   15882 static void
   15883 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15884 {
   15885 	device_t dev = sc->sc_dev;
   15886 	uint32_t mac_reg;
   15887 	uint16_t i, wuce;
   15888 	int count;
   15889 
   15890 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15891 		device_xname(sc->sc_dev), __func__));
   15892 
   15893 	if (sc->phy.acquire(sc) != 0)
   15894 		return;
   15895 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15896 		goto release;
   15897 
   15898 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15899 	count = wm_rar_count(sc);
   15900 	for (i = 0; i < count; i++) {
   15901 		uint16_t lo, hi;
   15902 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15903 		lo = (uint16_t)(mac_reg & 0xffff);
   15904 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15905 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15906 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15907 
   15908 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15909 		lo = (uint16_t)(mac_reg & 0xffff);
   15910 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15911 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15912 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15913 	}
   15914 
   15915 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15916 
   15917 release:
   15918 	sc->phy.release(sc);
   15919 }
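
          /*
           * Each 32-bit RAL/RAH MAC register is split into 16-bit halves
           * above because the BM PHY wakeup registers are only 16 bits
           * wide; note that only the address-valid bit (RAL_AV) of RAH is
           * mirrored into BM_RAR_CTRL.
           */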
   15920 
   15921 /*
   15922  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15923  *  done after every PHY reset.
   15924  */
   15925 static int
   15926 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15927 {
   15928 	device_t dev = sc->sc_dev;
   15929 	int rv;
   15930 
   15931 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15932 		device_xname(dev), __func__));
   15933 	KASSERT(sc->sc_type == WM_T_PCH2);
   15934 
   15935 	/* Set MDIO slow mode before any other MDIO access */
   15936 	rv = wm_set_mdio_slow_mode_hv(sc);
   15937 	if (rv != 0)
   15938 		return rv;
   15939 
   15940 	rv = sc->phy.acquire(sc);
   15941 	if (rv != 0)
   15942 		return rv;
   15943 	/* Set MSE higher to enable link to stay up when noise is high */
   15944 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15945 	if (rv != 0)
   15946 		goto release;
    15947 	/* Drop the link after the MSE threshold has been reached 5 times */
   15948 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15949 release:
   15950 	sc->phy.release(sc);
   15951 
   15952 	return rv;
   15953 }
   15954 
   15955 /**
   15956  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15957  *  @link: link up bool flag
   15958  *
   15959  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15960  *  preventing further DMA write requests.  Workaround the issue by disabling
    15961  *  the de-assertion of the clock request when in 1Gbps mode.
   15962  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15963  *  speeds in order to avoid Tx hangs.
   15964  **/
   15965 static int
   15966 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15967 {
   15968 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15969 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15970 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15971 	uint16_t phyreg;
   15972 
   15973 	if (link && (speed == STATUS_SPEED_1000)) {
    15974 		int rv = sc->phy.acquire(sc);
          		if (rv != 0)
          			return rv;
    15975 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    15976 		    &phyreg);
   15977 		if (rv != 0)
   15978 			goto release;
   15979 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15980 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15981 		if (rv != 0)
   15982 			goto release;
   15983 		delay(20);
   15984 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15985 
   15986 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15987 		    &phyreg);
   15988 release:
   15989 		sc->phy.release(sc);
   15990 		return rv;
   15991 	}
   15992 
   15993 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15994 
   15995 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15996 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15997 	    || !link
   15998 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15999 		goto update_fextnvm6;
   16000 
   16001 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16002 
   16003 	/* Clear link status transmit timeout */
   16004 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16005 	if (speed == STATUS_SPEED_100) {
   16006 		/* Set inband Tx timeout to 5x10us for 100Half */
   16007 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16008 
   16009 		/* Do not extend the K1 entry latency for 100Half */
   16010 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16011 	} else {
   16012 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16013 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16014 
   16015 		/* Extend the K1 entry latency for 10 Mbps */
   16016 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16017 	}
   16018 
   16019 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16020 
   16021 update_fextnvm6:
   16022 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16023 	return 0;
   16024 }
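
          /*
           * The inband link-status timeout field written above is in 10us
           * units, so the values chosen work out to 5 x 10us = 50us for
           * 100Half and 50 x 10us = 500us for 10Mbps links.
           */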
   16025 
   16026 /*
   16027  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16028  *  @sc:   pointer to the HW structure
   16029  *  @link: link up bool flag
   16030  *
   16031  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    16032  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   16033  *  If link is down, the function will restore the default K1 setting located
   16034  *  in the NVM.
   16035  */
   16036 static int
   16037 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16038 {
   16039 	int k1_enable = sc->sc_nvm_k1_enabled;
   16040 
   16041 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16042 		device_xname(sc->sc_dev), __func__));
   16043 
   16044 	if (sc->phy.acquire(sc) != 0)
   16045 		return -1;
   16046 
   16047 	if (link) {
   16048 		k1_enable = 0;
   16049 
   16050 		/* Link stall fix for link up */
   16051 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16052 		    0x0100);
   16053 	} else {
   16054 		/* Link stall fix for link down */
   16055 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16056 		    0x4100);
   16057 	}
   16058 
   16059 	wm_configure_k1_ich8lan(sc, k1_enable);
   16060 	sc->phy.release(sc);
   16061 
   16062 	return 0;
   16063 }
   16064 
   16065 /*
   16066  *  wm_k1_workaround_lv - K1 Si workaround
   16067  *  @sc:   pointer to the HW structure
   16068  *
    16069  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
    16070  *  Disable K1 for 1000 and 100 speeds.
   16071  */
   16072 static int
   16073 wm_k1_workaround_lv(struct wm_softc *sc)
   16074 {
   16075 	uint32_t reg;
   16076 	uint16_t phyreg;
   16077 	int rv;
   16078 
   16079 	if (sc->sc_type != WM_T_PCH2)
   16080 		return 0;
   16081 
   16082 	/* Set K1 beacon duration based on 10Mbps speed */
   16083 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16084 	if (rv != 0)
   16085 		return rv;
   16086 
   16087 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16088 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16089 		if (phyreg &
   16090 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    16091 			/* LV 1G/100 packet drop issue workaround */
   16092 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16093 			    &phyreg);
   16094 			if (rv != 0)
   16095 				return rv;
   16096 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16097 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16098 			    phyreg);
   16099 			if (rv != 0)
   16100 				return rv;
   16101 		} else {
   16102 			/* For 10Mbps */
   16103 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16104 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16105 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16106 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16107 		}
   16108 	}
   16109 
   16110 	return 0;
   16111 }
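
          /*
           * Summary of the 82579 behaviour above: at 100/1000 with link up
           * and autonegotiation complete, K1 is disabled entirely via
           * HV_PM_CTRL; at 10Mbps the K1 beacon duration is instead
           * shortened to 16us through FEXTNVM4.
           */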
   16112 
   16113 /*
   16114  *  wm_link_stall_workaround_hv - Si workaround
   16115  *  @sc: pointer to the HW structure
   16116  *
   16117  *  This function works around a Si bug where the link partner can get
   16118  *  a link up indication before the PHY does. If small packets are sent
   16119  *  by the link partner they can be placed in the packet buffer without
   16120  *  being properly accounted for by the PHY and will stall preventing
    16121  *  being properly accounted for by the PHY and will stall, preventing
   16122  *  packet buffer after the PHY detects link up.
   16123  */
   16124 static int
   16125 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16126 {
   16127 	uint16_t phyreg;
   16128 
   16129 	if (sc->sc_phytype != WMPHY_82578)
   16130 		return 0;
   16131 
    16132 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   16133 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16134 	if ((phyreg & BMCR_LOOP) != 0)
   16135 		return 0;
   16136 
   16137 	/* Check if link is up and at 1Gbps */
   16138 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16139 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16140 	    | BM_CS_STATUS_SPEED_MASK;
   16141 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16142 		| BM_CS_STATUS_SPEED_1000))
   16143 		return 0;
   16144 
   16145 	delay(200 * 1000);	/* XXX too big */
   16146 
   16147 	/* Flush the packets in the fifo buffer */
   16148 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16149 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16150 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16151 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16152 
   16153 	return 0;
   16154 }
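
          /*
           * The two HV_MUX_DATA_CTRL writes above first assert FORCE_SPEED
           * together with GEN_TO_MAC and then clear FORCE_SPEED again; per
           * the comment above, this drains packets stuck in the PHY FIFO
           * after the premature link-up indication.
           */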
   16155 
static int
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	int rv;
	uint16_t reg;

	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
	if (rv != 0)
		return rv;

	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

/*
 *  wm_configure_k1_ich8lan - Configure K1 power state
 *  @sc: pointer to the HW structure
 *  @k1_enable: K1 state to configure
 *
 *  Configure the K1 power state based on the provided parameter.
 *  Assumes semaphore already acquired.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmreg;
	int rv;

	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
	if (rv != 0)
		return;

	if (k1_enable)
		kmreg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmreg &= ~KUMCTRLSTA_K1_ENABLE;

	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
	if (rv != 0)
		return;

	delay(20);

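	/*
	 * Briefly force the MAC speed (CTRL_FRCSPD with the speed bits
	 * cleared, CTRL_EXT_SPD_BYPS set), then restore the original
	 * CTRL/CTRL_EXT values; presumably this lets the new K1 setting
	 * take effect.
	 */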
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	return;
}

/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * The setup is the same as the one mentioned in the FreeBSD driver
	 * for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

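/*
 *  wm_reset_mdicnfg_82580 - Restore the MDIO configuration after reset
 *  @sc: pointer to the HW structure
 *
 *  On 82580 in SGMII mode, re-set the MDICNFG destination and shared
 *  MDIO bits from the CFG3 word in the NVM, which a reset may have
 *  cleared.
 */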
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

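/*
 *  wm_phy_is_accessible_pchlan - Check whether the PHY is accessible
 *  @sc: pointer to the HW structure
 *
 *  Returns true if the PHY ID registers can be read and contain a valid
 *  ID, switching to MDIO slow mode and unforcing SMBus mode as needed.
 *  Assumes semaphore already acquired.
 */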
static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
		sc->phy.acquire(sc);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, phyreg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

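/*
 *  wm_toggle_lanphypc_pch_lpt - Toggle the LANPHYPC pin value
 *  @sc: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset it to a known state.
 */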
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

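/*
 *  wm_platform_pm_pch_lpt - Set platform power management values
 *  @sc: pointer to the HW structure
 *  @link: whether the link is currently up
 *
 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 *  MAC-PHY interconnect and configure the OBFF high water mark.
 */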
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
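		/*
		 * Worked example (illustrative): lat_ns = 100000 (100us)
		 * does not fit in the 10-bit value field, so it is scaled:
		 * howmany(100000, 32) = 3125, howmany(3125, 32) = 98, so
		 * scale = 2 and value = 98, i.e. 98 * 2^10 ns =~ 100us.
		 */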
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

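		/*
		 * Convert the tolerated latency back into the amount of
		 * data (KB) that arrives during that time at the current
		 * link speed; the OBFF high water mark is whatever is left
		 * of the Rx packet buffer.
		 */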
		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 *
 * Note that on NetBSD this function is called in both the FLASH and the
 * iNVM case.
 */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
		/*
		 * The default value of the Initialization Control Word 1
		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
		 */
		nvmword = INVM_DEFAULT_AL;
	}
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

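		/*
		 * Bounce the device through D3hot and back to D0 so the
		 * autoload runs again with the PLL workaround value.
		 */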
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

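/*
 *  wm_legacy_irq_quirk_spt - PCH_SPT/PCH_CNP legacy interrupt quirk
 *  @sc: pointer to the HW structure
 *
 *  Ungate the side clock and disable IOSF SB clock gating and clock
 *  requests; applied when legacy (INTx) interrupts are in use.
 */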
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}