Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.688
      1 /*	$NetBSD: if_wm.c,v 1.688 2020/09/16 15:04:01 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficiency Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.688 2020/09/16 15:04:01 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 #include <sys/sysctl.h>
    109 #include <sys/workqueue.h>
    110 
    111 #include <sys/rndsource.h>
    112 
    113 #include <net/if.h>
    114 #include <net/if_dl.h>
    115 #include <net/if_media.h>
    116 #include <net/if_ether.h>
    117 
    118 #include <net/bpf.h>
    119 
    120 #include <net/rss_config.h>
    121 
    122 #include <netinet/in.h>			/* XXX for struct ip */
    123 #include <netinet/in_systm.h>		/* XXX for struct ip */
    124 #include <netinet/ip.h>			/* XXX for struct ip */
    125 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    126 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    127 
    128 #include <sys/bus.h>
    129 #include <sys/intr.h>
    130 #include <machine/endian.h>
    131 
    132 #include <dev/mii/mii.h>
    133 #include <dev/mii/mdio.h>
    134 #include <dev/mii/miivar.h>
    135 #include <dev/mii/miidevs.h>
    136 #include <dev/mii/mii_bitbang.h>
    137 #include <dev/mii/ikphyreg.h>
    138 #include <dev/mii/igphyreg.h>
    139 #include <dev/mii/igphyvar.h>
    140 #include <dev/mii/inbmphyreg.h>
    141 #include <dev/mii/ihphyreg.h>
    142 #include <dev/mii/makphyreg.h>
    143 
    144 #include <dev/pci/pcireg.h>
    145 #include <dev/pci/pcivar.h>
    146 #include <dev/pci/pcidevs.h>
    147 
    148 #include <dev/pci/if_wmreg.h>
    149 #include <dev/pci/if_wmvar.h>
    150 
    151 #ifdef WM_DEBUG
    152 #define	WM_DEBUG_LINK		__BIT(0)
    153 #define	WM_DEBUG_TX		__BIT(1)
    154 #define	WM_DEBUG_RX		__BIT(2)
    155 #define	WM_DEBUG_GMII		__BIT(3)
    156 #define	WM_DEBUG_MANAGE		__BIT(4)
    157 #define	WM_DEBUG_NVM		__BIT(5)
    158 #define	WM_DEBUG_INIT		__BIT(6)
    159 #define	WM_DEBUG_LOCK		__BIT(7)
    160 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    161     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    162 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    163 #else
    164 #define	DPRINTF(x, y)	__nothing
    165 #endif /* WM_DEBUG */
    166 
    167 #ifdef NET_MPSAFE
    168 #define WM_MPSAFE	1
    169 #define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
    170 #define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
    171 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    172 #else
    173 #define WM_CALLOUT_FLAGS	0
    174 #define WM_SOFTINT_FLAGS	0
    175 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
    176 #endif
    177 
    178 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    179 
    180 /*
    181  * This device driver's max interrupt numbers.
    182  */
    183 #define WM_MAX_NQUEUEINTR	16
    184 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    185 
    186 #ifndef WM_DISABLE_MSI
    187 #define	WM_DISABLE_MSI 0
    188 #endif
    189 #ifndef WM_DISABLE_MSIX
    190 #define	WM_DISABLE_MSIX 0
    191 #endif
    192 
    193 int wm_disable_msi = WM_DISABLE_MSI;
    194 int wm_disable_msix = WM_DISABLE_MSIX;
    195 
    196 #ifndef WM_WATCHDOG_TIMEOUT
    197 #define WM_WATCHDOG_TIMEOUT 5
    198 #endif
    199 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    200 
    201 /*
    202  * Transmit descriptor list size.  Due to errata, we can only have
    203  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    204  * on >= 82544. We tell the upper layers that they can queue a lot
    205  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    206  * of them at a time.
    207  *
    208  * We allow up to 64 DMA segments per packet.  Pathological packet
    209  * chains containing many small mbufs have been observed in zero-copy
    210  * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
    211  * m_defrag() is called to reduce it.
    212  */
    213 #define	WM_NTXSEGS		64
    214 #define	WM_IFQUEUELEN		256
    215 #define	WM_TXQUEUELEN_MAX	64
    216 #define	WM_TXQUEUELEN_MAX_82547	16
    217 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    218 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    219 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    220 #define	WM_NTXDESC_82542	256
    221 #define	WM_NTXDESC_82544	4096
    222 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    223 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    224 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    225 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    226 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    227 
    228 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    229 
    230 #define	WM_TXINTERQSIZE		256
    231 
    232 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    233 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    234 #endif
    235 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    236 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    237 #endif
    238 
    239 /*
    240  * Receive descriptor list size.  We have one Rx buffer for normal
    241  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    242  * packet.  We allocate 256 receive descriptors, each with a 2k
    243  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    244  */
    245 #define	WM_NRXDESC		256U
    246 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    247 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    248 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    249 
    250 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    251 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    252 #endif
    253 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    254 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    255 #endif
    256 
    257 typedef union txdescs {
    258 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    259 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    260 } txdescs_t;
    261 
    262 typedef union rxdescs {
    263 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    264 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    265 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    266 } rxdescs_t;
    267 
    268 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    269 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    270 
    271 /*
    272  * Software state for transmit jobs.
    273  */
    274 struct wm_txsoft {
    275 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    276 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    277 	int txs_firstdesc;		/* first descriptor in packet */
    278 	int txs_lastdesc;		/* last descriptor in packet */
    279 	int txs_ndesc;			/* # of descriptors used */
    280 };
    281 
    282 /*
    283  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    284  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    285  * them together.
    286  */
    287 struct wm_rxsoft {
    288 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    289 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    290 };
    291 
    292 #define WM_LINKUP_TIMEOUT	50
    293 
    294 static uint16_t swfwphysem[] = {
    295 	SWFW_PHY0_SM,
    296 	SWFW_PHY1_SM,
    297 	SWFW_PHY2_SM,
    298 	SWFW_PHY3_SM
    299 };
    300 
    301 static const uint32_t wm_82580_rxpbs_table[] = {
    302 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    303 };
    304 
    305 struct wm_softc;
    306 
    307 #ifdef WM_EVENT_COUNTERS
    308 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    309 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    310 	struct evcnt qname##_ev_##evname;
    311 
    312 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    313 	do {								\
    314 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    315 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    316 		    "%s%02d%s", #qname, (qnum), #evname);		\
    317 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    318 		    (evtype), NULL, (xname),				\
    319 		    (q)->qname##_##evname##_evcnt_name);		\
    320 	} while (0)
    321 
    322 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    323 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    324 
    325 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    326 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    327 
    328 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    329 	evcnt_detach(&(q)->qname##_ev_##evname);
    330 #endif /* WM_EVENT_COUNTERS */
    331 
    332 struct wm_txqueue {
    333 	kmutex_t *txq_lock;		/* lock for tx operations */
    334 
    335 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    336 
    337 	/* Software state for the transmit descriptors. */
    338 	int txq_num;			/* must be a power of two */
    339 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    340 
    341 	/* TX control data structures. */
    342 	int txq_ndesc;			/* must be a power of two */
    343 	size_t txq_descsize;		/* a tx descriptor size */
    344 	txdescs_t *txq_descs_u;
    345 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    346 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    347 	int txq_desc_rseg;		/* real number of control segment */
    348 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    349 #define	txq_descs	txq_descs_u->sctxu_txdescs
    350 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    351 
    352 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    353 
    354 	int txq_free;			/* number of free Tx descriptors */
    355 	int txq_next;			/* next ready Tx descriptor */
    356 
    357 	int txq_sfree;			/* number of free Tx jobs */
    358 	int txq_snext;			/* next free Tx job */
    359 	int txq_sdirty;			/* dirty Tx jobs */
    360 
    361 	/* These 4 variables are used only on the 82547. */
    362 	int txq_fifo_size;		/* Tx FIFO size */
    363 	int txq_fifo_head;		/* current head of FIFO */
    364 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    365 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    366 
    367 	/*
    368 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    369 	 * CPUs. This queue intermediate them without block.
    370 	 */
    371 	pcq_t *txq_interq;
    372 
    373 	/*
    374 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
    375 	 * to manage Tx H/W queue's busy flag.
    376 	 */
    377 	int txq_flags;			/* flags for H/W queue, see below */
    378 #define	WM_TXQ_NO_SPACE	0x1
    379 
    380 	bool txq_stopping;
    381 
    382 	bool txq_sending;
    383 	time_t txq_lastsent;
    384 
    385 	/* Checksum flags used for previous packet */
    386 	uint32_t 	txq_last_hw_cmd;
    387 	uint8_t 	txq_last_hw_fields;
    388 	uint16_t	txq_last_hw_ipcs;
    389 	uint16_t	txq_last_hw_tucs;
    390 
    391 	uint32_t txq_packets;		/* for AIM */
    392 	uint32_t txq_bytes;		/* for AIM */
    393 #ifdef WM_EVENT_COUNTERS
    394 	/* TX event counters */
    395 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    396 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    397 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    398 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    399 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    400 					    /* XXX not used? */
    401 
    402 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    403 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    404 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    405 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    406 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    407 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
    408 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    409 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    410 					    /* other than toomanyseg */
    411 
    412 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
    413 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    414 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    415 	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip wring cksum context */
    416 
    417 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    418 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    419 #endif /* WM_EVENT_COUNTERS */
    420 };
    421 
    422 struct wm_rxqueue {
    423 	kmutex_t *rxq_lock;		/* lock for rx operations */
    424 
    425 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    426 
    427 	/* Software state for the receive descriptors. */
    428 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    429 
    430 	/* RX control data structures. */
    431 	int rxq_ndesc;			/* must be a power of two */
    432 	size_t rxq_descsize;		/* a rx descriptor size */
    433 	rxdescs_t *rxq_descs_u;
    434 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    435 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    436 	int rxq_desc_rseg;		/* real number of control segment */
    437 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    438 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    439 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    440 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    441 
    442 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    443 
    444 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    445 	int rxq_discard;
    446 	int rxq_len;
    447 	struct mbuf *rxq_head;
    448 	struct mbuf *rxq_tail;
    449 	struct mbuf **rxq_tailp;
    450 
    451 	bool rxq_stopping;
    452 
    453 	uint32_t rxq_packets;		/* for AIM */
    454 	uint32_t rxq_bytes;		/* for AIM */
    455 #ifdef WM_EVENT_COUNTERS
    456 	/* RX event counters */
    457 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    458 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    459 
    460 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    461 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    462 #endif
    463 };
    464 
    465 struct wm_queue {
    466 	int wmq_id;			/* index of TX/RX queues */
    467 	int wmq_intr_idx;		/* index of MSI-X tables */
    468 
    469 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    470 	bool wmq_set_itr;
    471 
    472 	struct wm_txqueue wmq_txq;
    473 	struct wm_rxqueue wmq_rxq;
    474 
    475 	bool wmq_txrx_use_workqueue;
    476 	struct work wmq_cookie;
    477 	void *wmq_si;
    478 };
    479 
    480 struct wm_phyop {
    481 	int (*acquire)(struct wm_softc *);
    482 	void (*release)(struct wm_softc *);
    483 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    484 	int (*writereg_locked)(device_t, int, int, uint16_t);
    485 	int reset_delay_us;
    486 	bool no_errprint;
    487 };
    488 
    489 struct wm_nvmop {
    490 	int (*acquire)(struct wm_softc *);
    491 	void (*release)(struct wm_softc *);
    492 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    493 };
    494 
    495 /*
    496  * Software state per device.
    497  */
    498 struct wm_softc {
    499 	device_t sc_dev;		/* generic device information */
    500 	bus_space_tag_t sc_st;		/* bus space tag */
    501 	bus_space_handle_t sc_sh;	/* bus space handle */
    502 	bus_size_t sc_ss;		/* bus space size */
    503 	bus_space_tag_t sc_iot;		/* I/O space tag */
    504 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    505 	bus_size_t sc_ios;		/* I/O space size */
    506 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    507 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    508 	bus_size_t sc_flashs;		/* flash registers space size */
    509 	off_t sc_flashreg_offset;	/*
    510 					 * offset to flash registers from
    511 					 * start of BAR
    512 					 */
    513 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    514 
    515 	struct ethercom sc_ethercom;	/* ethernet common data */
    516 	struct mii_data sc_mii;		/* MII/media information */
    517 
    518 	pci_chipset_tag_t sc_pc;
    519 	pcitag_t sc_pcitag;
    520 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    521 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    522 
    523 	uint16_t sc_pcidevid;		/* PCI device ID */
    524 	wm_chip_type sc_type;		/* MAC type */
    525 	int sc_rev;			/* MAC revision */
    526 	wm_phy_type sc_phytype;		/* PHY type */
    527 	uint8_t sc_sfptype;		/* SFP type */
    528 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    529 #define	WM_MEDIATYPE_UNKNOWN		0x00
    530 #define	WM_MEDIATYPE_FIBER		0x01
    531 #define	WM_MEDIATYPE_COPPER		0x02
    532 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    533 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    534 	int sc_flags;			/* flags; see below */
    535 	u_short sc_if_flags;		/* last if_flags */
    536 	int sc_ec_capenable;		/* last ec_capenable */
    537 	int sc_flowflags;		/* 802.3x flow control flags */
    538 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    539 	int sc_align_tweak;
    540 
    541 	void *sc_ihs[WM_MAX_NINTR];	/*
    542 					 * interrupt cookie.
    543 					 * - legacy and msi use sc_ihs[0] only
    544 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    545 					 */
    546 	pci_intr_handle_t *sc_intrs;	/*
    547 					 * legacy and msi use sc_intrs[0] only
    548 					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
    549 					 */
    550 	int sc_nintrs;			/* number of interrupts */
    551 
    552 	int sc_link_intr_idx;		/* index of MSI-X tables */
    553 
    554 	callout_t sc_tick_ch;		/* tick callout */
    555 	bool sc_core_stopping;
    556 
    557 	int sc_nvm_ver_major;
    558 	int sc_nvm_ver_minor;
    559 	int sc_nvm_ver_build;
    560 	int sc_nvm_addrbits;		/* NVM address bits */
    561 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    562 	int sc_ich8_flash_base;
    563 	int sc_ich8_flash_bank_size;
    564 	int sc_nvm_k1_enabled;
    565 
    566 	int sc_nqueues;
    567 	struct wm_queue *sc_queue;
    568 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    569 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    570 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    571 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    572 	struct workqueue *sc_queue_wq;
    573 	bool sc_txrx_use_workqueue;
    574 
    575 	int sc_affinity_offset;
    576 
    577 #ifdef WM_EVENT_COUNTERS
    578 	/* Event counters. */
    579 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    580 
    581 	/* WM_T_82542_2_1 only */
    582 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    583 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    584 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    585 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    586 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    587 #endif /* WM_EVENT_COUNTERS */
    588 
    589 	struct sysctllog *sc_sysctllog;
    590 
    591 	/* This variable are used only on the 82547. */
    592 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    593 
    594 	uint32_t sc_ctrl;		/* prototype CTRL register */
    595 #if 0
    596 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    597 #endif
    598 	uint32_t sc_icr;		/* prototype interrupt bits */
    599 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    600 	uint32_t sc_tctl;		/* prototype TCTL register */
    601 	uint32_t sc_rctl;		/* prototype RCTL register */
    602 	uint32_t sc_txcw;		/* prototype TXCW register */
    603 	uint32_t sc_tipg;		/* prototype TIPG register */
    604 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    605 	uint32_t sc_pba;		/* prototype PBA register */
    606 
    607 	int sc_tbi_linkup;		/* TBI link status */
    608 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    609 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    610 
    611 	int sc_mchash_type;		/* multicast filter offset */
    612 
    613 	krndsource_t rnd_source;	/* random source */
    614 
    615 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    616 
    617 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    618 	kmutex_t *sc_ich_phymtx;	/*
    619 					 * 82574/82583/ICH/PCH specific PHY
    620 					 * mutex. For 82574/82583, the mutex
    621 					 * is used for both PHY and NVM.
    622 					 */
    623 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    624 
    625 	struct wm_phyop phy;
    626 	struct wm_nvmop nvm;
    627 };
    628 
    629 #define WM_CORE_LOCK(_sc)						\
    630 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    631 #define WM_CORE_UNLOCK(_sc)						\
    632 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    633 #define WM_CORE_LOCKED(_sc)						\
    634 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    635 
    636 #define	WM_RXCHAIN_RESET(rxq)						\
    637 do {									\
    638 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    639 	*(rxq)->rxq_tailp = NULL;					\
    640 	(rxq)->rxq_len = 0;						\
    641 } while (/*CONSTCOND*/0)
    642 
    643 #define	WM_RXCHAIN_LINK(rxq, m)						\
    644 do {									\
    645 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    646 	(rxq)->rxq_tailp = &(m)->m_next;				\
    647 } while (/*CONSTCOND*/0)
    648 
    649 #ifdef WM_EVENT_COUNTERS
    650 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    651 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    652 
    653 #define WM_Q_EVCNT_INCR(qname, evname)			\
    654 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    655 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    656 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    657 #else /* !WM_EVENT_COUNTERS */
    658 #define	WM_EVCNT_INCR(ev)	/* nothing */
    659 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    660 
    661 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    662 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    663 #endif /* !WM_EVENT_COUNTERS */
    664 
    665 #define	CSR_READ(sc, reg)						\
    666 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    667 #define	CSR_WRITE(sc, reg, val)						\
    668 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    669 #define	CSR_WRITE_FLUSH(sc)						\
    670 	(void)CSR_READ((sc), WMREG_STATUS)
    671 
    672 #define ICH8_FLASH_READ32(sc, reg)					\
    673 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    674 	    (reg) + sc->sc_flashreg_offset)
    675 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    676 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    677 	    (reg) + sc->sc_flashreg_offset, (data))
    678 
    679 #define ICH8_FLASH_READ16(sc, reg)					\
    680 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    681 	    (reg) + sc->sc_flashreg_offset)
    682 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    683 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    684 	    (reg) + sc->sc_flashreg_offset, (data))
    685 
    686 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    687 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    688 
    689 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    690 #define	WM_CDTXADDR_HI(txq, x)						\
    691 	(sizeof(bus_addr_t) == 8 ?					\
    692 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    693 
    694 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    695 #define	WM_CDRXADDR_HI(rxq, x)						\
    696 	(sizeof(bus_addr_t) == 8 ?					\
    697 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    698 
    699 /*
    700  * Register read/write functions.
    701  * Other than CSR_{READ|WRITE}().
    702  */
    703 #if 0
    704 static inline uint32_t wm_io_read(struct wm_softc *, int);
    705 #endif
    706 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    707 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    708     uint32_t, uint32_t);
    709 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    710 
    711 /*
    712  * Descriptor sync/init functions.
    713  */
    714 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    715 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    716 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    717 
    718 /*
    719  * Device driver interface functions and commonly used functions.
    720  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    721  */
    722 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    723 static int	wm_match(device_t, cfdata_t, void *);
    724 static void	wm_attach(device_t, device_t, void *);
    725 static int	wm_detach(device_t, int);
    726 static bool	wm_suspend(device_t, const pmf_qual_t *);
    727 static bool	wm_resume(device_t, const pmf_qual_t *);
    728 static void	wm_watchdog(struct ifnet *);
    729 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    730     uint16_t *);
    731 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    732     uint16_t *);
    733 static void	wm_tick(void *);
    734 static int	wm_ifflags_cb(struct ethercom *);
    735 static int	wm_ioctl(struct ifnet *, u_long, void *);
    736 /* MAC address related */
    737 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    738 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    739 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    740 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    741 static int	wm_rar_count(struct wm_softc *);
    742 static void	wm_set_filter(struct wm_softc *);
    743 /* Reset and init related */
    744 static void	wm_set_vlan(struct wm_softc *);
    745 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    746 static void	wm_get_auto_rd_done(struct wm_softc *);
    747 static void	wm_lan_init_done(struct wm_softc *);
    748 static void	wm_get_cfg_done(struct wm_softc *);
    749 static int	wm_phy_post_reset(struct wm_softc *);
    750 static int	wm_write_smbus_addr(struct wm_softc *);
    751 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    752 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    753 static void	wm_initialize_hardware_bits(struct wm_softc *);
    754 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    755 static int	wm_reset_phy(struct wm_softc *);
    756 static void	wm_flush_desc_rings(struct wm_softc *);
    757 static void	wm_reset(struct wm_softc *);
    758 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    759 static void	wm_rxdrain(struct wm_rxqueue *);
    760 static void	wm_init_rss(struct wm_softc *);
    761 static void	wm_adjust_qnum(struct wm_softc *, int);
    762 static inline bool	wm_is_using_msix(struct wm_softc *);
    763 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    764 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
    765 static int	wm_setup_legacy(struct wm_softc *);
    766 static int	wm_setup_msix(struct wm_softc *);
    767 static int	wm_init(struct ifnet *);
    768 static int	wm_init_locked(struct ifnet *);
    769 static void	wm_init_sysctls(struct wm_softc *);
    770 static void	wm_unset_stopping_flags(struct wm_softc *);
    771 static void	wm_set_stopping_flags(struct wm_softc *);
    772 static void	wm_stop(struct ifnet *, int);
    773 static void	wm_stop_locked(struct ifnet *, bool, bool);
    774 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    775 static void	wm_82547_txfifo_stall(void *);
    776 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    777 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    778 /* DMA related */
    779 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    780 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    781 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    782 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    783     struct wm_txqueue *);
    784 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    785 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    786 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    787     struct wm_rxqueue *);
    788 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    789 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    790 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    791 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    792 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    793 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    794 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    795     struct wm_txqueue *);
    796 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    797     struct wm_rxqueue *);
    798 static int	wm_alloc_txrx_queues(struct wm_softc *);
    799 static void	wm_free_txrx_queues(struct wm_softc *);
    800 static int	wm_init_txrx_queues(struct wm_softc *);
    801 /* Start */
    802 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    803     struct wm_txsoft *, uint32_t *, uint8_t *);
    804 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    805 static void	wm_start(struct ifnet *);
    806 static void	wm_start_locked(struct ifnet *);
    807 static int	wm_transmit(struct ifnet *, struct mbuf *);
    808 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    809 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    810 		    bool);
    811 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    812     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    813 static void	wm_nq_start(struct ifnet *);
    814 static void	wm_nq_start_locked(struct ifnet *);
    815 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    816 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    817 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    818 		    bool);
    819 static void	wm_deferred_start_locked(struct wm_txqueue *);
    820 static void	wm_handle_queue(void *);
    821 static void	wm_handle_queue_work(struct work *, void *);
    822 /* Interrupt */
    823 static bool	wm_txeof(struct wm_txqueue *, u_int);
    824 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    825 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    826 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    827 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    828 static void	wm_linkintr(struct wm_softc *, uint32_t);
    829 static int	wm_intr_legacy(void *);
    830 static inline void	wm_txrxintr_disable(struct wm_queue *);
    831 static inline void	wm_txrxintr_enable(struct wm_queue *);
    832 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    833 static int	wm_txrxintr_msix(void *);
    834 static int	wm_linkintr_msix(void *);
    835 
    836 /*
    837  * Media related.
    838  * GMII, SGMII, TBI, SERDES and SFP.
    839  */
    840 /* Common */
    841 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    842 /* GMII related */
    843 static void	wm_gmii_reset(struct wm_softc *);
    844 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    845 static int	wm_get_phy_id_82575(struct wm_softc *);
    846 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    847 static int	wm_gmii_mediachange(struct ifnet *);
    848 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    849 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    850 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    851 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    852 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    853 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    854 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    855 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    856 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    857 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    858 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    859 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    860 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    861 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    862 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    863 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    864 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    865 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    866 	bool);
    867 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    868 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    869 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    870 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    871 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    872 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    873 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    874 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    875 static void	wm_gmii_statchg(struct ifnet *);
    876 /*
    877  * kumeran related (80003, ICH* and PCH*).
    878  * These functions are not for accessing MII registers but for accessing
    879  * kumeran specific registers.
    880  */
    881 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    882 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    883 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    884 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    885 /* EMI register related */
    886 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    887 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    888 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    889 /* SGMII */
    890 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    891 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
    892 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    893 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    894 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    895 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    896 /* TBI related */
    897 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    898 static void	wm_tbi_mediainit(struct wm_softc *);
    899 static int	wm_tbi_mediachange(struct ifnet *);
    900 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    901 static int	wm_check_for_link(struct wm_softc *);
    902 static void	wm_tbi_tick(struct wm_softc *);
    903 /* SERDES related */
    904 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    905 static int	wm_serdes_mediachange(struct ifnet *);
    906 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    907 static void	wm_serdes_tick(struct wm_softc *);
    908 /* SFP related */
    909 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    910 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    911 
    912 /*
    913  * NVM related.
    914  * Microwire, SPI (w/wo EERD) and Flash.
    915  */
    916 /* Misc functions */
    917 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    918 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    919 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    920 /* Microwire */
    921 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    922 /* SPI */
    923 static int	wm_nvm_ready_spi(struct wm_softc *);
    924 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    925 /* Using with EERD */
    926 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    927 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    928 /* Flash */
    929 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    930     unsigned int *);
    931 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    932 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    933 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    934     uint32_t *);
    935 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    936 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    937 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    938 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    939 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    940 /* iNVM */
    941 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    942 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    943 /* Lock, detecting NVM type, validate checksum and read */
    944 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    945 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    946 static int	wm_nvm_validate_checksum(struct wm_softc *);
    947 static void	wm_nvm_version_invm(struct wm_softc *);
    948 static void	wm_nvm_version(struct wm_softc *);
    949 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    950 
    951 /*
    952  * Hardware semaphores.
    953  * Very complexed...
    954  */
    955 static int	wm_get_null(struct wm_softc *);
    956 static void	wm_put_null(struct wm_softc *);
    957 static int	wm_get_eecd(struct wm_softc *);
    958 static void	wm_put_eecd(struct wm_softc *);
    959 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    960 static void	wm_put_swsm_semaphore(struct wm_softc *);
    961 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    962 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    963 static int	wm_get_nvm_80003(struct wm_softc *);
    964 static void	wm_put_nvm_80003(struct wm_softc *);
    965 static int	wm_get_nvm_82571(struct wm_softc *);
    966 static void	wm_put_nvm_82571(struct wm_softc *);
    967 static int	wm_get_phy_82575(struct wm_softc *);
    968 static void	wm_put_phy_82575(struct wm_softc *);
    969 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    970 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    971 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    972 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    973 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    974 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    975 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    976 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    977 
    978 /*
    979  * Management mode and power management related subroutines.
    980  * BMC, AMT, suspend/resume and EEE.
    981  */
    982 #if 0
    983 static int	wm_check_mng_mode(struct wm_softc *);
    984 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    985 static int	wm_check_mng_mode_82574(struct wm_softc *);
    986 static int	wm_check_mng_mode_generic(struct wm_softc *);
    987 #endif
    988 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    989 static bool	wm_phy_resetisblocked(struct wm_softc *);
    990 static void	wm_get_hw_control(struct wm_softc *);
    991 static void	wm_release_hw_control(struct wm_softc *);
    992 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    993 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
    994 static void	wm_init_manageability(struct wm_softc *);
    995 static void	wm_release_manageability(struct wm_softc *);
    996 static void	wm_get_wakeup(struct wm_softc *);
    997 static int	wm_ulp_disable(struct wm_softc *);
    998 static int	wm_enable_phy_wakeup(struct wm_softc *);
    999 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
   1000 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
   1001 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
   1002 static void	wm_enable_wakeup(struct wm_softc *);
   1003 static void	wm_disable_aspm(struct wm_softc *);
   1004 /* LPLU (Low Power Link Up) */
   1005 static void	wm_lplu_d0_disable(struct wm_softc *);
   1006 /* EEE */
   1007 static int	wm_set_eee_i350(struct wm_softc *);
   1008 static int	wm_set_eee_pchlan(struct wm_softc *);
   1009 static int	wm_set_eee(struct wm_softc *);
   1010 
   1011 /*
   1012  * Workarounds (mainly PHY related).
   1013  * Basically, PHY's workarounds are in the PHY drivers.
   1014  */
   1015 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1016 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1017 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1018 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1019 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
   1020 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
   1021 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1022 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1023 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1024 static int	wm_k1_workaround_lv(struct wm_softc *);
   1025 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1026 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1027 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1028 static void	wm_reset_init_script_82575(struct wm_softc *);
   1029 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1030 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1031 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1032 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1033 static int	wm_pll_workaround_i210(struct wm_softc *);
   1034 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1035 
   1036 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1037     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1038 
   1039 /*
   1040  * Devices supported by this driver.
   1041  */
   1042 static const struct wm_product {
   1043 	pci_vendor_id_t		wmp_vendor;
   1044 	pci_product_id_t	wmp_product;
   1045 	const char		*wmp_name;
   1046 	wm_chip_type		wmp_type;
   1047 	uint32_t		wmp_flags;
   1048 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1049 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1050 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1051 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1052 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1053 } wm_products[] = {
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1055 	  "Intel i82542 1000BASE-X Ethernet",
   1056 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1057 
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1059 	  "Intel i82543GC 1000BASE-X Ethernet",
   1060 	  WM_T_82543,		WMP_F_FIBER },
   1061 
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1063 	  "Intel i82543GC 1000BASE-T Ethernet",
   1064 	  WM_T_82543,		WMP_F_COPPER },
   1065 
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1067 	  "Intel i82544EI 1000BASE-T Ethernet",
   1068 	  WM_T_82544,		WMP_F_COPPER },
   1069 
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1071 	  "Intel i82544EI 1000BASE-X Ethernet",
   1072 	  WM_T_82544,		WMP_F_FIBER },
   1073 
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1075 	  "Intel i82544GC 1000BASE-T Ethernet",
   1076 	  WM_T_82544,		WMP_F_COPPER },
   1077 
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1079 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1080 	  WM_T_82544,		WMP_F_COPPER },
   1081 
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1083 	  "Intel i82540EM 1000BASE-T Ethernet",
   1084 	  WM_T_82540,		WMP_F_COPPER },
   1085 
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1087 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1088 	  WM_T_82540,		WMP_F_COPPER },
   1089 
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1091 	  "Intel i82540EP 1000BASE-T Ethernet",
   1092 	  WM_T_82540,		WMP_F_COPPER },
   1093 
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1095 	  "Intel i82540EP 1000BASE-T Ethernet",
   1096 	  WM_T_82540,		WMP_F_COPPER },
   1097 
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1099 	  "Intel i82540EP 1000BASE-T Ethernet",
   1100 	  WM_T_82540,		WMP_F_COPPER },
   1101 
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1103 	  "Intel i82545EM 1000BASE-T Ethernet",
   1104 	  WM_T_82545,		WMP_F_COPPER },
   1105 
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1107 	  "Intel i82545GM 1000BASE-T Ethernet",
   1108 	  WM_T_82545_3,		WMP_F_COPPER },
   1109 
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1111 	  "Intel i82545GM 1000BASE-X Ethernet",
   1112 	  WM_T_82545_3,		WMP_F_FIBER },
   1113 
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1115 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1116 	  WM_T_82545_3,		WMP_F_SERDES },
   1117 
   1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1119 	  "Intel i82546EB 1000BASE-T Ethernet",
   1120 	  WM_T_82546,		WMP_F_COPPER },
   1121 
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1123 	  "Intel i82546EB 1000BASE-T Ethernet",
   1124 	  WM_T_82546,		WMP_F_COPPER },
   1125 
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1127 	  "Intel i82545EM 1000BASE-X Ethernet",
   1128 	  WM_T_82545,		WMP_F_FIBER },
   1129 
   1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1131 	  "Intel i82546EB 1000BASE-X Ethernet",
   1132 	  WM_T_82546,		WMP_F_FIBER },
   1133 
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1135 	  "Intel i82546GB 1000BASE-T Ethernet",
   1136 	  WM_T_82546_3,		WMP_F_COPPER },
   1137 
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1139 	  "Intel i82546GB 1000BASE-X Ethernet",
   1140 	  WM_T_82546_3,		WMP_F_FIBER },
   1141 
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1143 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1144 	  WM_T_82546_3,		WMP_F_SERDES },
   1145 
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1147 	  "i82546GB quad-port Gigabit Ethernet",
   1148 	  WM_T_82546_3,		WMP_F_COPPER },
   1149 
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1151 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1152 	  WM_T_82546_3,		WMP_F_COPPER },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1155 	  "Intel PRO/1000MT (82546GB)",
   1156 	  WM_T_82546_3,		WMP_F_COPPER },
   1157 
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1159 	  "Intel i82541EI 1000BASE-T Ethernet",
   1160 	  WM_T_82541,		WMP_F_COPPER },
   1161 
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1163 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1164 	  WM_T_82541,		WMP_F_COPPER },
   1165 
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1167 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1168 	  WM_T_82541,		WMP_F_COPPER },
   1169 
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1171 	  "Intel i82541ER 1000BASE-T Ethernet",
   1172 	  WM_T_82541_2,		WMP_F_COPPER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1175 	  "Intel i82541GI 1000BASE-T Ethernet",
   1176 	  WM_T_82541_2,		WMP_F_COPPER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1179 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1180 	  WM_T_82541_2,		WMP_F_COPPER },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1183 	  "Intel i82541PI 1000BASE-T Ethernet",
   1184 	  WM_T_82541_2,		WMP_F_COPPER },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1187 	  "Intel i82547EI 1000BASE-T Ethernet",
   1188 	  WM_T_82547,		WMP_F_COPPER },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1191 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1192 	  WM_T_82547,		WMP_F_COPPER },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1195 	  "Intel i82547GI 1000BASE-T Ethernet",
   1196 	  WM_T_82547_2,		WMP_F_COPPER },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1199 	  "Intel PRO/1000 PT (82571EB)",
   1200 	  WM_T_82571,		WMP_F_COPPER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1203 	  "Intel PRO/1000 PF (82571EB)",
   1204 	  WM_T_82571,		WMP_F_FIBER },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1207 	  "Intel PRO/1000 PB (82571EB)",
   1208 	  WM_T_82571,		WMP_F_SERDES },
   1209 
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1211 	  "Intel PRO/1000 QT (82571EB)",
   1212 	  WM_T_82571,		WMP_F_COPPER },
   1213 
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1215 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1216 	  WM_T_82571,		WMP_F_COPPER },
   1217 
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1219 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1220 	  WM_T_82571,		WMP_F_COPPER },
   1221 
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1223 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1224 	  WM_T_82571,		WMP_F_SERDES },
   1225 
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1227 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1228 	  WM_T_82571,		WMP_F_SERDES },
   1229 
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1231 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1232 	  WM_T_82571,		WMP_F_FIBER },
   1233 
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1235 	  "Intel i82572EI 1000baseT Ethernet",
   1236 	  WM_T_82572,		WMP_F_COPPER },
   1237 
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1239 	  "Intel i82572EI 1000baseX Ethernet",
   1240 	  WM_T_82572,		WMP_F_FIBER },
   1241 
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1243 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1244 	  WM_T_82572,		WMP_F_SERDES },
   1245 
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1247 	  "Intel i82572EI 1000baseT Ethernet",
   1248 	  WM_T_82572,		WMP_F_COPPER },
   1249 
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1251 	  "Intel i82573E",
   1252 	  WM_T_82573,		WMP_F_COPPER },
   1253 
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1255 	  "Intel i82573E IAMT",
   1256 	  WM_T_82573,		WMP_F_COPPER },
   1257 
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1259 	  "Intel i82573L Gigabit Ethernet",
   1260 	  WM_T_82573,		WMP_F_COPPER },
   1261 
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1263 	  "Intel i82574L",
   1264 	  WM_T_82574,		WMP_F_COPPER },
   1265 
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1267 	  "Intel i82574L",
   1268 	  WM_T_82574,		WMP_F_COPPER },
   1269 
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1271 	  "Intel i82583V",
   1272 	  WM_T_82583,		WMP_F_COPPER },
   1273 
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1275 	  "i80003 dual 1000baseT Ethernet",
   1276 	  WM_T_80003,		WMP_F_COPPER },
   1277 
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1279 	  "i80003 dual 1000baseX Ethernet",
   1280 	  WM_T_80003,		WMP_F_COPPER },
   1281 
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1283 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1284 	  WM_T_80003,		WMP_F_SERDES },
   1285 
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1287 	  "Intel i80003 1000baseT Ethernet",
   1288 	  WM_T_80003,		WMP_F_COPPER },
   1289 
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1291 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1292 	  WM_T_80003,		WMP_F_SERDES },
   1293 
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1295 	  "Intel i82801H (M_AMT) LAN Controller",
   1296 	  WM_T_ICH8,		WMP_F_COPPER },
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1298 	  "Intel i82801H (AMT) LAN Controller",
   1299 	  WM_T_ICH8,		WMP_F_COPPER },
   1300 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1301 	  "Intel i82801H LAN Controller",
   1302 	  WM_T_ICH8,		WMP_F_COPPER },
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1304 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1305 	  WM_T_ICH8,		WMP_F_COPPER },
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1307 	  "Intel i82801H (M) LAN Controller",
   1308 	  WM_T_ICH8,		WMP_F_COPPER },
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1310 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1311 	  WM_T_ICH8,		WMP_F_COPPER },
   1312 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1313 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1314 	  WM_T_ICH8,		WMP_F_COPPER },
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1316 	  "82567V-3 LAN Controller",
   1317 	  WM_T_ICH8,		WMP_F_COPPER },
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1319 	  "82801I (AMT) LAN Controller",
   1320 	  WM_T_ICH9,		WMP_F_COPPER },
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1322 	  "82801I 10/100 LAN Controller",
   1323 	  WM_T_ICH9,		WMP_F_COPPER },
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1325 	  "82801I (G) 10/100 LAN Controller",
   1326 	  WM_T_ICH9,		WMP_F_COPPER },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1328 	  "82801I (GT) 10/100 LAN Controller",
   1329 	  WM_T_ICH9,		WMP_F_COPPER },
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1331 	  "82801I (C) LAN Controller",
   1332 	  WM_T_ICH9,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1334 	  "82801I mobile LAN Controller",
   1335 	  WM_T_ICH9,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1337 	  "82801I mobile (V) LAN Controller",
   1338 	  WM_T_ICH9,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1340 	  "82801I mobile (AMT) LAN Controller",
   1341 	  WM_T_ICH9,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1343 	  "82567LM-4 LAN Controller",
   1344 	  WM_T_ICH9,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1346 	  "82567LM-2 LAN Controller",
   1347 	  WM_T_ICH10,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1349 	  "82567LF-2 LAN Controller",
   1350 	  WM_T_ICH10,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1352 	  "82567LM-3 LAN Controller",
   1353 	  WM_T_ICH10,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1355 	  "82567LF-3 LAN Controller",
   1356 	  WM_T_ICH10,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1358 	  "82567V-2 LAN Controller",
   1359 	  WM_T_ICH10,		WMP_F_COPPER },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1361 	  "82567V-3? LAN Controller",
   1362 	  WM_T_ICH10,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1364 	  "HANKSVILLE LAN Controller",
   1365 	  WM_T_ICH10,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1367 	  "PCH LAN (82577LM) Controller",
   1368 	  WM_T_PCH,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1370 	  "PCH LAN (82577LC) Controller",
   1371 	  WM_T_PCH,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1373 	  "PCH LAN (82578DM) Controller",
   1374 	  WM_T_PCH,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1376 	  "PCH LAN (82578DC) Controller",
   1377 	  WM_T_PCH,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1379 	  "PCH2 LAN (82579LM) Controller",
   1380 	  WM_T_PCH2,		WMP_F_COPPER },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1382 	  "PCH2 LAN (82579V) Controller",
   1383 	  WM_T_PCH2,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1385 	  "82575EB dual-1000baseT Ethernet",
   1386 	  WM_T_82575,		WMP_F_COPPER },
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1388 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1389 	  WM_T_82575,		WMP_F_SERDES },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1391 	  "82575GB quad-1000baseT Ethernet",
   1392 	  WM_T_82575,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1394 	  "82575GB quad-1000baseT Ethernet (PM)",
   1395 	  WM_T_82575,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1397 	  "82576 1000BaseT Ethernet",
   1398 	  WM_T_82576,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1400 	  "82576 1000BaseX Ethernet",
   1401 	  WM_T_82576,		WMP_F_FIBER },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1404 	  "82576 gigabit Ethernet (SERDES)",
   1405 	  WM_T_82576,		WMP_F_SERDES },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1408 	  "82576 quad-1000BaseT Ethernet",
   1409 	  WM_T_82576,		WMP_F_COPPER },
   1410 
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1412 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1413 	  WM_T_82576,		WMP_F_COPPER },
   1414 
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1416 	  "82576 gigabit Ethernet",
   1417 	  WM_T_82576,		WMP_F_COPPER },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1420 	  "82576 gigabit Ethernet (SERDES)",
   1421 	  WM_T_82576,		WMP_F_SERDES },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1423 	  "82576 quad-gigabit Ethernet (SERDES)",
   1424 	  WM_T_82576,		WMP_F_SERDES },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1427 	  "82580 1000BaseT Ethernet",
   1428 	  WM_T_82580,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1430 	  "82580 1000BaseX Ethernet",
   1431 	  WM_T_82580,		WMP_F_FIBER },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1434 	  "82580 1000BaseT Ethernet (SERDES)",
   1435 	  WM_T_82580,		WMP_F_SERDES },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1438 	  "82580 gigabit Ethernet (SGMII)",
   1439 	  WM_T_82580,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1441 	  "82580 dual-1000BaseT Ethernet",
   1442 	  WM_T_82580,		WMP_F_COPPER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1445 	  "82580 quad-1000BaseX Ethernet",
   1446 	  WM_T_82580,		WMP_F_FIBER },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1449 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1450 	  WM_T_82580,		WMP_F_COPPER },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1453 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1454 	  WM_T_82580,		WMP_F_SERDES },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1457 	  "DH89XXCC 1000BASE-KX Ethernet",
   1458 	  WM_T_82580,		WMP_F_SERDES },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1461 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1462 	  WM_T_82580,		WMP_F_SERDES },
   1463 
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1465 	  "I350 Gigabit Network Connection",
   1466 	  WM_T_I350,		WMP_F_COPPER },
   1467 
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1469 	  "I350 Gigabit Fiber Network Connection",
   1470 	  WM_T_I350,		WMP_F_FIBER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1473 	  "I350 Gigabit Backplane Connection",
   1474 	  WM_T_I350,		WMP_F_SERDES },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1477 	  "I350 Quad Port Gigabit Ethernet",
   1478 	  WM_T_I350,		WMP_F_SERDES },
   1479 
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1481 	  "I350 Gigabit Connection",
   1482 	  WM_T_I350,		WMP_F_COPPER },
   1483 
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1485 	  "I354 Gigabit Ethernet (KX)",
   1486 	  WM_T_I354,		WMP_F_SERDES },
   1487 
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1489 	  "I354 Gigabit Ethernet (SGMII)",
   1490 	  WM_T_I354,		WMP_F_COPPER },
   1491 
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1493 	  "I354 Gigabit Ethernet (2.5G)",
   1494 	  WM_T_I354,		WMP_F_COPPER },
   1495 
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1497 	  "I210-T1 Ethernet Server Adapter",
   1498 	  WM_T_I210,		WMP_F_COPPER },
   1499 
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1501 	  "I210 Ethernet (Copper OEM)",
   1502 	  WM_T_I210,		WMP_F_COPPER },
   1503 
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1505 	  "I210 Ethernet (Copper IT)",
   1506 	  WM_T_I210,		WMP_F_COPPER },
   1507 
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1509 	  "I210 Ethernet (Copper, FLASH less)",
   1510 	  WM_T_I210,		WMP_F_COPPER },
   1511 
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1513 	  "I210 Gigabit Ethernet (Fiber)",
   1514 	  WM_T_I210,		WMP_F_FIBER },
   1515 
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1517 	  "I210 Gigabit Ethernet (SERDES)",
   1518 	  WM_T_I210,		WMP_F_SERDES },
   1519 
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1521 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1522 	  WM_T_I210,		WMP_F_SERDES },
   1523 
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1525 	  "I210 Gigabit Ethernet (SGMII)",
   1526 	  WM_T_I210,		WMP_F_COPPER },
   1527 
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1529 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1530 	  WM_T_I210,		WMP_F_COPPER },
   1531 
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1533 	  "I211 Ethernet (COPPER)",
   1534 	  WM_T_I211,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1536 	  "I217 V Ethernet Connection",
   1537 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1539 	  "I217 LM Ethernet Connection",
   1540 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1542 	  "I218 V Ethernet Connection",
   1543 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1544 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1545 	  "I218 V Ethernet Connection",
   1546 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1548 	  "I218 V Ethernet Connection",
   1549 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1551 	  "I218 LM Ethernet Connection",
   1552 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1553 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1554 	  "I218 LM Ethernet Connection",
   1555 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1556 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1557 	  "I218 LM Ethernet Connection",
   1558 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1560 	  "I219 LM Ethernet Connection",
   1561 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1562 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1563 	  "I219 LM Ethernet Connection",
   1564 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1566 	  "I219 LM Ethernet Connection",
   1567 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1569 	  "I219 LM Ethernet Connection",
   1570 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1572 	  "I219 LM Ethernet Connection",
   1573 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1575 	  "I219 LM Ethernet Connection",
   1576 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1577 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1578 	  "I219 LM Ethernet Connection",
   1579 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1581 	  "I219 LM Ethernet Connection",
   1582 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1584 	  "I219 LM Ethernet Connection",
   1585 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1587 	  "I219 LM Ethernet Connection",
   1588 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1589 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1590 	  "I219 LM Ethernet Connection",
   1591 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1593 	  "I219 LM Ethernet Connection",
   1594 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1596 	  "I219 LM Ethernet Connection",
   1597 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1599 	  "I219 LM Ethernet Connection",
   1600 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1602 	  "I219 LM Ethernet Connection",
   1603 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1605 	  "I219 V Ethernet Connection",
   1606 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1608 	  "I219 V Ethernet Connection",
   1609 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1611 	  "I219 V Ethernet Connection",
   1612 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1614 	  "I219 V Ethernet Connection",
   1615 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1617 	  "I219 V Ethernet Connection",
   1618 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1620 	  "I219 V Ethernet Connection",
   1621 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1623 	  "I219 V Ethernet Connection",
   1624 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1626 	  "I219 V Ethernet Connection",
   1627 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1629 	  "I219 V Ethernet Connection",
   1630 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1632 	  "I219 V Ethernet Connection",
   1633 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1635 	  "I219 V Ethernet Connection",
   1636 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1638 	  "I219 V Ethernet Connection",
   1639 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1641 	  "I219 V Ethernet Connection",
   1642 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1643 	{ 0,			0,
   1644 	  NULL,
   1645 	  0,			0 },
   1646 };
   1647 
   1648 /*
   1649  * Register read/write functions.
   1650  * Other than CSR_{READ|WRITE}().
   1651  */
   1652 
   1653 #if 0 /* Not currently used */
   1654 static inline uint32_t
   1655 wm_io_read(struct wm_softc *sc, int reg)
   1656 {
   1657 
   1658 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1659 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1660 }
   1661 #endif
   1662 
   1663 static inline void
   1664 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1665 {
   1666 
   1667 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1668 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1669 }
   1670 
   1671 static inline void
   1672 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1673     uint32_t data)
   1674 {
   1675 	uint32_t regval;
   1676 	int i;
   1677 
   1678 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1679 
   1680 	CSR_WRITE(sc, reg, regval);
   1681 
   1682 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1683 		delay(5);
   1684 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1685 			break;
   1686 	}
   1687 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1688 		aprint_error("%s: WARNING:"
   1689 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1690 		    device_xname(sc->sc_dev), reg);
   1691 	}
   1692 }
   1693 
   1694 static inline void
   1695 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1696 {
   1697 	wa->wa_low = htole32(v & 0xffffffffU);
   1698 	if (sizeof(bus_addr_t) == 8)
   1699 		wa->wa_high = htole32((uint64_t) v >> 32);
   1700 	else
   1701 		wa->wa_high = 0;
   1702 }
   1703 
   1704 /*
   1705  * Descriptor sync/init functions.
   1706  */
   1707 static inline void
   1708 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1709 {
   1710 	struct wm_softc *sc = txq->txq_sc;
   1711 
   1712 	/* If it will wrap around, sync to the end of the ring. */
   1713 	if ((start + num) > WM_NTXDESC(txq)) {
   1714 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1715 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1716 		    (WM_NTXDESC(txq) - start), ops);
   1717 		num -= (WM_NTXDESC(txq) - start);
   1718 		start = 0;
   1719 	}
   1720 
   1721 	/* Now sync whatever is left. */
   1722 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1723 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1724 }
   1725 
   1726 static inline void
   1727 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1728 {
   1729 	struct wm_softc *sc = rxq->rxq_sc;
   1730 
   1731 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1732 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1733 }
   1734 
   1735 static inline void
   1736 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1737 {
   1738 	struct wm_softc *sc = rxq->rxq_sc;
   1739 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1740 	struct mbuf *m = rxs->rxs_mbuf;
   1741 
   1742 	/*
   1743 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1744 	 * so that the payload after the Ethernet header is aligned
   1745 	 * to a 4-byte boundary.
   1746 
   1747 	 * XXX BRAINDAMAGE ALERT!
   1748 	 * The stupid chip uses the same size for every buffer, which
   1749 	 * is set in the Receive Control register.  We are using the 2K
   1750 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1751 	 * reason, we can't "scoot" packets longer than the standard
   1752 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1753 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1754 	 * the upper layer copy the headers.
   1755 	 */
   1756 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1757 
   1758 	if (sc->sc_type == WM_T_82574) {
   1759 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1760 		rxd->erx_data.erxd_addr =
   1761 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1762 		rxd->erx_data.erxd_dd = 0;
   1763 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1764 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1765 
   1766 		rxd->nqrx_data.nrxd_paddr =
   1767 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1768 		/* Currently, split header is not supported. */
   1769 		rxd->nqrx_data.nrxd_haddr = 0;
   1770 	} else {
   1771 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1772 
   1773 		wm_set_dma_addr(&rxd->wrx_addr,
   1774 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1775 		rxd->wrx_len = 0;
   1776 		rxd->wrx_cksum = 0;
   1777 		rxd->wrx_status = 0;
   1778 		rxd->wrx_errors = 0;
   1779 		rxd->wrx_special = 0;
   1780 	}
   1781 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1782 
   1783 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1784 }
   1785 
   1786 /*
   1787  * Device driver interface functions and commonly used functions.
   1788  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1789  */
   1790 
   1791 /* Lookup supported device table */
   1792 static const struct wm_product *
   1793 wm_lookup(const struct pci_attach_args *pa)
   1794 {
   1795 	const struct wm_product *wmp;
   1796 
   1797 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1798 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1799 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1800 			return wmp;
   1801 	}
   1802 	return NULL;
   1803 }
   1804 
   1805 /* The match function (ca_match) */
   1806 static int
   1807 wm_match(device_t parent, cfdata_t cf, void *aux)
   1808 {
   1809 	struct pci_attach_args *pa = aux;
   1810 
   1811 	if (wm_lookup(pa) != NULL)
   1812 		return 1;
   1813 
   1814 	return 0;
   1815 }
   1816 
   1817 /* The attach function (ca_attach) */
   1818 static void
   1819 wm_attach(device_t parent, device_t self, void *aux)
   1820 {
   1821 	struct wm_softc *sc = device_private(self);
   1822 	struct pci_attach_args *pa = aux;
   1823 	prop_dictionary_t dict;
   1824 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1825 	pci_chipset_tag_t pc = pa->pa_pc;
   1826 	int counts[PCI_INTR_TYPE_SIZE];
   1827 	pci_intr_type_t max_type;
   1828 	const char *eetype, *xname;
   1829 	bus_space_tag_t memt;
   1830 	bus_space_handle_t memh;
   1831 	bus_size_t memsize;
   1832 	int memh_valid;
   1833 	int i, error;
   1834 	const struct wm_product *wmp;
   1835 	prop_data_t ea;
   1836 	prop_number_t pn;
   1837 	uint8_t enaddr[ETHER_ADDR_LEN];
   1838 	char buf[256];
   1839 	char wqname[MAXCOMLEN];
   1840 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1841 	pcireg_t preg, memtype;
   1842 	uint16_t eeprom_data, apme_mask;
   1843 	bool force_clear_smbi;
   1844 	uint32_t link_mode;
   1845 	uint32_t reg;
   1846 
   1847 	sc->sc_dev = self;
   1848 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1849 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1850 	sc->sc_core_stopping = false;
   1851 
   1852 	wmp = wm_lookup(pa);
   1853 #ifdef DIAGNOSTIC
   1854 	if (wmp == NULL) {
   1855 		printf("\n");
   1856 		panic("wm_attach: impossible");
   1857 	}
   1858 #endif
   1859 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1860 
   1861 	sc->sc_pc = pa->pa_pc;
   1862 	sc->sc_pcitag = pa->pa_tag;
   1863 
   1864 	if (pci_dma64_available(pa))
   1865 		sc->sc_dmat = pa->pa_dmat64;
   1866 	else
   1867 		sc->sc_dmat = pa->pa_dmat;
   1868 
   1869 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1870 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1871 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1872 
   1873 	sc->sc_type = wmp->wmp_type;
   1874 
   1875 	/* Set default function pointers */
   1876 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1877 	sc->phy.release = sc->nvm.release = wm_put_null;
   1878 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1879 
   1880 	if (sc->sc_type < WM_T_82543) {
   1881 		if (sc->sc_rev < 2) {
   1882 			aprint_error_dev(sc->sc_dev,
   1883 			    "i82542 must be at least rev. 2\n");
   1884 			return;
   1885 		}
   1886 		if (sc->sc_rev < 3)
   1887 			sc->sc_type = WM_T_82542_2_0;
   1888 	}
   1889 
   1890 	/*
   1891 	 * Disable MSI for Errata:
   1892 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1893 	 *
   1894 	 *  82544: Errata 25
   1895 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1896 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1897 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1898 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1899 	 *
   1900 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1901 	 *
   1902 	 *  82571 & 82572: Errata 63
   1903 	 */
   1904 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1905 	    || (sc->sc_type == WM_T_82572))
   1906 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1907 
   1908 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1909 	    || (sc->sc_type == WM_T_82580)
   1910 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1911 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1912 		sc->sc_flags |= WM_F_NEWQUEUE;
   1913 
   1914 	/* Set device properties (mactype) */
   1915 	dict = device_properties(sc->sc_dev);
   1916 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1917 
   1918 	/*
   1919 	 * Map the device.  All devices support memory-mapped acccess,
   1920 	 * and it is really required for normal operation.
   1921 	 */
   1922 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1923 	switch (memtype) {
   1924 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1925 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1926 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1927 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1928 		break;
   1929 	default:
   1930 		memh_valid = 0;
   1931 		break;
   1932 	}
   1933 
   1934 	if (memh_valid) {
   1935 		sc->sc_st = memt;
   1936 		sc->sc_sh = memh;
   1937 		sc->sc_ss = memsize;
   1938 	} else {
   1939 		aprint_error_dev(sc->sc_dev,
   1940 		    "unable to map device registers\n");
   1941 		return;
   1942 	}
   1943 
   1944 	/*
   1945 	 * In addition, i82544 and later support I/O mapped indirect
   1946 	 * register access.  It is not desirable (nor supported in
   1947 	 * this driver) to use it for normal operation, though it is
   1948 	 * required to work around bugs in some chip versions.
   1949 	 */
   1950 	if (sc->sc_type >= WM_T_82544) {
   1951 		/* First we have to find the I/O BAR. */
   1952 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1953 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1954 			if (memtype == PCI_MAPREG_TYPE_IO)
   1955 				break;
   1956 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1957 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1958 				i += 4;	/* skip high bits, too */
   1959 		}
   1960 		if (i < PCI_MAPREG_END) {
   1961 			/*
   1962 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
   1963 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
   1964 			 * It's no problem because newer chips has no this
   1965 			 * bug.
   1966 			 *
   1967 			 * The i8254x doesn't apparently respond when the
   1968 			 * I/O BAR is 0, which looks somewhat like it's not
   1969 			 * been configured.
   1970 			 */
   1971 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1972 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1973 				aprint_error_dev(sc->sc_dev,
   1974 				    "WARNING: I/O BAR at zero.\n");
   1975 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1976 					0, &sc->sc_iot, &sc->sc_ioh,
   1977 					NULL, &sc->sc_ios) == 0) {
   1978 				sc->sc_flags |= WM_F_IOH_VALID;
   1979 			} else
   1980 				aprint_error_dev(sc->sc_dev,
   1981 				    "WARNING: unable to map I/O space\n");
   1982 		}
   1983 
   1984 	}
   1985 
   1986 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1987 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1988 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1989 	if (sc->sc_type < WM_T_82542_2_1)
   1990 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1991 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1992 
   1993 	/* Power up chip */
   1994 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1995 	    && error != EOPNOTSUPP) {
   1996 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1997 		return;
   1998 	}
   1999 
   2000 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2001 	/*
   2002 	 *  Don't use MSI-X if we can use only one queue to save interrupt
   2003 	 * resource.
   2004 	 */
   2005 	if (sc->sc_nqueues > 1) {
   2006 		max_type = PCI_INTR_TYPE_MSIX;
   2007 		/*
   2008 		 *  82583 has a MSI-X capability in the PCI configuration space
   2009 		 * but it doesn't support it. At least the document doesn't
   2010 		 * say anything about MSI-X.
   2011 		 */
   2012 		counts[PCI_INTR_TYPE_MSIX]
   2013 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2014 	} else {
   2015 		max_type = PCI_INTR_TYPE_MSI;
   2016 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2017 	}
   2018 
   2019 	/* Allocation settings */
   2020 	counts[PCI_INTR_TYPE_MSI] = 1;
   2021 	counts[PCI_INTR_TYPE_INTX] = 1;
   2022 	/* overridden by disable flags */
   2023 	if (wm_disable_msi != 0) {
   2024 		counts[PCI_INTR_TYPE_MSI] = 0;
   2025 		if (wm_disable_msix != 0) {
   2026 			max_type = PCI_INTR_TYPE_INTX;
   2027 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2028 		}
   2029 	} else if (wm_disable_msix != 0) {
   2030 		max_type = PCI_INTR_TYPE_MSI;
   2031 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2032 	}
   2033 
   2034 alloc_retry:
   2035 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2036 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2037 		return;
   2038 	}
   2039 
   2040 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2041 		error = wm_setup_msix(sc);
   2042 		if (error) {
   2043 			pci_intr_release(pc, sc->sc_intrs,
   2044 			    counts[PCI_INTR_TYPE_MSIX]);
   2045 
   2046 			/* Setup for MSI: Disable MSI-X */
   2047 			max_type = PCI_INTR_TYPE_MSI;
   2048 			counts[PCI_INTR_TYPE_MSI] = 1;
   2049 			counts[PCI_INTR_TYPE_INTX] = 1;
   2050 			goto alloc_retry;
   2051 		}
   2052 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2053 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2054 		error = wm_setup_legacy(sc);
   2055 		if (error) {
   2056 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2057 			    counts[PCI_INTR_TYPE_MSI]);
   2058 
   2059 			/* The next try is for INTx: Disable MSI */
   2060 			max_type = PCI_INTR_TYPE_INTX;
   2061 			counts[PCI_INTR_TYPE_INTX] = 1;
   2062 			goto alloc_retry;
   2063 		}
   2064 	} else {
   2065 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2066 		error = wm_setup_legacy(sc);
   2067 		if (error) {
   2068 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2069 			    counts[PCI_INTR_TYPE_INTX]);
   2070 			return;
   2071 		}
   2072 	}
   2073 
   2074 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2075 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2076 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2077 	    WM_WORKQUEUE_FLAGS);
   2078 	if (error) {
   2079 		aprint_error_dev(sc->sc_dev,
   2080 		    "unable to create workqueue\n");
   2081 		goto out;
   2082 	}
   2083 
   2084 	/*
   2085 	 * Check the function ID (unit number of the chip).
   2086 	 */
   2087 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2088 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2089 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2090 	    || (sc->sc_type == WM_T_82580)
   2091 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2092 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2093 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2094 	else
   2095 		sc->sc_funcid = 0;
   2096 
   2097 	/*
   2098 	 * Determine a few things about the bus we're connected to.
   2099 	 */
   2100 	if (sc->sc_type < WM_T_82543) {
   2101 		/* We don't really know the bus characteristics here. */
   2102 		sc->sc_bus_speed = 33;
   2103 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2104 		/*
   2105 		 * CSA (Communication Streaming Architecture) is about as fast
   2106 		 * a 32-bit 66MHz PCI Bus.
   2107 		 */
   2108 		sc->sc_flags |= WM_F_CSA;
   2109 		sc->sc_bus_speed = 66;
   2110 		aprint_verbose_dev(sc->sc_dev,
   2111 		    "Communication Streaming Architecture\n");
   2112 		if (sc->sc_type == WM_T_82547) {
   2113 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2114 			callout_setfunc(&sc->sc_txfifo_ch,
   2115 			    wm_82547_txfifo_stall, sc);
   2116 			aprint_verbose_dev(sc->sc_dev,
   2117 			    "using 82547 Tx FIFO stall work-around\n");
   2118 		}
   2119 	} else if (sc->sc_type >= WM_T_82571) {
   2120 		sc->sc_flags |= WM_F_PCIE;
   2121 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2122 		    && (sc->sc_type != WM_T_ICH10)
   2123 		    && (sc->sc_type != WM_T_PCH)
   2124 		    && (sc->sc_type != WM_T_PCH2)
   2125 		    && (sc->sc_type != WM_T_PCH_LPT)
   2126 		    && (sc->sc_type != WM_T_PCH_SPT)
   2127 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2128 			/* ICH* and PCH* have no PCIe capability registers */
   2129 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2130 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2131 				NULL) == 0)
   2132 				aprint_error_dev(sc->sc_dev,
   2133 				    "unable to find PCIe capability\n");
   2134 		}
   2135 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2136 	} else {
   2137 		reg = CSR_READ(sc, WMREG_STATUS);
   2138 		if (reg & STATUS_BUS64)
   2139 			sc->sc_flags |= WM_F_BUS64;
   2140 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2141 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2142 
   2143 			sc->sc_flags |= WM_F_PCIX;
   2144 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2145 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2146 				aprint_error_dev(sc->sc_dev,
   2147 				    "unable to find PCIX capability\n");
   2148 			else if (sc->sc_type != WM_T_82545_3 &&
   2149 				 sc->sc_type != WM_T_82546_3) {
   2150 				/*
   2151 				 * Work around a problem caused by the BIOS
   2152 				 * setting the max memory read byte count
   2153 				 * incorrectly.
   2154 				 */
   2155 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2156 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2157 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2158 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2159 
   2160 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2161 				    PCIX_CMD_BYTECNT_SHIFT;
   2162 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2163 				    PCIX_STATUS_MAXB_SHIFT;
   2164 				if (bytecnt > maxb) {
   2165 					aprint_verbose_dev(sc->sc_dev,
   2166 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2167 					    512 << bytecnt, 512 << maxb);
   2168 					pcix_cmd = (pcix_cmd &
   2169 					    ~PCIX_CMD_BYTECNT_MASK) |
   2170 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2171 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2172 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2173 					    pcix_cmd);
   2174 				}
   2175 			}
   2176 		}
   2177 		/*
   2178 		 * The quad port adapter is special; it has a PCIX-PCIX
   2179 		 * bridge on the board, and can run the secondary bus at
   2180 		 * a higher speed.
   2181 		 */
   2182 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2183 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2184 								      : 66;
   2185 		} else if (sc->sc_flags & WM_F_PCIX) {
   2186 			switch (reg & STATUS_PCIXSPD_MASK) {
   2187 			case STATUS_PCIXSPD_50_66:
   2188 				sc->sc_bus_speed = 66;
   2189 				break;
   2190 			case STATUS_PCIXSPD_66_100:
   2191 				sc->sc_bus_speed = 100;
   2192 				break;
   2193 			case STATUS_PCIXSPD_100_133:
   2194 				sc->sc_bus_speed = 133;
   2195 				break;
   2196 			default:
   2197 				aprint_error_dev(sc->sc_dev,
   2198 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2199 				    reg & STATUS_PCIXSPD_MASK);
   2200 				sc->sc_bus_speed = 66;
   2201 				break;
   2202 			}
   2203 		} else
   2204 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2205 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2206 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2207 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2208 	}
   2209 
   2210 	/* clear interesting stat counters */
   2211 	CSR_READ(sc, WMREG_COLC);
   2212 	CSR_READ(sc, WMREG_RXERRC);
   2213 
   2214 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2215 	    || (sc->sc_type >= WM_T_ICH8))
   2216 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2217 	if (sc->sc_type >= WM_T_ICH8)
   2218 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2219 
   2220 	/* Set PHY, NVM mutex related stuff */
   2221 	switch (sc->sc_type) {
   2222 	case WM_T_82542_2_0:
   2223 	case WM_T_82542_2_1:
   2224 	case WM_T_82543:
   2225 	case WM_T_82544:
   2226 		/* Microwire */
   2227 		sc->nvm.read = wm_nvm_read_uwire;
   2228 		sc->sc_nvm_wordsize = 64;
   2229 		sc->sc_nvm_addrbits = 6;
   2230 		break;
   2231 	case WM_T_82540:
   2232 	case WM_T_82545:
   2233 	case WM_T_82545_3:
   2234 	case WM_T_82546:
   2235 	case WM_T_82546_3:
   2236 		/* Microwire */
   2237 		sc->nvm.read = wm_nvm_read_uwire;
   2238 		reg = CSR_READ(sc, WMREG_EECD);
   2239 		if (reg & EECD_EE_SIZE) {
   2240 			sc->sc_nvm_wordsize = 256;
   2241 			sc->sc_nvm_addrbits = 8;
   2242 		} else {
   2243 			sc->sc_nvm_wordsize = 64;
   2244 			sc->sc_nvm_addrbits = 6;
   2245 		}
   2246 		sc->sc_flags |= WM_F_LOCK_EECD;
   2247 		sc->nvm.acquire = wm_get_eecd;
   2248 		sc->nvm.release = wm_put_eecd;
   2249 		break;
   2250 	case WM_T_82541:
   2251 	case WM_T_82541_2:
   2252 	case WM_T_82547:
   2253 	case WM_T_82547_2:
   2254 		reg = CSR_READ(sc, WMREG_EECD);
   2255 		/*
   2256 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
   2257 		 * on 8254[17], so set flags and functios before calling it.
   2258 		 */
   2259 		sc->sc_flags |= WM_F_LOCK_EECD;
   2260 		sc->nvm.acquire = wm_get_eecd;
   2261 		sc->nvm.release = wm_put_eecd;
   2262 		if (reg & EECD_EE_TYPE) {
   2263 			/* SPI */
   2264 			sc->nvm.read = wm_nvm_read_spi;
   2265 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2266 			wm_nvm_set_addrbits_size_eecd(sc);
   2267 		} else {
   2268 			/* Microwire */
   2269 			sc->nvm.read = wm_nvm_read_uwire;
   2270 			if ((reg & EECD_EE_ABITS) != 0) {
   2271 				sc->sc_nvm_wordsize = 256;
   2272 				sc->sc_nvm_addrbits = 8;
   2273 			} else {
   2274 				sc->sc_nvm_wordsize = 64;
   2275 				sc->sc_nvm_addrbits = 6;
   2276 			}
   2277 		}
   2278 		break;
   2279 	case WM_T_82571:
   2280 	case WM_T_82572:
   2281 		/* SPI */
   2282 		sc->nvm.read = wm_nvm_read_eerd;
   2283 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2284 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2285 		wm_nvm_set_addrbits_size_eecd(sc);
   2286 		sc->phy.acquire = wm_get_swsm_semaphore;
   2287 		sc->phy.release = wm_put_swsm_semaphore;
   2288 		sc->nvm.acquire = wm_get_nvm_82571;
   2289 		sc->nvm.release = wm_put_nvm_82571;
   2290 		break;
   2291 	case WM_T_82573:
   2292 	case WM_T_82574:
   2293 	case WM_T_82583:
   2294 		sc->nvm.read = wm_nvm_read_eerd;
   2295 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2296 		if (sc->sc_type == WM_T_82573) {
   2297 			sc->phy.acquire = wm_get_swsm_semaphore;
   2298 			sc->phy.release = wm_put_swsm_semaphore;
   2299 			sc->nvm.acquire = wm_get_nvm_82571;
   2300 			sc->nvm.release = wm_put_nvm_82571;
   2301 		} else {
   2302 			/* Both PHY and NVM use the same semaphore. */
   2303 			sc->phy.acquire = sc->nvm.acquire
   2304 			    = wm_get_swfwhw_semaphore;
   2305 			sc->phy.release = sc->nvm.release
   2306 			    = wm_put_swfwhw_semaphore;
   2307 		}
   2308 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2309 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2310 			sc->sc_nvm_wordsize = 2048;
   2311 		} else {
   2312 			/* SPI */
   2313 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2314 			wm_nvm_set_addrbits_size_eecd(sc);
   2315 		}
   2316 		break;
   2317 	case WM_T_82575:
   2318 	case WM_T_82576:
   2319 	case WM_T_82580:
   2320 	case WM_T_I350:
   2321 	case WM_T_I354:
   2322 	case WM_T_80003:
   2323 		/* SPI */
   2324 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2325 		wm_nvm_set_addrbits_size_eecd(sc);
   2326 		if ((sc->sc_type == WM_T_80003)
   2327 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2328 			sc->nvm.read = wm_nvm_read_eerd;
   2329 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2330 		} else {
   2331 			sc->nvm.read = wm_nvm_read_spi;
   2332 			sc->sc_flags |= WM_F_LOCK_EECD;
   2333 		}
   2334 		sc->phy.acquire = wm_get_phy_82575;
   2335 		sc->phy.release = wm_put_phy_82575;
   2336 		sc->nvm.acquire = wm_get_nvm_80003;
   2337 		sc->nvm.release = wm_put_nvm_80003;
   2338 		break;
   2339 	case WM_T_ICH8:
   2340 	case WM_T_ICH9:
   2341 	case WM_T_ICH10:
   2342 	case WM_T_PCH:
   2343 	case WM_T_PCH2:
   2344 	case WM_T_PCH_LPT:
   2345 		sc->nvm.read = wm_nvm_read_ich8;
   2346 		/* FLASH */
   2347 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2348 		sc->sc_nvm_wordsize = 2048;
   2349 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2350 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2351 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2352 			aprint_error_dev(sc->sc_dev,
   2353 			    "can't map FLASH registers\n");
   2354 			goto out;
   2355 		}
   2356 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2357 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2358 		    ICH_FLASH_SECTOR_SIZE;
   2359 		sc->sc_ich8_flash_bank_size =
   2360 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2361 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2362 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2363 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2364 		sc->sc_flashreg_offset = 0;
   2365 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2366 		sc->phy.release = wm_put_swflag_ich8lan;
   2367 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2368 		sc->nvm.release = wm_put_nvm_ich8lan;
   2369 		break;
   2370 	case WM_T_PCH_SPT:
   2371 	case WM_T_PCH_CNP:
   2372 		sc->nvm.read = wm_nvm_read_spt;
   2373 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2374 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2375 		sc->sc_flasht = sc->sc_st;
   2376 		sc->sc_flashh = sc->sc_sh;
   2377 		sc->sc_ich8_flash_base = 0;
   2378 		sc->sc_nvm_wordsize =
   2379 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2380 		    * NVM_SIZE_MULTIPLIER;
   2381 		/* It is size in bytes, we want words */
   2382 		sc->sc_nvm_wordsize /= 2;
   2383 		/* Assume 2 banks */
   2384 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2385 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2386 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2387 		sc->phy.release = wm_put_swflag_ich8lan;
   2388 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2389 		sc->nvm.release = wm_put_nvm_ich8lan;
   2390 		break;
   2391 	case WM_T_I210:
   2392 	case WM_T_I211:
   2393 		/* Allow a single clear of the SW semaphore on I210 and newer*/
   2394 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2395 		if (wm_nvm_flash_presence_i210(sc)) {
   2396 			sc->nvm.read = wm_nvm_read_eerd;
   2397 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2398 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2399 			wm_nvm_set_addrbits_size_eecd(sc);
   2400 		} else {
   2401 			sc->nvm.read = wm_nvm_read_invm;
   2402 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2403 			sc->sc_nvm_wordsize = INVM_SIZE;
   2404 		}
   2405 		sc->phy.acquire = wm_get_phy_82575;
   2406 		sc->phy.release = wm_put_phy_82575;
   2407 		sc->nvm.acquire = wm_get_nvm_80003;
   2408 		sc->nvm.release = wm_put_nvm_80003;
   2409 		break;
   2410 	default:
   2411 		break;
   2412 	}
   2413 
   2414 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2415 	switch (sc->sc_type) {
   2416 	case WM_T_82571:
   2417 	case WM_T_82572:
   2418 		reg = CSR_READ(sc, WMREG_SWSM2);
   2419 		if ((reg & SWSM2_LOCK) == 0) {
   2420 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2421 			force_clear_smbi = true;
   2422 		} else
   2423 			force_clear_smbi = false;
   2424 		break;
   2425 	case WM_T_82573:
   2426 	case WM_T_82574:
   2427 	case WM_T_82583:
   2428 		force_clear_smbi = true;
   2429 		break;
   2430 	default:
   2431 		force_clear_smbi = false;
   2432 		break;
   2433 	}
   2434 	if (force_clear_smbi) {
   2435 		reg = CSR_READ(sc, WMREG_SWSM);
   2436 		if ((reg & SWSM_SMBI) != 0)
   2437 			aprint_error_dev(sc->sc_dev,
   2438 			    "Please update the Bootagent\n");
   2439 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2440 	}
   2441 
   2442 	/*
   2443 	 * Defer printing the EEPROM type until after verifying the checksum
   2444 	 * This allows the EEPROM type to be printed correctly in the case
   2445 	 * that no EEPROM is attached.
   2446 	 */
   2447 	/*
   2448 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2449 	 * this for later, so we can fail future reads from the EEPROM.
   2450 	 */
   2451 	if (wm_nvm_validate_checksum(sc)) {
   2452 		/*
   2453 		 * Read twice again because some PCI-e parts fail the
   2454 		 * first check due to the link being in sleep state.
   2455 		 */
   2456 		if (wm_nvm_validate_checksum(sc))
   2457 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2458 	}
   2459 
   2460 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2461 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2462 	else {
   2463 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2464 		    sc->sc_nvm_wordsize);
   2465 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2466 			aprint_verbose("iNVM");
   2467 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2468 			aprint_verbose("FLASH(HW)");
   2469 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2470 			aprint_verbose("FLASH");
   2471 		else {
   2472 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2473 				eetype = "SPI";
   2474 			else
   2475 				eetype = "MicroWire";
   2476 			aprint_verbose("(%d address bits) %s EEPROM",
   2477 			    sc->sc_nvm_addrbits, eetype);
   2478 		}
   2479 	}
   2480 	wm_nvm_version(sc);
   2481 	aprint_verbose("\n");
   2482 
   2483 	/*
   2484 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2485 	 * incorrect.
   2486 	 */
   2487 	wm_gmii_setup_phytype(sc, 0, 0);
   2488 
   2489 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2490 	switch (sc->sc_type) {
   2491 	case WM_T_ICH8:
   2492 	case WM_T_ICH9:
   2493 	case WM_T_ICH10:
   2494 	case WM_T_PCH:
   2495 	case WM_T_PCH2:
   2496 	case WM_T_PCH_LPT:
   2497 	case WM_T_PCH_SPT:
   2498 	case WM_T_PCH_CNP:
   2499 		apme_mask = WUC_APME;
   2500 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2501 		if ((eeprom_data & apme_mask) != 0)
   2502 			sc->sc_flags |= WM_F_WOL;
   2503 		break;
   2504 	default:
   2505 		break;
   2506 	}
   2507 
   2508 	/* Reset the chip to a known state. */
   2509 	wm_reset(sc);
   2510 
   2511 	/*
   2512 	 * Check for I21[01] PLL workaround.
   2513 	 *
   2514 	 * Three cases:
   2515 	 * a) Chip is I211.
   2516 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2517 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2518 	 */
   2519 	if (sc->sc_type == WM_T_I211)
   2520 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2521 	if (sc->sc_type == WM_T_I210) {
   2522 		if (!wm_nvm_flash_presence_i210(sc))
   2523 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2524 		else if ((sc->sc_nvm_ver_major < 3)
   2525 		    || ((sc->sc_nvm_ver_major == 3)
   2526 			&& (sc->sc_nvm_ver_minor < 25))) {
   2527 			aprint_verbose_dev(sc->sc_dev,
   2528 			    "ROM image version %d.%d is older than 3.25\n",
   2529 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2530 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2531 		}
   2532 	}
   2533 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2534 		wm_pll_workaround_i210(sc);
   2535 
   2536 	wm_get_wakeup(sc);
   2537 
   2538 	/* Non-AMT based hardware can now take control from firmware */
   2539 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2540 		wm_get_hw_control(sc);
   2541 
   2542 	/*
   2543 	 * Read the Ethernet address from the EEPROM, if not first found
   2544 	 * in device properties.
   2545 	 */
   2546 	ea = prop_dictionary_get(dict, "mac-address");
   2547 	if (ea != NULL) {
   2548 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2549 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2550 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2551 	} else {
   2552 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2553 			aprint_error_dev(sc->sc_dev,
   2554 			    "unable to read Ethernet address\n");
   2555 			goto out;
   2556 		}
   2557 	}
   2558 
   2559 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2560 	    ether_sprintf(enaddr));
   2561 
   2562 	/*
   2563 	 * Read the config info from the EEPROM, and set up various
   2564 	 * bits in the control registers based on their contents.
   2565 	 */
   2566 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2567 	if (pn != NULL) {
   2568 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2569 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2570 	} else {
   2571 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2572 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2573 			goto out;
   2574 		}
   2575 	}
   2576 
   2577 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2578 	if (pn != NULL) {
   2579 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2580 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2581 	} else {
   2582 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2583 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2584 			goto out;
   2585 		}
   2586 	}
   2587 
   2588 	/* check for WM_F_WOL */
   2589 	switch (sc->sc_type) {
   2590 	case WM_T_82542_2_0:
   2591 	case WM_T_82542_2_1:
   2592 	case WM_T_82543:
   2593 		/* dummy? */
   2594 		eeprom_data = 0;
   2595 		apme_mask = NVM_CFG3_APME;
   2596 		break;
   2597 	case WM_T_82544:
   2598 		apme_mask = NVM_CFG2_82544_APM_EN;
   2599 		eeprom_data = cfg2;
   2600 		break;
   2601 	case WM_T_82546:
   2602 	case WM_T_82546_3:
   2603 	case WM_T_82571:
   2604 	case WM_T_82572:
   2605 	case WM_T_82573:
   2606 	case WM_T_82574:
   2607 	case WM_T_82583:
   2608 	case WM_T_80003:
   2609 	case WM_T_82575:
   2610 	case WM_T_82576:
   2611 		apme_mask = NVM_CFG3_APME;
   2612 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2613 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2614 		break;
   2615 	case WM_T_82580:
   2616 	case WM_T_I350:
   2617 	case WM_T_I354:
   2618 	case WM_T_I210:
   2619 	case WM_T_I211:
   2620 		apme_mask = NVM_CFG3_APME;
   2621 		wm_nvm_read(sc,
   2622 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2623 		    1, &eeprom_data);
   2624 		break;
   2625 	case WM_T_ICH8:
   2626 	case WM_T_ICH9:
   2627 	case WM_T_ICH10:
   2628 	case WM_T_PCH:
   2629 	case WM_T_PCH2:
   2630 	case WM_T_PCH_LPT:
   2631 	case WM_T_PCH_SPT:
   2632 	case WM_T_PCH_CNP:
   2633 		/* Already checked before wm_reset () */
   2634 		apme_mask = eeprom_data = 0;
   2635 		break;
   2636 	default: /* XXX 82540 */
   2637 		apme_mask = NVM_CFG3_APME;
   2638 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2639 		break;
   2640 	}
   2641 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2642 	if ((eeprom_data & apme_mask) != 0)
   2643 		sc->sc_flags |= WM_F_WOL;
   2644 
   2645 	/*
   2646 	 * We have the eeprom settings, now apply the special cases
   2647 	 * where the eeprom may be wrong or the board won't support
   2648 	 * wake on lan on a particular port
   2649 	 */
   2650 	switch (sc->sc_pcidevid) {
   2651 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2652 		sc->sc_flags &= ~WM_F_WOL;
   2653 		break;
   2654 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2655 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2656 		/* Wake events only supported on port A for dual fiber
   2657 		 * regardless of eeprom setting */
   2658 		if (sc->sc_funcid == 1)
   2659 			sc->sc_flags &= ~WM_F_WOL;
   2660 		break;
   2661 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2662 		/* If quad port adapter, disable WoL on all but port A */
   2663 		if (sc->sc_funcid != 0)
   2664 			sc->sc_flags &= ~WM_F_WOL;
   2665 		break;
   2666 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2667 		/* Wake events only supported on port A for dual fiber
   2668 		 * regardless of eeprom setting */
   2669 		if (sc->sc_funcid == 1)
   2670 			sc->sc_flags &= ~WM_F_WOL;
   2671 		break;
   2672 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2673 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2674 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2675 		/* If quad port adapter, disable WoL on all but port A */
   2676 		if (sc->sc_funcid != 0)
   2677 			sc->sc_flags &= ~WM_F_WOL;
   2678 		break;
   2679 	}
   2680 
   2681 	if (sc->sc_type >= WM_T_82575) {
   2682 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2683 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2684 			    nvmword);
   2685 			if ((sc->sc_type == WM_T_82575) ||
   2686 			    (sc->sc_type == WM_T_82576)) {
   2687 				/* Check NVM for autonegotiation */
   2688 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2689 				    != 0)
   2690 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2691 			}
   2692 			if ((sc->sc_type == WM_T_82575) ||
   2693 			    (sc->sc_type == WM_T_I350)) {
   2694 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2695 					sc->sc_flags |= WM_F_MAS;
   2696 			}
   2697 		}
   2698 	}
   2699 
   2700 	/*
   2701 	 * XXX need special handling for some multiple port cards
   2702 	 * to disable a paticular port.
   2703 	 */
   2704 
   2705 	if (sc->sc_type >= WM_T_82544) {
   2706 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2707 		if (pn != NULL) {
   2708 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2709 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2710 		} else {
   2711 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2712 				aprint_error_dev(sc->sc_dev,
   2713 				    "unable to read SWDPIN\n");
   2714 				goto out;
   2715 			}
   2716 		}
   2717 	}
   2718 
   2719 	if (cfg1 & NVM_CFG1_ILOS)
   2720 		sc->sc_ctrl |= CTRL_ILOS;
   2721 
   2722 	/*
   2723 	 * XXX
   2724 	 * This code isn't correct because pin 2 and 3 are located
   2725 	 * in different position on newer chips. Check all datasheet.
   2726 	 *
   2727 	 * Until resolve this problem, check if a chip < 82580
   2728 	 */
   2729 	if (sc->sc_type <= WM_T_82580) {
   2730 		if (sc->sc_type >= WM_T_82544) {
   2731 			sc->sc_ctrl |=
   2732 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2733 			    CTRL_SWDPIO_SHIFT;
   2734 			sc->sc_ctrl |=
   2735 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2736 			    CTRL_SWDPINS_SHIFT;
   2737 		} else {
   2738 			sc->sc_ctrl |=
   2739 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2740 			    CTRL_SWDPIO_SHIFT;
   2741 		}
   2742 	}
   2743 
   2744 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2745 		wm_nvm_read(sc,
   2746 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2747 		    1, &nvmword);
   2748 		if (nvmword & NVM_CFG3_ILOS)
   2749 			sc->sc_ctrl |= CTRL_ILOS;
   2750 	}
   2751 
   2752 #if 0
   2753 	if (sc->sc_type >= WM_T_82544) {
   2754 		if (cfg1 & NVM_CFG1_IPS0)
   2755 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2756 		if (cfg1 & NVM_CFG1_IPS1)
   2757 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2758 		sc->sc_ctrl_ext |=
   2759 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2760 		    CTRL_EXT_SWDPIO_SHIFT;
   2761 		sc->sc_ctrl_ext |=
   2762 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2763 		    CTRL_EXT_SWDPINS_SHIFT;
   2764 	} else {
   2765 		sc->sc_ctrl_ext |=
   2766 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2767 		    CTRL_EXT_SWDPIO_SHIFT;
   2768 	}
   2769 #endif
   2770 
   2771 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2772 #if 0
   2773 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2774 #endif
   2775 
   2776 	if (sc->sc_type == WM_T_PCH) {
   2777 		uint16_t val;
   2778 
   2779 		/* Save the NVM K1 bit setting */
   2780 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2781 
   2782 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2783 			sc->sc_nvm_k1_enabled = 1;
   2784 		else
   2785 			sc->sc_nvm_k1_enabled = 0;
   2786 	}
   2787 
   2788 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2789 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2790 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2791 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2792 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2793 	    || sc->sc_type == WM_T_82573
   2794 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2795 		/* Copper only */
   2796 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2797 	    || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
   2798 	    || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
   2799 	    || (sc->sc_type ==WM_T_I211)) {
   2800 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2801 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2802 		switch (link_mode) {
   2803 		case CTRL_EXT_LINK_MODE_1000KX:
   2804 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2805 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2806 			break;
   2807 		case CTRL_EXT_LINK_MODE_SGMII:
   2808 			if (wm_sgmii_uses_mdio(sc)) {
   2809 				aprint_normal_dev(sc->sc_dev,
   2810 				    "SGMII(MDIO)\n");
   2811 				sc->sc_flags |= WM_F_SGMII;
   2812 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2813 				break;
   2814 			}
   2815 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2816 			/*FALLTHROUGH*/
   2817 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2818 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2819 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2820 				if (link_mode
   2821 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2822 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2823 					sc->sc_flags |= WM_F_SGMII;
   2824 					aprint_verbose_dev(sc->sc_dev,
   2825 					    "SGMII\n");
   2826 				} else {
   2827 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2828 					aprint_verbose_dev(sc->sc_dev,
   2829 					    "SERDES\n");
   2830 				}
   2831 				break;
   2832 			}
   2833 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2834 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2835 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2836 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2837 				sc->sc_flags |= WM_F_SGMII;
   2838 			}
   2839 			/* Do not change link mode for 100BaseFX */
   2840 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2841 				break;
   2842 
   2843 			/* Change current link mode setting */
   2844 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2845 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2846 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2847 			else
   2848 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2849 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2850 			break;
   2851 		case CTRL_EXT_LINK_MODE_GMII:
   2852 		default:
   2853 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2854 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2855 			break;
   2856 		}
   2857 
   2858 		reg &= ~CTRL_EXT_I2C_ENA;
   2859 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2860 			reg |= CTRL_EXT_I2C_ENA;
   2861 		else
   2862 			reg &= ~CTRL_EXT_I2C_ENA;
   2863 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2864 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2865 			wm_gmii_setup_phytype(sc, 0, 0);
   2866 			wm_reset_mdicnfg_82580(sc);
   2867 		}
   2868 	} else if (sc->sc_type < WM_T_82543 ||
   2869 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2870 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2871 			aprint_error_dev(sc->sc_dev,
   2872 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2873 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2874 		}
   2875 	} else {
   2876 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2877 			aprint_error_dev(sc->sc_dev,
   2878 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2879 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2880 		}
   2881 	}
   2882 
   2883 	if (sc->sc_type >= WM_T_PCH2)
   2884 		sc->sc_flags |= WM_F_EEE;
   2885 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2886 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2887 		/* XXX: Need special handling for I354. (not yet) */
   2888 		if (sc->sc_type != WM_T_I354)
   2889 			sc->sc_flags |= WM_F_EEE;
   2890 	}
   2891 
   2892 	/*
   2893 	 * The I350 has a bug where it always strips the CRC whether
   2894 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   2895 	 */
   2896 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2897 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2898 		sc->sc_flags |= WM_F_CRC_STRIP;
   2899 
   2900 	/* Set device properties (macflags) */
   2901 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2902 
   2903 	if (sc->sc_flags != 0) {
   2904 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2905 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2906 	}
   2907 
   2908 #ifdef WM_MPSAFE
   2909 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2910 #else
   2911 	sc->sc_core_lock = NULL;
   2912 #endif
   2913 
   2914 	/* Initialize the media structures accordingly. */
   2915 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2916 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2917 	else
   2918 		wm_tbi_mediainit(sc); /* All others */
   2919 
   2920 	ifp = &sc->sc_ethercom.ec_if;
   2921 	xname = device_xname(sc->sc_dev);
   2922 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2923 	ifp->if_softc = sc;
   2924 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2925 #ifdef WM_MPSAFE
   2926 	ifp->if_extflags = IFEF_MPSAFE;
   2927 #endif
   2928 	ifp->if_ioctl = wm_ioctl;
   2929 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2930 		ifp->if_start = wm_nq_start;
   2931 		/*
   2932 		 * When the number of CPUs is one and the controller can use
   2933 		 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
   2934 		 * That is, wm(4) use two interrupts, one is used for Tx/Rx
   2935 		 * and the other is used for link status changing.
   2936 		 * In this situation, wm_nq_transmit() is disadvantageous
   2937 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2938 		 */
   2939 		if (wm_is_using_multiqueue(sc))
   2940 			ifp->if_transmit = wm_nq_transmit;
   2941 	} else {
   2942 		ifp->if_start = wm_start;
   2943 		/*
   2944 		 * wm_transmit() has the same disadvantage as wm_transmit().
   2945 		 */
   2946 		if (wm_is_using_multiqueue(sc))
   2947 			ifp->if_transmit = wm_transmit;
   2948 	}
   2949 	/* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */
   2950 	ifp->if_init = wm_init;
   2951 	ifp->if_stop = wm_stop;
   2952 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2953 	IFQ_SET_READY(&ifp->if_snd);
   2954 
   2955 	/* Check for jumbo frame */
   2956 	switch (sc->sc_type) {
   2957 	case WM_T_82573:
   2958 		/* XXX limited to 9234 if ASPM is disabled */
   2959 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2960 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2961 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2962 		break;
   2963 	case WM_T_82571:
   2964 	case WM_T_82572:
   2965 	case WM_T_82574:
   2966 	case WM_T_82583:
   2967 	case WM_T_82575:
   2968 	case WM_T_82576:
   2969 	case WM_T_82580:
   2970 	case WM_T_I350:
   2971 	case WM_T_I354:
   2972 	case WM_T_I210:
   2973 	case WM_T_I211:
   2974 	case WM_T_80003:
   2975 	case WM_T_ICH9:
   2976 	case WM_T_ICH10:
   2977 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2978 	case WM_T_PCH_LPT:
   2979 	case WM_T_PCH_SPT:
   2980 	case WM_T_PCH_CNP:
   2981 		/* XXX limited to 9234 */
   2982 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2983 		break;
   2984 	case WM_T_PCH:
   2985 		/* XXX limited to 4096 */
   2986 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2987 		break;
   2988 	case WM_T_82542_2_0:
   2989 	case WM_T_82542_2_1:
   2990 	case WM_T_ICH8:
   2991 		/* No support for jumbo frame */
   2992 		break;
   2993 	default:
   2994 		/* ETHER_MAX_LEN_JUMBO */
   2995 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2996 		break;
   2997 	}
   2998 
   2999 	/* If we're a i82543 or greater, we can support VLANs. */
   3000 	if (sc->sc_type >= WM_T_82543) {
   3001 		sc->sc_ethercom.ec_capabilities |=
   3002 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3003 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3004 	}
   3005 
   3006 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3007 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3008 
   3009 	/*
   3010 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   3011 	 * on i82543 and later.
   3012 	 */
   3013 	if (sc->sc_type >= WM_T_82543) {
   3014 		ifp->if_capabilities |=
   3015 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3016 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3017 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3018 		    IFCAP_CSUM_TCPv6_Tx |
   3019 		    IFCAP_CSUM_UDPv6_Tx;
   3020 	}
   3021 
   3022 	/*
   3023 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3024 	 *
   3025 	 *	82541GI (8086:1076) ... no
   3026 	 *	82572EI (8086:10b9) ... yes
   3027 	 */
   3028 	if (sc->sc_type >= WM_T_82571) {
   3029 		ifp->if_capabilities |=
   3030 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3031 	}
   3032 
   3033 	/*
   3034 	 * If we're a i82544 or greater (except i82547), we can do
   3035 	 * TCP segmentation offload.
   3036 	 */
   3037 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3038 		ifp->if_capabilities |= IFCAP_TSOv4;
   3039 	}
   3040 
   3041 	if (sc->sc_type >= WM_T_82571) {
   3042 		ifp->if_capabilities |= IFCAP_TSOv6;
   3043 	}
   3044 
   3045 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3046 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3047 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3048 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3049 
   3050 	/* Attach the interface. */
   3051 	error = if_initialize(ifp);
   3052 	if (error != 0) {
   3053 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3054 		    error);
   3055 		return; /* Error */
   3056 	}
   3057 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3058 	ether_ifattach(ifp, enaddr);
   3059 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3060 	if_register(ifp);
   3061 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3062 	    RND_FLAG_DEFAULT);
   3063 
   3064 #ifdef WM_EVENT_COUNTERS
   3065 	/* Attach event counters. */
   3066 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3067 	    NULL, xname, "linkintr");
   3068 
   3069 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3070 	    NULL, xname, "tx_xoff");
   3071 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3072 	    NULL, xname, "tx_xon");
   3073 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3074 	    NULL, xname, "rx_xoff");
   3075 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3076 	    NULL, xname, "rx_xon");
   3077 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3078 	    NULL, xname, "rx_macctl");
   3079 #endif /* WM_EVENT_COUNTERS */
   3080 
   3081 	sc->sc_txrx_use_workqueue = false;
   3082 
   3083 	wm_init_sysctls(sc);
   3084 
   3085 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3086 		pmf_class_network_register(self, ifp);
   3087 	else
   3088 		aprint_error_dev(self, "couldn't establish power handler\n");
   3089 
   3090 	sc->sc_flags |= WM_F_ATTACHED;
   3091 out:
   3092 	return;
   3093 }
   3094 
   3095 /* The detach function (ca_detach) */
   3096 static int
   3097 wm_detach(device_t self, int flags __unused)
   3098 {
   3099 	struct wm_softc *sc = device_private(self);
   3100 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3101 	int i;
   3102 
   3103 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3104 		return 0;
   3105 
   3106 	/* Stop the interface. Callouts are stopped in it. */
   3107 	wm_stop(ifp, 1);
   3108 
   3109 	pmf_device_deregister(self);
   3110 
   3111 	sysctl_teardown(&sc->sc_sysctllog);
   3112 
   3113 #ifdef WM_EVENT_COUNTERS
   3114 	evcnt_detach(&sc->sc_ev_linkintr);
   3115 
   3116 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3117 	evcnt_detach(&sc->sc_ev_tx_xon);
   3118 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3119 	evcnt_detach(&sc->sc_ev_rx_xon);
   3120 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3121 #endif /* WM_EVENT_COUNTERS */
   3122 
   3123 	rnd_detach_source(&sc->rnd_source);
   3124 
   3125 	/* Tell the firmware about the release */
   3126 	WM_CORE_LOCK(sc);
   3127 	wm_release_manageability(sc);
   3128 	wm_release_hw_control(sc);
   3129 	wm_enable_wakeup(sc);
   3130 	WM_CORE_UNLOCK(sc);
   3131 
   3132 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3133 
   3134 	ether_ifdetach(ifp);
   3135 	if_detach(ifp);
   3136 	if_percpuq_destroy(sc->sc_ipq);
   3137 
   3138 	/* Delete all remaining media. */
   3139 	ifmedia_fini(&sc->sc_mii.mii_media);
   3140 
   3141 	/* Unload RX dmamaps and free mbufs */
   3142 	for (i = 0; i < sc->sc_nqueues; i++) {
   3143 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3144 		mutex_enter(rxq->rxq_lock);
   3145 		wm_rxdrain(rxq);
   3146 		mutex_exit(rxq->rxq_lock);
   3147 	}
   3148 	/* Must unlock here */
   3149 
   3150 	/* Disestablish the interrupt handler */
   3151 	for (i = 0; i < sc->sc_nintrs; i++) {
   3152 		if (sc->sc_ihs[i] != NULL) {
   3153 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3154 			sc->sc_ihs[i] = NULL;
   3155 		}
   3156 	}
   3157 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3158 
   3159 	/* wm_stop() ensure workqueue is stopped. */
   3160 	workqueue_destroy(sc->sc_queue_wq);
   3161 
   3162 	for (i = 0; i < sc->sc_nqueues; i++)
   3163 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3164 
   3165 	wm_free_txrx_queues(sc);
   3166 
   3167 	/* Unmap the registers */
   3168 	if (sc->sc_ss) {
   3169 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3170 		sc->sc_ss = 0;
   3171 	}
   3172 	if (sc->sc_ios) {
   3173 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3174 		sc->sc_ios = 0;
   3175 	}
   3176 	if (sc->sc_flashs) {
   3177 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3178 		sc->sc_flashs = 0;
   3179 	}
   3180 
   3181 	if (sc->sc_core_lock)
   3182 		mutex_obj_free(sc->sc_core_lock);
   3183 	if (sc->sc_ich_phymtx)
   3184 		mutex_obj_free(sc->sc_ich_phymtx);
   3185 	if (sc->sc_ich_nvmmtx)
   3186 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3187 
   3188 	return 0;
   3189 }
   3190 
   3191 static bool
   3192 wm_suspend(device_t self, const pmf_qual_t *qual)
   3193 {
   3194 	struct wm_softc *sc = device_private(self);
   3195 
   3196 	wm_release_manageability(sc);
   3197 	wm_release_hw_control(sc);
   3198 	wm_enable_wakeup(sc);
   3199 
   3200 	return true;
   3201 }
   3202 
   3203 static bool
   3204 wm_resume(device_t self, const pmf_qual_t *qual)
   3205 {
   3206 	struct wm_softc *sc = device_private(self);
   3207 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3208 	pcireg_t reg;
   3209 	char buf[256];
   3210 
   3211 	reg = CSR_READ(sc, WMREG_WUS);
   3212 	if (reg != 0) {
   3213 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3214 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3215 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3216 	}
   3217 
   3218 	if (sc->sc_type >= WM_T_PCH2)
   3219 		wm_resume_workarounds_pchlan(sc);
   3220 	if ((ifp->if_flags & IFF_UP) == 0) {
   3221 		wm_reset(sc);
   3222 		/* Non-AMT based hardware can now take control from firmware */
   3223 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3224 			wm_get_hw_control(sc);
   3225 		wm_init_manageability(sc);
   3226 	} else {
   3227 		/*
   3228 		 * We called pmf_class_network_register(), so if_init() is
   3229 		 * automatically called when IFF_UP. wm_reset(),
   3230 		 * wm_get_hw_control() and wm_init_manageability() are called
   3231 		 * via wm_init().
   3232 		 */
   3233 	}
   3234 
   3235 	return true;
   3236 }
   3237 
   3238 /*
   3239  * wm_watchdog:		[ifnet interface function]
   3240  *
   3241  *	Watchdog timer handler.
   3242  */
   3243 static void
   3244 wm_watchdog(struct ifnet *ifp)
   3245 {
   3246 	int qid;
   3247 	struct wm_softc *sc = ifp->if_softc;
   3248 	uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
   3249 
   3250 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3251 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3252 
   3253 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3254 	}
   3255 
   3256 	/* IF any of queues hanged up, reset the interface. */
   3257 	if (hang_queue != 0) {
   3258 		(void)wm_init(ifp);
   3259 
   3260 		/*
   3261 		 * There are still some upper layer processing which call
   3262 		 * ifp->if_start(). e.g. ALTQ or one CPU system
   3263 		 */
   3264 		/* Try to get more packets going. */
   3265 		ifp->if_start(ifp);
   3266 	}
   3267 }
   3268 
   3269 
   3270 static void
   3271 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3272 {
   3273 
   3274 	mutex_enter(txq->txq_lock);
   3275 	if (txq->txq_sending &&
   3276 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3277 		wm_watchdog_txq_locked(ifp, txq, hang);
   3278 
   3279 	mutex_exit(txq->txq_lock);
   3280 }
   3281 
   3282 static void
   3283 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3284     uint16_t *hang)
   3285 {
   3286 	struct wm_softc *sc = ifp->if_softc;
   3287 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3288 
   3289 	KASSERT(mutex_owned(txq->txq_lock));
   3290 
   3291 	/*
   3292 	 * Since we're using delayed interrupts, sweep up
   3293 	 * before we report an error.
   3294 	 */
   3295 	wm_txeof(txq, UINT_MAX);
   3296 
   3297 	if (txq->txq_sending)
   3298 		*hang |= __BIT(wmq->wmq_id);
   3299 
   3300 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3301 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3302 		    device_xname(sc->sc_dev));
   3303 	} else {
   3304 #ifdef WM_DEBUG
   3305 		int i, j;
   3306 		struct wm_txsoft *txs;
   3307 #endif
   3308 		log(LOG_ERR,
   3309 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3310 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3311 		    txq->txq_next);
   3312 		if_statinc(ifp, if_oerrors);
   3313 #ifdef WM_DEBUG
   3314 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3315 		    i = WM_NEXTTXS(txq, i)) {
   3316 			txs = &txq->txq_soft[i];
   3317 			printf("txs %d tx %d -> %d\n",
   3318 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3319 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3320 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3321 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3322 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3323 					printf("\t %#08x%08x\n",
   3324 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3325 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3326 				} else {
   3327 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3328 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3329 					    txq->txq_descs[j].wtx_addr.wa_low);
   3330 					printf("\t %#04x%02x%02x%08x\n",
   3331 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3332 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3333 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3334 					    txq->txq_descs[j].wtx_cmdlen);
   3335 				}
   3336 				if (j == txs->txs_lastdesc)
   3337 					break;
   3338 			}
   3339 		}
   3340 #endif
   3341 	}
   3342 }
   3343 
   3344 /*
   3345  * wm_tick:
   3346  *
   3347  *	One second timer, used to check link status, sweep up
   3348  *	completed transmit jobs, etc.
   3349  */
   3350 static void
   3351 wm_tick(void *arg)
   3352 {
   3353 	struct wm_softc *sc = arg;
   3354 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3355 #ifndef WM_MPSAFE
   3356 	int s = splnet();
   3357 #endif
   3358 
   3359 	WM_CORE_LOCK(sc);
   3360 
   3361 	if (sc->sc_core_stopping) {
   3362 		WM_CORE_UNLOCK(sc);
   3363 #ifndef WM_MPSAFE
   3364 		splx(s);
   3365 #endif
   3366 		return;
   3367 	}
   3368 
   3369 	if (sc->sc_type >= WM_T_82542_2_1) {
   3370 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3371 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3372 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3373 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3374 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3375 	}
   3376 
   3377 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3378 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3379 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3380 	    + CSR_READ(sc, WMREG_CRCERRS)
   3381 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3382 	    + CSR_READ(sc, WMREG_SYMERRC)
   3383 	    + CSR_READ(sc, WMREG_RXERRC)
   3384 	    + CSR_READ(sc, WMREG_SEC)
   3385 	    + CSR_READ(sc, WMREG_CEXTERR)
   3386 	    + CSR_READ(sc, WMREG_RLEC));
   3387 	/*
   3388 	 * WMREG_RNBC is incremented when there is no available buffers in host
   3389 	 * memory. It does not mean the number of dropped packet. Because
   3390 	 * ethernet controller can receive packets in such case if there is
   3391 	 * space in phy's FIFO.
   3392 	 *
   3393 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
   3394 	 * own EVCNT instead of if_iqdrops.
   3395 	 */
   3396 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3397 	IF_STAT_PUTREF(ifp);
   3398 
   3399 	if (sc->sc_flags & WM_F_HAS_MII)
   3400 		mii_tick(&sc->sc_mii);
   3401 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3402 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3403 		wm_serdes_tick(sc);
   3404 	else
   3405 		wm_tbi_tick(sc);
   3406 
   3407 	WM_CORE_UNLOCK(sc);
   3408 
   3409 	wm_watchdog(ifp);
   3410 
   3411 	callout_schedule(&sc->sc_tick_ch, hz);
   3412 }
   3413 
   3414 static int
   3415 wm_ifflags_cb(struct ethercom *ec)
   3416 {
   3417 	struct ifnet *ifp = &ec->ec_if;
   3418 	struct wm_softc *sc = ifp->if_softc;
   3419 	u_short iffchange;
   3420 	int ecchange;
   3421 	bool needreset = false;
   3422 	int rc = 0;
   3423 
   3424 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3425 		device_xname(sc->sc_dev), __func__));
   3426 
   3427 	WM_CORE_LOCK(sc);
   3428 
   3429 	/*
   3430 	 * Check for if_flags.
   3431 	 * Main usage is to prevent linkdown when opening bpf.
   3432 	 */
   3433 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3434 	sc->sc_if_flags = ifp->if_flags;
   3435 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3436 		needreset = true;
   3437 		goto ec;
   3438 	}
   3439 
   3440 	/* iff related updates */
   3441 	if ((iffchange & IFF_PROMISC) != 0)
   3442 		wm_set_filter(sc);
   3443 
   3444 	wm_set_vlan(sc);
   3445 
   3446 ec:
   3447 	/* Check for ec_capenable. */
   3448 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3449 	sc->sc_ec_capenable = ec->ec_capenable;
   3450 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3451 		needreset = true;
   3452 		goto out;
   3453 	}
   3454 
   3455 	/* ec related updates */
   3456 	wm_set_eee(sc);
   3457 
   3458 out:
   3459 	if (needreset)
   3460 		rc = ENETRESET;
   3461 	WM_CORE_UNLOCK(sc);
   3462 
   3463 	return rc;
   3464 }
   3465 
   3466 /*
   3467  * wm_ioctl:		[ifnet interface function]
   3468  *
   3469  *	Handle control requests from the operator.
   3470  */
   3471 static int
   3472 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3473 {
   3474 	struct wm_softc *sc = ifp->if_softc;
   3475 	struct ifreq *ifr = (struct ifreq *)data;
   3476 	struct ifaddr *ifa = (struct ifaddr *)data;
   3477 	struct sockaddr_dl *sdl;
   3478 	int s, error;
   3479 
   3480 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3481 		device_xname(sc->sc_dev), __func__));
   3482 
   3483 #ifndef WM_MPSAFE
   3484 	s = splnet();
   3485 #endif
   3486 	switch (cmd) {
   3487 	case SIOCSIFMEDIA:
   3488 		WM_CORE_LOCK(sc);
   3489 		/* Flow control requires full-duplex mode. */
   3490 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3491 		    (ifr->ifr_media & IFM_FDX) == 0)
   3492 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3493 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3494 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3495 				/* We can do both TXPAUSE and RXPAUSE. */
   3496 				ifr->ifr_media |=
   3497 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3498 			}
   3499 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3500 		}
   3501 		WM_CORE_UNLOCK(sc);
   3502 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3503 		break;
   3504 	case SIOCINITIFADDR:
   3505 		WM_CORE_LOCK(sc);
   3506 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3507 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3508 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3509 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3510 			/* Unicast address is the first multicast entry */
   3511 			wm_set_filter(sc);
   3512 			error = 0;
   3513 			WM_CORE_UNLOCK(sc);
   3514 			break;
   3515 		}
   3516 		WM_CORE_UNLOCK(sc);
   3517 		/*FALLTHROUGH*/
   3518 	default:
   3519 #ifdef WM_MPSAFE
   3520 		s = splnet();
   3521 #endif
   3522 		/* It may call wm_start, so unlock here */
   3523 		error = ether_ioctl(ifp, cmd, data);
   3524 #ifdef WM_MPSAFE
   3525 		splx(s);
   3526 #endif
   3527 		if (error != ENETRESET)
   3528 			break;
   3529 
   3530 		error = 0;
   3531 
   3532 		if (cmd == SIOCSIFCAP)
   3533 			error = (*ifp->if_init)(ifp);
   3534 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3535 			;
   3536 		else if (ifp->if_flags & IFF_RUNNING) {
   3537 			/*
   3538 			 * Multicast list has changed; set the hardware filter
   3539 			 * accordingly.
   3540 			 */
   3541 			WM_CORE_LOCK(sc);
   3542 			wm_set_filter(sc);
   3543 			WM_CORE_UNLOCK(sc);
   3544 		}
   3545 		break;
   3546 	}
   3547 
   3548 #ifndef WM_MPSAFE
   3549 	splx(s);
   3550 #endif
   3551 	return error;
   3552 }
   3553 
   3554 /* MAC address related */
   3555 
   3556 /*
   3557  * Get the offset of MAC address and return it.
   3558  * If error occured, use offset 0.
   3559  */
   3560 static uint16_t
   3561 wm_check_alt_mac_addr(struct wm_softc *sc)
   3562 {
   3563 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3564 	uint16_t offset = NVM_OFF_MACADDR;
   3565 
   3566 	/* Try to read alternative MAC address pointer */
   3567 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3568 		return 0;
   3569 
   3570 	/* Check pointer if it's valid or not. */
   3571 	if ((offset == 0x0000) || (offset == 0xffff))
   3572 		return 0;
   3573 
   3574 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3575 	/*
   3576 	 * Check whether alternative MAC address is valid or not.
   3577 	 * Some cards have non 0xffff pointer but those don't use
   3578 	 * alternative MAC address in reality.
   3579 	 *
   3580 	 * Check whether the broadcast bit is set or not.
   3581 	 */
   3582 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3583 		if (((myea[0] & 0xff) & 0x01) == 0)
   3584 			return offset; /* Found */
   3585 
   3586 	/* Not found */
   3587 	return 0;
   3588 }
   3589 
   3590 static int
   3591 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3592 {
   3593 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3594 	uint16_t offset = NVM_OFF_MACADDR;
   3595 	int do_invert = 0;
   3596 
   3597 	switch (sc->sc_type) {
   3598 	case WM_T_82580:
   3599 	case WM_T_I350:
   3600 	case WM_T_I354:
   3601 		/* EEPROM Top Level Partitioning */
   3602 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3603 		break;
   3604 	case WM_T_82571:
   3605 	case WM_T_82575:
   3606 	case WM_T_82576:
   3607 	case WM_T_80003:
   3608 	case WM_T_I210:
   3609 	case WM_T_I211:
   3610 		offset = wm_check_alt_mac_addr(sc);
   3611 		if (offset == 0)
   3612 			if ((sc->sc_funcid & 0x01) == 1)
   3613 				do_invert = 1;
   3614 		break;
   3615 	default:
   3616 		if ((sc->sc_funcid & 0x01) == 1)
   3617 			do_invert = 1;
   3618 		break;
   3619 	}
   3620 
   3621 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3622 		goto bad;
   3623 
   3624 	enaddr[0] = myea[0] & 0xff;
   3625 	enaddr[1] = myea[0] >> 8;
   3626 	enaddr[2] = myea[1] & 0xff;
   3627 	enaddr[3] = myea[1] >> 8;
   3628 	enaddr[4] = myea[2] & 0xff;
   3629 	enaddr[5] = myea[2] >> 8;
   3630 
   3631 	/*
   3632 	 * Toggle the LSB of the MAC address on the second port
   3633 	 * of some dual port cards.
   3634 	 */
   3635 	if (do_invert != 0)
   3636 		enaddr[5] ^= 1;
   3637 
   3638 	return 0;
   3639 
   3640  bad:
   3641 	return -1;
   3642 }
   3643 
   3644 /*
   3645  * wm_set_ral:
   3646  *
   3647  *	Set an entery in the receive address list.
   3648  */
   3649 static void
   3650 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3651 {
   3652 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3653 	uint32_t wlock_mac;
   3654 	int rv;
   3655 
   3656 	if (enaddr != NULL) {
   3657 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3658 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3659 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3660 		ral_hi |= RAL_AV;
   3661 	} else {
   3662 		ral_lo = 0;
   3663 		ral_hi = 0;
   3664 	}
   3665 
   3666 	switch (sc->sc_type) {
   3667 	case WM_T_82542_2_0:
   3668 	case WM_T_82542_2_1:
   3669 	case WM_T_82543:
   3670 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3671 		CSR_WRITE_FLUSH(sc);
   3672 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3673 		CSR_WRITE_FLUSH(sc);
   3674 		break;
   3675 	case WM_T_PCH2:
   3676 	case WM_T_PCH_LPT:
   3677 	case WM_T_PCH_SPT:
   3678 	case WM_T_PCH_CNP:
   3679 		if (idx == 0) {
   3680 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3681 			CSR_WRITE_FLUSH(sc);
   3682 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3683 			CSR_WRITE_FLUSH(sc);
   3684 			return;
   3685 		}
   3686 		if (sc->sc_type != WM_T_PCH2) {
   3687 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3688 			    FWSM_WLOCK_MAC);
   3689 			addrl = WMREG_SHRAL(idx - 1);
   3690 			addrh = WMREG_SHRAH(idx - 1);
   3691 		} else {
   3692 			wlock_mac = 0;
   3693 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3694 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3695 		}
   3696 
   3697 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3698 			rv = wm_get_swflag_ich8lan(sc);
   3699 			if (rv != 0)
   3700 				return;
   3701 			CSR_WRITE(sc, addrl, ral_lo);
   3702 			CSR_WRITE_FLUSH(sc);
   3703 			CSR_WRITE(sc, addrh, ral_hi);
   3704 			CSR_WRITE_FLUSH(sc);
   3705 			wm_put_swflag_ich8lan(sc);
   3706 		}
   3707 
   3708 		break;
   3709 	default:
   3710 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3711 		CSR_WRITE_FLUSH(sc);
   3712 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3713 		CSR_WRITE_FLUSH(sc);
   3714 		break;
   3715 	}
   3716 }
   3717 
   3718 /*
   3719  * wm_mchash:
   3720  *
   3721  *	Compute the hash of the multicast address for the 4096-bit
   3722  *	multicast filter.
   3723  */
   3724 static uint32_t
   3725 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3726 {
   3727 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3728 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3729 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3730 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3731 	uint32_t hash;
   3732 
   3733 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3734 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3735 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3736 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3737 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3738 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3739 		return (hash & 0x3ff);
   3740 	}
   3741 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3742 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3743 
   3744 	return (hash & 0xfff);
   3745 }
   3746 
   3747 /*
   3748  *
   3749  *
   3750  */
   3751 static int
   3752 wm_rar_count(struct wm_softc *sc)
   3753 {
   3754 	int size;
   3755 
   3756 	switch (sc->sc_type) {
   3757 	case WM_T_ICH8:
   3758 		size = WM_RAL_TABSIZE_ICH8 -1;
   3759 		break;
   3760 	case WM_T_ICH9:
   3761 	case WM_T_ICH10:
   3762 	case WM_T_PCH:
   3763 		size = WM_RAL_TABSIZE_ICH8;
   3764 		break;
   3765 	case WM_T_PCH2:
   3766 		size = WM_RAL_TABSIZE_PCH2;
   3767 		break;
   3768 	case WM_T_PCH_LPT:
   3769 	case WM_T_PCH_SPT:
   3770 	case WM_T_PCH_CNP:
   3771 		size = WM_RAL_TABSIZE_PCH_LPT;
   3772 		break;
   3773 	case WM_T_82575:
   3774 	case WM_T_I210:
   3775 	case WM_T_I211:
   3776 		size = WM_RAL_TABSIZE_82575;
   3777 		break;
   3778 	case WM_T_82576:
   3779 	case WM_T_82580:
   3780 		size = WM_RAL_TABSIZE_82576;
   3781 		break;
   3782 	case WM_T_I350:
   3783 	case WM_T_I354:
   3784 		size = WM_RAL_TABSIZE_I350;
   3785 		break;
   3786 	default:
   3787 		size = WM_RAL_TABSIZE;
   3788 	}
   3789 
   3790 	return size;
   3791 }
   3792 
   3793 /*
   3794  * wm_set_filter:
   3795  *
   3796  *	Set up the receive filter.
   3797  */
   3798 static void
   3799 wm_set_filter(struct wm_softc *sc)
   3800 {
   3801 	struct ethercom *ec = &sc->sc_ethercom;
   3802 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3803 	struct ether_multi *enm;
   3804 	struct ether_multistep step;
   3805 	bus_addr_t mta_reg;
   3806 	uint32_t hash, reg, bit;
   3807 	int i, size, ralmax, rv;
   3808 
   3809 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3810 		device_xname(sc->sc_dev), __func__));
   3811 
   3812 	if (sc->sc_type >= WM_T_82544)
   3813 		mta_reg = WMREG_CORDOVA_MTA;
   3814 	else
   3815 		mta_reg = WMREG_MTA;
   3816 
   3817 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3818 
   3819 	if (ifp->if_flags & IFF_BROADCAST)
   3820 		sc->sc_rctl |= RCTL_BAM;
   3821 	if (ifp->if_flags & IFF_PROMISC) {
   3822 		sc->sc_rctl |= RCTL_UPE;
   3823 		ETHER_LOCK(ec);
   3824 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3825 		ETHER_UNLOCK(ec);
   3826 		goto allmulti;
   3827 	}
   3828 
   3829 	/*
   3830 	 * Set the station address in the first RAL slot, and
   3831 	 * clear the remaining slots.
   3832 	 */
   3833 	size = wm_rar_count(sc);
   3834 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3835 
   3836 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3837 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3838 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3839 		switch (i) {
   3840 		case 0:
   3841 			/* We can use all entries */
   3842 			ralmax = size;
   3843 			break;
   3844 		case 1:
   3845 			/* Only RAR[0] */
   3846 			ralmax = 1;
   3847 			break;
   3848 		default:
   3849 			/* Available SHRA + RAR[0] */
   3850 			ralmax = i + 1;
   3851 		}
   3852 	} else
   3853 		ralmax = size;
   3854 	for (i = 1; i < size; i++) {
   3855 		if (i < ralmax)
   3856 			wm_set_ral(sc, NULL, i);
   3857 	}
   3858 
   3859 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3860 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3861 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3862 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3863 		size = WM_ICH8_MC_TABSIZE;
   3864 	else
   3865 		size = WM_MC_TABSIZE;
   3866 	/* Clear out the multicast table. */
   3867 	for (i = 0; i < size; i++) {
   3868 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3869 		CSR_WRITE_FLUSH(sc);
   3870 	}
   3871 
   3872 	ETHER_LOCK(ec);
   3873 	ETHER_FIRST_MULTI(step, ec, enm);
   3874 	while (enm != NULL) {
   3875 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3876 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3877 			ETHER_UNLOCK(ec);
   3878 			/*
   3879 			 * We must listen to a range of multicast addresses.
   3880 			 * For now, just accept all multicasts, rather than
   3881 			 * trying to set only those filter bits needed to match
   3882 			 * the range.  (At this time, the only use of address
   3883 			 * ranges is for IP multicast routing, for which the
   3884 			 * range is big enough to require all bits set.)
   3885 			 */
   3886 			goto allmulti;
   3887 		}
   3888 
   3889 		hash = wm_mchash(sc, enm->enm_addrlo);
   3890 
   3891 		reg = (hash >> 5);
   3892 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3893 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3894 		    || (sc->sc_type == WM_T_PCH2)
   3895 		    || (sc->sc_type == WM_T_PCH_LPT)
   3896 		    || (sc->sc_type == WM_T_PCH_SPT)
   3897 		    || (sc->sc_type == WM_T_PCH_CNP))
   3898 			reg &= 0x1f;
   3899 		else
   3900 			reg &= 0x7f;
   3901 		bit = hash & 0x1f;
   3902 
   3903 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3904 		hash |= 1U << bit;
   3905 
   3906 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3907 			/*
   3908 			 * 82544 Errata 9: Certain register cannot be written
   3909 			 * with particular alignments in PCI-X bus operation
   3910 			 * (FCAH, MTA and VFTA).
   3911 			 */
   3912 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3913 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3914 			CSR_WRITE_FLUSH(sc);
   3915 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3916 			CSR_WRITE_FLUSH(sc);
   3917 		} else {
   3918 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3919 			CSR_WRITE_FLUSH(sc);
   3920 		}
   3921 
   3922 		ETHER_NEXT_MULTI(step, enm);
   3923 	}
   3924 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3925 	ETHER_UNLOCK(ec);
   3926 
   3927 	goto setit;
   3928 
   3929  allmulti:
   3930 	sc->sc_rctl |= RCTL_MPE;
   3931 
   3932  setit:
   3933 	if (sc->sc_type >= WM_T_PCH2) {
   3934 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   3935 		    && (ifp->if_mtu > ETHERMTU))
   3936 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   3937 		else
   3938 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   3939 		if (rv != 0)
   3940 			device_printf(sc->sc_dev,
   3941 			    "Failed to do workaround for jumbo frame.\n");
   3942 	}
   3943 
   3944 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3945 }
   3946 
   3947 /* Reset and init related */
   3948 
   3949 static void
   3950 wm_set_vlan(struct wm_softc *sc)
   3951 {
   3952 
   3953 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3954 		device_xname(sc->sc_dev), __func__));
   3955 
   3956 	/* Deal with VLAN enables. */
   3957 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3958 		sc->sc_ctrl |= CTRL_VME;
   3959 	else
   3960 		sc->sc_ctrl &= ~CTRL_VME;
   3961 
   3962 	/* Write the control registers. */
   3963 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3964 }
   3965 
   3966 static void
   3967 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3968 {
   3969 	uint32_t gcr;
   3970 	pcireg_t ctrl2;
   3971 
   3972 	gcr = CSR_READ(sc, WMREG_GCR);
   3973 
   3974 	/* Only take action if timeout value is defaulted to 0 */
   3975 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3976 		goto out;
   3977 
   3978 	if ((gcr & GCR_CAP_VER2) == 0) {
   3979 		gcr |= GCR_CMPL_TMOUT_10MS;
   3980 		goto out;
   3981 	}
   3982 
   3983 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3984 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3985 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3986 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3987 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3988 
   3989 out:
   3990 	/* Disable completion timeout resend */
   3991 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3992 
   3993 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3994 }
   3995 
   3996 void
   3997 wm_get_auto_rd_done(struct wm_softc *sc)
   3998 {
   3999 	int i;
   4000 
   4001 	/* wait for eeprom to reload */
   4002 	switch (sc->sc_type) {
   4003 	case WM_T_82571:
   4004 	case WM_T_82572:
   4005 	case WM_T_82573:
   4006 	case WM_T_82574:
   4007 	case WM_T_82583:
   4008 	case WM_T_82575:
   4009 	case WM_T_82576:
   4010 	case WM_T_82580:
   4011 	case WM_T_I350:
   4012 	case WM_T_I354:
   4013 	case WM_T_I210:
   4014 	case WM_T_I211:
   4015 	case WM_T_80003:
   4016 	case WM_T_ICH8:
   4017 	case WM_T_ICH9:
   4018 		for (i = 0; i < 10; i++) {
   4019 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4020 				break;
   4021 			delay(1000);
   4022 		}
   4023 		if (i == 10) {
   4024 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4025 			    "complete\n", device_xname(sc->sc_dev));
   4026 		}
   4027 		break;
   4028 	default:
   4029 		break;
   4030 	}
   4031 }
   4032 
   4033 void
   4034 wm_lan_init_done(struct wm_softc *sc)
   4035 {
   4036 	uint32_t reg = 0;
   4037 	int i;
   4038 
   4039 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4040 		device_xname(sc->sc_dev), __func__));
   4041 
   4042 	/* Wait for eeprom to reload */
   4043 	switch (sc->sc_type) {
   4044 	case WM_T_ICH10:
   4045 	case WM_T_PCH:
   4046 	case WM_T_PCH2:
   4047 	case WM_T_PCH_LPT:
   4048 	case WM_T_PCH_SPT:
   4049 	case WM_T_PCH_CNP:
   4050 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4051 			reg = CSR_READ(sc, WMREG_STATUS);
   4052 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4053 				break;
   4054 			delay(100);
   4055 		}
   4056 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4057 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4058 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4059 		}
   4060 		break;
   4061 	default:
   4062 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4063 		    __func__);
   4064 		break;
   4065 	}
   4066 
   4067 	reg &= ~STATUS_LAN_INIT_DONE;
   4068 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4069 }
   4070 
   4071 void
   4072 wm_get_cfg_done(struct wm_softc *sc)
   4073 {
   4074 	int mask;
   4075 	uint32_t reg;
   4076 	int i;
   4077 
   4078 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4079 		device_xname(sc->sc_dev), __func__));
   4080 
   4081 	/* Wait for eeprom to reload */
   4082 	switch (sc->sc_type) {
   4083 	case WM_T_82542_2_0:
   4084 	case WM_T_82542_2_1:
   4085 		/* null */
   4086 		break;
   4087 	case WM_T_82543:
   4088 	case WM_T_82544:
   4089 	case WM_T_82540:
   4090 	case WM_T_82545:
   4091 	case WM_T_82545_3:
   4092 	case WM_T_82546:
   4093 	case WM_T_82546_3:
   4094 	case WM_T_82541:
   4095 	case WM_T_82541_2:
   4096 	case WM_T_82547:
   4097 	case WM_T_82547_2:
   4098 	case WM_T_82573:
   4099 	case WM_T_82574:
   4100 	case WM_T_82583:
   4101 		/* generic */
   4102 		delay(10*1000);
   4103 		break;
   4104 	case WM_T_80003:
   4105 	case WM_T_82571:
   4106 	case WM_T_82572:
   4107 	case WM_T_82575:
   4108 	case WM_T_82576:
   4109 	case WM_T_82580:
   4110 	case WM_T_I350:
   4111 	case WM_T_I354:
   4112 	case WM_T_I210:
   4113 	case WM_T_I211:
   4114 		if (sc->sc_type == WM_T_82571) {
   4115 			/* Only 82571 shares port 0 */
   4116 			mask = EEMNGCTL_CFGDONE_0;
   4117 		} else
   4118 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4119 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4120 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4121 				break;
   4122 			delay(1000);
   4123 		}
   4124 		if (i >= WM_PHY_CFG_TIMEOUT)
   4125 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4126 				device_xname(sc->sc_dev), __func__));
   4127 		break;
   4128 	case WM_T_ICH8:
   4129 	case WM_T_ICH9:
   4130 	case WM_T_ICH10:
   4131 	case WM_T_PCH:
   4132 	case WM_T_PCH2:
   4133 	case WM_T_PCH_LPT:
   4134 	case WM_T_PCH_SPT:
   4135 	case WM_T_PCH_CNP:
   4136 		delay(10*1000);
   4137 		if (sc->sc_type >= WM_T_ICH10)
   4138 			wm_lan_init_done(sc);
   4139 		else
   4140 			wm_get_auto_rd_done(sc);
   4141 
   4142 		/* Clear PHY Reset Asserted bit */
   4143 		reg = CSR_READ(sc, WMREG_STATUS);
   4144 		if ((reg & STATUS_PHYRA) != 0)
   4145 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4146 		break;
   4147 	default:
   4148 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4149 		    __func__);
   4150 		break;
   4151 	}
   4152 }
   4153 
   4154 int
   4155 wm_phy_post_reset(struct wm_softc *sc)
   4156 {
   4157 	device_t dev = sc->sc_dev;
   4158 	uint16_t reg;
   4159 	int rv = 0;
   4160 
   4161 	/* This function is only for ICH8 and newer. */
   4162 	if (sc->sc_type < WM_T_ICH8)
   4163 		return 0;
   4164 
   4165 	if (wm_phy_resetisblocked(sc)) {
   4166 		/* XXX */
   4167 		device_printf(dev, "PHY is blocked\n");
   4168 		return -1;
   4169 	}
   4170 
   4171 	/* Allow time for h/w to get to quiescent state after reset */
   4172 	delay(10*1000);
   4173 
   4174 	/* Perform any necessary post-reset workarounds */
   4175 	if (sc->sc_type == WM_T_PCH)
   4176 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4177 	else if (sc->sc_type == WM_T_PCH2)
   4178 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4179 	if (rv != 0)
   4180 		return rv;
   4181 
   4182 	/* Clear the host wakeup bit after lcd reset */
   4183 	if (sc->sc_type >= WM_T_PCH) {
   4184 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4185 		reg &= ~BM_WUC_HOST_WU_BIT;
   4186 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4187 	}
   4188 
   4189 	/* Configure the LCD with the extended configuration region in NVM */
   4190 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4191 		return rv;
   4192 
   4193 	/* Configure the LCD with the OEM bits in NVM */
   4194 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4195 
   4196 	if (sc->sc_type == WM_T_PCH2) {
   4197 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4198 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4199 			delay(10 * 1000);
   4200 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4201 		}
   4202 		/* Set EEE LPI Update Timer to 200usec */
   4203 		rv = sc->phy.acquire(sc);
   4204 		if (rv)
   4205 			return rv;
   4206 		rv = wm_write_emi_reg_locked(dev,
   4207 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4208 		sc->phy.release(sc);
   4209 	}
   4210 
   4211 	return rv;
   4212 }
   4213 
   4214 /* Only for PCH and newer */
   4215 static int
   4216 wm_write_smbus_addr(struct wm_softc *sc)
   4217 {
   4218 	uint32_t strap, freq;
   4219 	uint16_t phy_data;
   4220 	int rv;
   4221 
   4222 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4223 		device_xname(sc->sc_dev), __func__));
   4224 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4225 
   4226 	strap = CSR_READ(sc, WMREG_STRAP);
   4227 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4228 
   4229 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4230 	if (rv != 0)
   4231 		return -1;
   4232 
   4233 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4234 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4235 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4236 
   4237 	if (sc->sc_phytype == WMPHY_I217) {
   4238 		/* Restore SMBus frequency */
   4239 		if (freq --) {
   4240 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4241 			    | HV_SMB_ADDR_FREQ_HIGH);
   4242 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4243 			    HV_SMB_ADDR_FREQ_LOW);
   4244 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4245 			    HV_SMB_ADDR_FREQ_HIGH);
   4246 		} else
   4247 			DPRINTF(WM_DEBUG_INIT,
   4248 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4249 				device_xname(sc->sc_dev), __func__));
   4250 	}
   4251 
   4252 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4253 	    phy_data);
   4254 }
   4255 
   4256 static int
   4257 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4258 {
   4259 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4260 	uint16_t phy_page = 0;
   4261 	int rv = 0;
   4262 
   4263 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4264 		device_xname(sc->sc_dev), __func__));
   4265 
   4266 	switch (sc->sc_type) {
   4267 	case WM_T_ICH8:
   4268 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4269 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4270 			return 0;
   4271 
   4272 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4273 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4274 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4275 			break;
   4276 		}
   4277 		/* FALLTHROUGH */
   4278 	case WM_T_PCH:
   4279 	case WM_T_PCH2:
   4280 	case WM_T_PCH_LPT:
   4281 	case WM_T_PCH_SPT:
   4282 	case WM_T_PCH_CNP:
   4283 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4284 		break;
   4285 	default:
   4286 		return 0;
   4287 	}
   4288 
   4289 	if ((rv = sc->phy.acquire(sc)) != 0)
   4290 		return rv;
   4291 
   4292 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4293 	if ((reg & sw_cfg_mask) == 0)
   4294 		goto release;
   4295 
   4296 	/*
   4297 	 * Make sure HW does not configure LCD from PHY extended configuration
   4298 	 * before SW configuration
   4299 	 */
   4300 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4301 	if ((sc->sc_type < WM_T_PCH2)
   4302 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4303 		goto release;
   4304 
   4305 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4306 		device_xname(sc->sc_dev), __func__));
   4307 	/* word_addr is in DWORD */
   4308 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4309 
   4310 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4311 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4312 	if (cnf_size == 0)
   4313 		goto release;
   4314 
   4315 	if (((sc->sc_type == WM_T_PCH)
   4316 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4317 	    || (sc->sc_type > WM_T_PCH)) {
   4318 		/*
   4319 		 * HW configures the SMBus address and LEDs when the OEM and
   4320 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4321 		 * are cleared, SW will configure them instead.
   4322 		 */
   4323 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4324 			device_xname(sc->sc_dev), __func__));
   4325 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4326 			goto release;
   4327 
   4328 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4329 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4330 		    (uint16_t)reg);
   4331 		if (rv != 0)
   4332 			goto release;
   4333 	}
   4334 
   4335 	/* Configure LCD from extended configuration region. */
   4336 	for (i = 0; i < cnf_size; i++) {
   4337 		uint16_t reg_data, reg_addr;
   4338 
   4339 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4340 			goto release;
   4341 
   4342 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4343 			goto release;
   4344 
   4345 		if (reg_addr == IGPHY_PAGE_SELECT)
   4346 			phy_page = reg_data;
   4347 
   4348 		reg_addr &= IGPHY_MAXREGADDR;
   4349 		reg_addr |= phy_page;
   4350 
   4351 		KASSERT(sc->phy.writereg_locked != NULL);
   4352 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4353 		    reg_data);
   4354 	}
   4355 
   4356 release:
   4357 	sc->phy.release(sc);
   4358 	return rv;
   4359 }
   4360 
   4361 /*
   4362  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4363  *  @sc:       pointer to the HW structure
   4364  *  @d0_state: boolean if entering d0 or d3 device state
   4365  *
   4366  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4367  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4368  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4369  */
   4370 int
   4371 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4372 {
   4373 	uint32_t mac_reg;
   4374 	uint16_t oem_reg;
   4375 	int rv;
   4376 
   4377 	if (sc->sc_type < WM_T_PCH)
   4378 		return 0;
   4379 
   4380 	rv = sc->phy.acquire(sc);
   4381 	if (rv != 0)
   4382 		return rv;
   4383 
   4384 	if (sc->sc_type == WM_T_PCH) {
   4385 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4386 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4387 			goto release;
   4388 	}
   4389 
   4390 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4391 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4392 		goto release;
   4393 
   4394 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4395 
   4396 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4397 	if (rv != 0)
   4398 		goto release;
   4399 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4400 
   4401 	if (d0_state) {
   4402 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4403 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4404 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4405 			oem_reg |= HV_OEM_BITS_LPLU;
   4406 	} else {
   4407 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4408 		    != 0)
   4409 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4410 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4411 		    != 0)
   4412 			oem_reg |= HV_OEM_BITS_LPLU;
   4413 	}
   4414 
   4415 	/* Set Restart auto-neg to activate the bits */
   4416 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4417 	    && (wm_phy_resetisblocked(sc) == false))
   4418 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4419 
   4420 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4421 
   4422 release:
   4423 	sc->phy.release(sc);
   4424 
   4425 	return rv;
   4426 }
   4427 
   4428 /* Init hardware bits */
   4429 void
   4430 wm_initialize_hardware_bits(struct wm_softc *sc)
   4431 {
   4432 	uint32_t tarc0, tarc1, reg;
   4433 
   4434 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4435 		device_xname(sc->sc_dev), __func__));
   4436 
   4437 	/* For 82571 variant, 80003 and ICHs */
   4438 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4439 	    || (sc->sc_type >= WM_T_80003)) {
   4440 
   4441 		/* Transmit Descriptor Control 0 */
   4442 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4443 		reg |= TXDCTL_COUNT_DESC;
   4444 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4445 
   4446 		/* Transmit Descriptor Control 1 */
   4447 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4448 		reg |= TXDCTL_COUNT_DESC;
   4449 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4450 
   4451 		/* TARC0 */
   4452 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4453 		switch (sc->sc_type) {
   4454 		case WM_T_82571:
   4455 		case WM_T_82572:
   4456 		case WM_T_82573:
   4457 		case WM_T_82574:
   4458 		case WM_T_82583:
   4459 		case WM_T_80003:
   4460 			/* Clear bits 30..27 */
   4461 			tarc0 &= ~__BITS(30, 27);
   4462 			break;
   4463 		default:
   4464 			break;
   4465 		}
   4466 
   4467 		switch (sc->sc_type) {
   4468 		case WM_T_82571:
   4469 		case WM_T_82572:
   4470 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4471 
   4472 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4473 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4474 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4475 			/* 8257[12] Errata No.7 */
   4476 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   4477 
   4478 			/* TARC1 bit 28 */
   4479 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4480 				tarc1 &= ~__BIT(28);
   4481 			else
   4482 				tarc1 |= __BIT(28);
   4483 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4484 
   4485 			/*
   4486 			 * 8257[12] Errata No.13
   4487 			 * Disable Dyamic Clock Gating.
   4488 			 */
   4489 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4490 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4491 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4492 			break;
   4493 		case WM_T_82573:
   4494 		case WM_T_82574:
   4495 		case WM_T_82583:
   4496 			if ((sc->sc_type == WM_T_82574)
   4497 			    || (sc->sc_type == WM_T_82583))
   4498 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4499 
   4500 			/* Extended Device Control */
   4501 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4502 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4503 			reg |= __BIT(22);	/* Set bit 22 */
   4504 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4505 
   4506 			/* Device Control */
   4507 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4508 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4509 
   4510 			/* PCIe Control Register */
   4511 			/*
   4512 			 * 82573 Errata (unknown).
   4513 			 *
   4514 			 * 82574 Errata 25 and 82583 Errata 12
   4515 			 * "Dropped Rx Packets":
   4516 			 *   NVM Image Version 2.1.4 and newer has no this bug.
   4517 			 */
   4518 			reg = CSR_READ(sc, WMREG_GCR);
   4519 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4520 			CSR_WRITE(sc, WMREG_GCR, reg);
   4521 
   4522 			if ((sc->sc_type == WM_T_82574)
   4523 			    || (sc->sc_type == WM_T_82583)) {
   4524 				/*
   4525 				 * Document says this bit must be set for
   4526 				 * proper operation.
   4527 				 */
   4528 				reg = CSR_READ(sc, WMREG_GCR);
   4529 				reg |= __BIT(22);
   4530 				CSR_WRITE(sc, WMREG_GCR, reg);
   4531 
   4532 				/*
   4533 				 * Apply workaround for hardware errata
   4534 				 * documented in errata docs Fixes issue where
   4535 				 * some error prone or unreliable PCIe
   4536 				 * completions are occurring, particularly
   4537 				 * with ASPM enabled. Without fix, issue can
   4538 				 * cause Tx timeouts.
   4539 				 */
   4540 				reg = CSR_READ(sc, WMREG_GCR2);
   4541 				reg |= __BIT(0);
   4542 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4543 			}
   4544 			break;
   4545 		case WM_T_80003:
   4546 			/* TARC0 */
   4547 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4548 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4549 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   4550 
   4551 			/* TARC1 bit 28 */
   4552 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4553 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4554 				tarc1 &= ~__BIT(28);
   4555 			else
   4556 				tarc1 |= __BIT(28);
   4557 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4558 			break;
   4559 		case WM_T_ICH8:
   4560 		case WM_T_ICH9:
   4561 		case WM_T_ICH10:
   4562 		case WM_T_PCH:
   4563 		case WM_T_PCH2:
   4564 		case WM_T_PCH_LPT:
   4565 		case WM_T_PCH_SPT:
   4566 		case WM_T_PCH_CNP:
   4567 			/* TARC0 */
   4568 			if (sc->sc_type == WM_T_ICH8) {
   4569 				/* Set TARC0 bits 29 and 28 */
   4570 				tarc0 |= __BITS(29, 28);
   4571 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4572 				tarc0 |= __BIT(29);
   4573 				/*
   4574 				 *  Drop bit 28. From Linux.
   4575 				 * See I218/I219 spec update
   4576 				 * "5. Buffer Overrun While the I219 is
   4577 				 * Processing DMA Transactions"
   4578 				 */
   4579 				tarc0 &= ~__BIT(28);
   4580 			}
   4581 			/* Set TARC0 bits 23,24,26,27 */
   4582 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4583 
   4584 			/* CTRL_EXT */
   4585 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4586 			reg |= __BIT(22);	/* Set bit 22 */
   4587 			/*
   4588 			 * Enable PHY low-power state when MAC is at D3
   4589 			 * w/o WoL
   4590 			 */
   4591 			if (sc->sc_type >= WM_T_PCH)
   4592 				reg |= CTRL_EXT_PHYPDEN;
   4593 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4594 
   4595 			/* TARC1 */
   4596 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4597 			/* bit 28 */
   4598 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4599 				tarc1 &= ~__BIT(28);
   4600 			else
   4601 				tarc1 |= __BIT(28);
   4602 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4603 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4604 
   4605 			/* Device Status */
   4606 			if (sc->sc_type == WM_T_ICH8) {
   4607 				reg = CSR_READ(sc, WMREG_STATUS);
   4608 				reg &= ~__BIT(31);
   4609 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4610 
   4611 			}
   4612 
   4613 			/* IOSFPC */
   4614 			if (sc->sc_type == WM_T_PCH_SPT) {
   4615 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4616 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4617 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4618 			}
   4619 			/*
   4620 			 * Work-around descriptor data corruption issue during
   4621 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4622 			 * capability.
   4623 			 */
   4624 			reg = CSR_READ(sc, WMREG_RFCTL);
   4625 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4626 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4627 			break;
   4628 		default:
   4629 			break;
   4630 		}
   4631 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4632 
   4633 		switch (sc->sc_type) {
   4634 		/*
   4635 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4636 		 * Avoid RSS Hash Value bug.
   4637 		 */
   4638 		case WM_T_82571:
   4639 		case WM_T_82572:
   4640 		case WM_T_82573:
   4641 		case WM_T_80003:
   4642 		case WM_T_ICH8:
   4643 			reg = CSR_READ(sc, WMREG_RFCTL);
   4644 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4645 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4646 			break;
   4647 		case WM_T_82574:
   4648 			/* Use extened Rx descriptor. */
   4649 			reg = CSR_READ(sc, WMREG_RFCTL);
   4650 			reg |= WMREG_RFCTL_EXSTEN;
   4651 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4652 			break;
   4653 		default:
   4654 			break;
   4655 		}
   4656 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4657 		/*
   4658 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4659 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4660 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4661 		 * Correctly by the Device"
   4662 		 *
   4663 		 * I354(C2000) Errata AVR53:
   4664 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4665 		 * Hang"
   4666 		 */
   4667 		reg = CSR_READ(sc, WMREG_RFCTL);
   4668 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4669 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4670 	}
   4671 }
   4672 
   4673 static uint32_t
   4674 wm_rxpbs_adjust_82580(uint32_t val)
   4675 {
   4676 	uint32_t rv = 0;
   4677 
   4678 	if (val < __arraycount(wm_82580_rxpbs_table))
   4679 		rv = wm_82580_rxpbs_table[val];
   4680 
   4681 	return rv;
   4682 }
   4683 
   4684 /*
   4685  * wm_reset_phy:
   4686  *
   4687  *	generic PHY reset function.
   4688  *	Same as e1000_phy_hw_reset_generic()
   4689  */
   4690 static int
   4691 wm_reset_phy(struct wm_softc *sc)
   4692 {
   4693 	uint32_t reg;
   4694 
   4695 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4696 		device_xname(sc->sc_dev), __func__));
   4697 	if (wm_phy_resetisblocked(sc))
   4698 		return -1;
   4699 
   4700 	sc->phy.acquire(sc);
   4701 
   4702 	reg = CSR_READ(sc, WMREG_CTRL);
   4703 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4704 	CSR_WRITE_FLUSH(sc);
   4705 
   4706 	delay(sc->phy.reset_delay_us);
   4707 
   4708 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4709 	CSR_WRITE_FLUSH(sc);
   4710 
   4711 	delay(150);
   4712 
   4713 	sc->phy.release(sc);
   4714 
   4715 	wm_get_cfg_done(sc);
   4716 	wm_phy_post_reset(sc);
   4717 
   4718 	return 0;
   4719 }
   4720 
   4721 /*
   4722  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4723  * so it is enough to check sc->sc_queue[0] only.
   4724  */
   4725 static void
   4726 wm_flush_desc_rings(struct wm_softc *sc)
   4727 {
   4728 	pcireg_t preg;
   4729 	uint32_t reg;
   4730 	struct wm_txqueue *txq;
   4731 	wiseman_txdesc_t *txd;
   4732 	int nexttx;
   4733 	uint32_t rctl;
   4734 
   4735 	/* First, disable MULR fix in FEXTNVM11 */
   4736 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4737 	reg |= FEXTNVM11_DIS_MULRFIX;
   4738 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4739 
   4740 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4741 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4742 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4743 		return;
   4744 
   4745 	/* TX */
   4746 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4747 	    preg, reg);
   4748 	reg = CSR_READ(sc, WMREG_TCTL);
   4749 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4750 
   4751 	txq = &sc->sc_queue[0].wmq_txq;
   4752 	nexttx = txq->txq_next;
   4753 	txd = &txq->txq_descs[nexttx];
   4754 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4755 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4756 	txd->wtx_fields.wtxu_status = 0;
   4757 	txd->wtx_fields.wtxu_options = 0;
   4758 	txd->wtx_fields.wtxu_vlan = 0;
   4759 
   4760 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4761 	    BUS_SPACE_BARRIER_WRITE);
   4762 
   4763 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4764 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4765 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4766 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4767 	delay(250);
   4768 
   4769 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4770 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4771 		return;
   4772 
   4773 	/* RX */
   4774 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4775 	rctl = CSR_READ(sc, WMREG_RCTL);
   4776 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4777 	CSR_WRITE_FLUSH(sc);
   4778 	delay(150);
   4779 
   4780 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4781 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4782 	reg &= 0xffffc000;
   4783 	/*
   4784 	 * Update thresholds: prefetch threshold to 31, host threshold
   4785 	 * to 1 and make sure the granularity is "descriptors" and not
   4786 	 * "cache lines"
   4787 	 */
   4788 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4789 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4790 
   4791 	/* Momentarily enable the RX ring for the changes to take effect */
   4792 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4793 	CSR_WRITE_FLUSH(sc);
   4794 	delay(150);
   4795 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4796 }
   4797 
   4798 /*
   4799  * wm_reset:
   4800  *
   4801  *	Reset the i82542 chip.
   4802  */
   4803 static void
   4804 wm_reset(struct wm_softc *sc)
   4805 {
   4806 	int phy_reset = 0;
   4807 	int i, error = 0;
   4808 	uint32_t reg;
   4809 	uint16_t kmreg;
   4810 	int rv;
   4811 
   4812 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4813 		device_xname(sc->sc_dev), __func__));
   4814 	KASSERT(sc->sc_type != 0);
   4815 
   4816 	/*
   4817 	 * Allocate on-chip memory according to the MTU size.
   4818 	 * The Packet Buffer Allocation register must be written
   4819 	 * before the chip is reset.
   4820 	 */
   4821 	switch (sc->sc_type) {
   4822 	case WM_T_82547:
   4823 	case WM_T_82547_2:
   4824 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4825 		    PBA_22K : PBA_30K;
   4826 		for (i = 0; i < sc->sc_nqueues; i++) {
   4827 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4828 			txq->txq_fifo_head = 0;
   4829 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4830 			txq->txq_fifo_size =
   4831 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4832 			txq->txq_fifo_stall = 0;
   4833 		}
   4834 		break;
   4835 	case WM_T_82571:
   4836 	case WM_T_82572:
   4837 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4838 	case WM_T_80003:
   4839 		sc->sc_pba = PBA_32K;
   4840 		break;
   4841 	case WM_T_82573:
   4842 		sc->sc_pba = PBA_12K;
   4843 		break;
   4844 	case WM_T_82574:
   4845 	case WM_T_82583:
   4846 		sc->sc_pba = PBA_20K;
   4847 		break;
   4848 	case WM_T_82576:
   4849 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4850 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4851 		break;
   4852 	case WM_T_82580:
   4853 	case WM_T_I350:
   4854 	case WM_T_I354:
   4855 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4856 		break;
   4857 	case WM_T_I210:
   4858 	case WM_T_I211:
   4859 		sc->sc_pba = PBA_34K;
   4860 		break;
   4861 	case WM_T_ICH8:
   4862 		/* Workaround for a bit corruption issue in FIFO memory */
   4863 		sc->sc_pba = PBA_8K;
   4864 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4865 		break;
   4866 	case WM_T_ICH9:
   4867 	case WM_T_ICH10:
   4868 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4869 		    PBA_14K : PBA_10K;
   4870 		break;
   4871 	case WM_T_PCH:
   4872 	case WM_T_PCH2:	/* XXX 14K? */
   4873 	case WM_T_PCH_LPT:
   4874 	case WM_T_PCH_SPT:
   4875 	case WM_T_PCH_CNP:
   4876 		sc->sc_pba = PBA_26K;
   4877 		break;
   4878 	default:
   4879 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4880 		    PBA_40K : PBA_48K;
   4881 		break;
   4882 	}
   4883 	/*
   4884 	 * Only old or non-multiqueue devices have the PBA register
   4885 	 * XXX Need special handling for 82575.
   4886 	 */
   4887 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4888 	    || (sc->sc_type == WM_T_82575))
   4889 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4890 
   4891 	/* Prevent the PCI-E bus from sticking */
   4892 	if (sc->sc_flags & WM_F_PCIE) {
   4893 		int timeout = 800;
   4894 
   4895 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4896 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4897 
   4898 		while (timeout--) {
   4899 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4900 			    == 0)
   4901 				break;
   4902 			delay(100);
   4903 		}
   4904 		if (timeout == 0)
   4905 			device_printf(sc->sc_dev,
   4906 			    "failed to disable busmastering\n");
   4907 	}
   4908 
   4909 	/* Set the completion timeout for interface */
   4910 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4911 	    || (sc->sc_type == WM_T_82580)
   4912 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4913 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4914 		wm_set_pcie_completion_timeout(sc);
   4915 
   4916 	/* Clear interrupt */
   4917 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4918 	if (wm_is_using_msix(sc)) {
   4919 		if (sc->sc_type != WM_T_82574) {
   4920 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4921 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4922 		} else
   4923 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4924 	}
   4925 
   4926 	/* Stop the transmit and receive processes. */
   4927 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4928 	sc->sc_rctl &= ~RCTL_EN;
   4929 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4930 	CSR_WRITE_FLUSH(sc);
   4931 
   4932 	/* XXX set_tbi_sbp_82543() */
   4933 
   4934 	delay(10*1000);
   4935 
   4936 	/* Must acquire the MDIO ownership before MAC reset */
   4937 	switch (sc->sc_type) {
   4938 	case WM_T_82573:
   4939 	case WM_T_82574:
   4940 	case WM_T_82583:
   4941 		error = wm_get_hw_semaphore_82573(sc);
   4942 		break;
   4943 	default:
   4944 		break;
   4945 	}
   4946 
   4947 	/*
   4948 	 * 82541 Errata 29? & 82547 Errata 28?
   4949 	 * See also the description about PHY_RST bit in CTRL register
   4950 	 * in 8254x_GBe_SDM.pdf.
   4951 	 */
   4952 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4953 		CSR_WRITE(sc, WMREG_CTRL,
   4954 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4955 		CSR_WRITE_FLUSH(sc);
   4956 		delay(5000);
   4957 	}
   4958 
   4959 	switch (sc->sc_type) {
   4960 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4961 	case WM_T_82541:
   4962 	case WM_T_82541_2:
   4963 	case WM_T_82547:
   4964 	case WM_T_82547_2:
   4965 		/*
   4966 		 * On some chipsets, a reset through a memory-mapped write
   4967 		 * cycle can cause the chip to reset before completing the
   4968 		 * write cycle. This causes major headache that can be avoided
   4969 		 * by issuing the reset via indirect register writes through
   4970 		 * I/O space.
   4971 		 *
   4972 		 * So, if we successfully mapped the I/O BAR at attach time,
   4973 		 * use that. Otherwise, try our luck with a memory-mapped
   4974 		 * reset.
   4975 		 */
   4976 		if (sc->sc_flags & WM_F_IOH_VALID)
   4977 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4978 		else
   4979 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4980 		break;
   4981 	case WM_T_82545_3:
   4982 	case WM_T_82546_3:
   4983 		/* Use the shadow control register on these chips. */
   4984 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4985 		break;
   4986 	case WM_T_80003:
   4987 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4988 		sc->phy.acquire(sc);
   4989 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4990 		sc->phy.release(sc);
   4991 		break;
   4992 	case WM_T_ICH8:
   4993 	case WM_T_ICH9:
   4994 	case WM_T_ICH10:
   4995 	case WM_T_PCH:
   4996 	case WM_T_PCH2:
   4997 	case WM_T_PCH_LPT:
   4998 	case WM_T_PCH_SPT:
   4999 	case WM_T_PCH_CNP:
   5000 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5001 		if (wm_phy_resetisblocked(sc) == false) {
   5002 			/*
   5003 			 * Gate automatic PHY configuration by hardware on
   5004 			 * non-managed 82579
   5005 			 */
   5006 			if ((sc->sc_type == WM_T_PCH2)
   5007 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5008 				== 0))
   5009 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5010 
   5011 			reg |= CTRL_PHY_RESET;
   5012 			phy_reset = 1;
   5013 		} else
   5014 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5015 		sc->phy.acquire(sc);
   5016 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5017 		/* Don't insert a completion barrier when reset */
   5018 		delay(20*1000);
   5019 		mutex_exit(sc->sc_ich_phymtx);
   5020 		break;
   5021 	case WM_T_82580:
   5022 	case WM_T_I350:
   5023 	case WM_T_I354:
   5024 	case WM_T_I210:
   5025 	case WM_T_I211:
   5026 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5027 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5028 			CSR_WRITE_FLUSH(sc);
   5029 		delay(5000);
   5030 		break;
   5031 	case WM_T_82542_2_0:
   5032 	case WM_T_82542_2_1:
   5033 	case WM_T_82543:
   5034 	case WM_T_82540:
   5035 	case WM_T_82545:
   5036 	case WM_T_82546:
   5037 	case WM_T_82571:
   5038 	case WM_T_82572:
   5039 	case WM_T_82573:
   5040 	case WM_T_82574:
   5041 	case WM_T_82575:
   5042 	case WM_T_82576:
   5043 	case WM_T_82583:
   5044 	default:
   5045 		/* Everything else can safely use the documented method. */
   5046 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5047 		break;
   5048 	}
   5049 
   5050 	/* Must release the MDIO ownership after MAC reset */
   5051 	switch (sc->sc_type) {
   5052 	case WM_T_82573:
   5053 	case WM_T_82574:
   5054 	case WM_T_82583:
   5055 		if (error == 0)
   5056 			wm_put_hw_semaphore_82573(sc);
   5057 		break;
   5058 	default:
   5059 		break;
   5060 	}
   5061 
   5062 	/* Set Phy Config Counter to 50msec */
   5063 	if (sc->sc_type == WM_T_PCH2) {
   5064 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5065 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5066 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5067 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5068 	}
   5069 
   5070 	if (phy_reset != 0)
   5071 		wm_get_cfg_done(sc);
   5072 
   5073 	/* Reload EEPROM */
   5074 	switch (sc->sc_type) {
   5075 	case WM_T_82542_2_0:
   5076 	case WM_T_82542_2_1:
   5077 	case WM_T_82543:
   5078 	case WM_T_82544:
   5079 		delay(10);
   5080 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5081 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5082 		CSR_WRITE_FLUSH(sc);
   5083 		delay(2000);
   5084 		break;
   5085 	case WM_T_82540:
   5086 	case WM_T_82545:
   5087 	case WM_T_82545_3:
   5088 	case WM_T_82546:
   5089 	case WM_T_82546_3:
   5090 		delay(5*1000);
   5091 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5092 		break;
   5093 	case WM_T_82541:
   5094 	case WM_T_82541_2:
   5095 	case WM_T_82547:
   5096 	case WM_T_82547_2:
   5097 		delay(20000);
   5098 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5099 		break;
   5100 	case WM_T_82571:
   5101 	case WM_T_82572:
   5102 	case WM_T_82573:
   5103 	case WM_T_82574:
   5104 	case WM_T_82583:
   5105 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5106 			delay(10);
   5107 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5108 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5109 			CSR_WRITE_FLUSH(sc);
   5110 		}
   5111 		/* check EECD_EE_AUTORD */
   5112 		wm_get_auto_rd_done(sc);
   5113 		/*
   5114 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5115 		 * is set.
   5116 		 */
   5117 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5118 		    || (sc->sc_type == WM_T_82583))
   5119 			delay(25*1000);
   5120 		break;
   5121 	case WM_T_82575:
   5122 	case WM_T_82576:
   5123 	case WM_T_82580:
   5124 	case WM_T_I350:
   5125 	case WM_T_I354:
   5126 	case WM_T_I210:
   5127 	case WM_T_I211:
   5128 	case WM_T_80003:
   5129 		/* check EECD_EE_AUTORD */
   5130 		wm_get_auto_rd_done(sc);
   5131 		break;
   5132 	case WM_T_ICH8:
   5133 	case WM_T_ICH9:
   5134 	case WM_T_ICH10:
   5135 	case WM_T_PCH:
   5136 	case WM_T_PCH2:
   5137 	case WM_T_PCH_LPT:
   5138 	case WM_T_PCH_SPT:
   5139 	case WM_T_PCH_CNP:
   5140 		break;
   5141 	default:
   5142 		panic("%s: unknown type\n", __func__);
   5143 	}
   5144 
   5145 	/* Check whether EEPROM is present or not */
   5146 	switch (sc->sc_type) {
   5147 	case WM_T_82575:
   5148 	case WM_T_82576:
   5149 	case WM_T_82580:
   5150 	case WM_T_I350:
   5151 	case WM_T_I354:
   5152 	case WM_T_ICH8:
   5153 	case WM_T_ICH9:
   5154 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5155 			/* Not found */
   5156 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5157 			if (sc->sc_type == WM_T_82575)
   5158 				wm_reset_init_script_82575(sc);
   5159 		}
   5160 		break;
   5161 	default:
   5162 		break;
   5163 	}
   5164 
   5165 	if (phy_reset != 0)
   5166 		wm_phy_post_reset(sc);
   5167 
   5168 	if ((sc->sc_type == WM_T_82580)
   5169 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5170 		/* Clear global device reset status bit */
   5171 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5172 	}
   5173 
   5174 	/* Clear any pending interrupt events. */
   5175 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5176 	reg = CSR_READ(sc, WMREG_ICR);
   5177 	if (wm_is_using_msix(sc)) {
   5178 		if (sc->sc_type != WM_T_82574) {
   5179 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5180 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5181 		} else
   5182 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5183 	}
   5184 
   5185 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5186 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5187 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5188 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5189 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5190 		reg |= KABGTXD_BGSQLBIAS;
   5191 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5192 	}
   5193 
   5194 	/* Reload sc_ctrl */
   5195 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5196 
   5197 	wm_set_eee(sc);
   5198 
   5199 	/*
   5200 	 * For PCH, this write will make sure that any noise will be detected
   5201 	 * as a CRC error and be dropped rather than show up as a bad packet
   5202 	 * to the DMA engine
   5203 	 */
   5204 	if (sc->sc_type == WM_T_PCH)
   5205 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5206 
   5207 	if (sc->sc_type >= WM_T_82544)
   5208 		CSR_WRITE(sc, WMREG_WUC, 0);
   5209 
   5210 	if (sc->sc_type < WM_T_82575)
   5211 		wm_disable_aspm(sc); /* Workaround for some chips */
   5212 
   5213 	wm_reset_mdicnfg_82580(sc);
   5214 
   5215 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5216 		wm_pll_workaround_i210(sc);
   5217 
   5218 	if (sc->sc_type == WM_T_80003) {
   5219 		/* Default to TRUE to enable the MDIC W/A */
   5220 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5221 
   5222 		rv = wm_kmrn_readreg(sc,
   5223 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5224 		if (rv == 0) {
   5225 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5226 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5227 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5228 			else
   5229 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5230 		}
   5231 	}
   5232 }
   5233 
   5234 /*
   5235  * wm_add_rxbuf:
   5236  *
   5237  *	Add a receive buffer to the indiciated descriptor.
   5238  */
   5239 static int
   5240 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5241 {
   5242 	struct wm_softc *sc = rxq->rxq_sc;
   5243 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5244 	struct mbuf *m;
   5245 	int error;
   5246 
   5247 	KASSERT(mutex_owned(rxq->rxq_lock));
   5248 
   5249 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5250 	if (m == NULL)
   5251 		return ENOBUFS;
   5252 
   5253 	MCLGET(m, M_DONTWAIT);
   5254 	if ((m->m_flags & M_EXT) == 0) {
   5255 		m_freem(m);
   5256 		return ENOBUFS;
   5257 	}
   5258 
   5259 	if (rxs->rxs_mbuf != NULL)
   5260 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5261 
   5262 	rxs->rxs_mbuf = m;
   5263 
   5264 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5265 	/*
   5266 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5267 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5268 	 */
   5269 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5270 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5271 	if (error) {
   5272 		/* XXX XXX XXX */
   5273 		aprint_error_dev(sc->sc_dev,
   5274 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5275 		panic("wm_add_rxbuf");
   5276 	}
   5277 
   5278 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5279 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5280 
   5281 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5282 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5283 			wm_init_rxdesc(rxq, idx);
   5284 	} else
   5285 		wm_init_rxdesc(rxq, idx);
   5286 
   5287 	return 0;
   5288 }
   5289 
   5290 /*
   5291  * wm_rxdrain:
   5292  *
   5293  *	Drain the receive queue.
   5294  */
   5295 static void
   5296 wm_rxdrain(struct wm_rxqueue *rxq)
   5297 {
   5298 	struct wm_softc *sc = rxq->rxq_sc;
   5299 	struct wm_rxsoft *rxs;
   5300 	int i;
   5301 
   5302 	KASSERT(mutex_owned(rxq->rxq_lock));
   5303 
   5304 	for (i = 0; i < WM_NRXDESC; i++) {
   5305 		rxs = &rxq->rxq_soft[i];
   5306 		if (rxs->rxs_mbuf != NULL) {
   5307 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5308 			m_freem(rxs->rxs_mbuf);
   5309 			rxs->rxs_mbuf = NULL;
   5310 		}
   5311 	}
   5312 }
   5313 
   5314 /*
   5315  * Setup registers for RSS.
   5316  *
   5317  * XXX not yet VMDq support
   5318  */
   5319 static void
   5320 wm_init_rss(struct wm_softc *sc)
   5321 {
   5322 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5323 	int i;
   5324 
   5325 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5326 
   5327 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5328 		unsigned int qid, reta_ent;
   5329 
   5330 		qid  = i % sc->sc_nqueues;
   5331 		switch (sc->sc_type) {
   5332 		case WM_T_82574:
   5333 			reta_ent = __SHIFTIN(qid,
   5334 			    RETA_ENT_QINDEX_MASK_82574);
   5335 			break;
   5336 		case WM_T_82575:
   5337 			reta_ent = __SHIFTIN(qid,
   5338 			    RETA_ENT_QINDEX1_MASK_82575);
   5339 			break;
   5340 		default:
   5341 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5342 			break;
   5343 		}
   5344 
   5345 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5346 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5347 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5348 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5349 	}
   5350 
   5351 	rss_getkey((uint8_t *)rss_key);
   5352 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5353 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5354 
   5355 	if (sc->sc_type == WM_T_82574)
   5356 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5357 	else
   5358 		mrqc = MRQC_ENABLE_RSS_MQ;
   5359 
   5360 	/*
   5361 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5362 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5363 	 */
   5364 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5365 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5366 #if 0
   5367 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5368 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5369 #endif
   5370 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5371 
   5372 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5373 }
   5374 
   5375 /*
   5376  * Adjust TX and RX queue numbers which the system actulally uses.
   5377  *
   5378  * The numbers are affected by below parameters.
   5379  *     - The nubmer of hardware queues
   5380  *     - The number of MSI-X vectors (= "nvectors" argument)
   5381  *     - ncpu
   5382  */
   5383 static void
   5384 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5385 {
   5386 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5387 
   5388 	if (nvectors < 2) {
   5389 		sc->sc_nqueues = 1;
   5390 		return;
   5391 	}
   5392 
   5393 	switch (sc->sc_type) {
   5394 	case WM_T_82572:
   5395 		hw_ntxqueues = 2;
   5396 		hw_nrxqueues = 2;
   5397 		break;
   5398 	case WM_T_82574:
   5399 		hw_ntxqueues = 2;
   5400 		hw_nrxqueues = 2;
   5401 		break;
   5402 	case WM_T_82575:
   5403 		hw_ntxqueues = 4;
   5404 		hw_nrxqueues = 4;
   5405 		break;
   5406 	case WM_T_82576:
   5407 		hw_ntxqueues = 16;
   5408 		hw_nrxqueues = 16;
   5409 		break;
   5410 	case WM_T_82580:
   5411 	case WM_T_I350:
   5412 	case WM_T_I354:
   5413 		hw_ntxqueues = 8;
   5414 		hw_nrxqueues = 8;
   5415 		break;
   5416 	case WM_T_I210:
   5417 		hw_ntxqueues = 4;
   5418 		hw_nrxqueues = 4;
   5419 		break;
   5420 	case WM_T_I211:
   5421 		hw_ntxqueues = 2;
   5422 		hw_nrxqueues = 2;
   5423 		break;
   5424 		/*
   5425 		 * As below ethernet controllers does not support MSI-X,
   5426 		 * this driver let them not use multiqueue.
   5427 		 *     - WM_T_80003
   5428 		 *     - WM_T_ICH8
   5429 		 *     - WM_T_ICH9
   5430 		 *     - WM_T_ICH10
   5431 		 *     - WM_T_PCH
   5432 		 *     - WM_T_PCH2
   5433 		 *     - WM_T_PCH_LPT
   5434 		 */
   5435 	default:
   5436 		hw_ntxqueues = 1;
   5437 		hw_nrxqueues = 1;
   5438 		break;
   5439 	}
   5440 
   5441 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5442 
   5443 	/*
   5444 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
   5445 	 * the number of queues used actually.
   5446 	 */
   5447 	if (nvectors < hw_nqueues + 1)
   5448 		sc->sc_nqueues = nvectors - 1;
   5449 	else
   5450 		sc->sc_nqueues = hw_nqueues;
   5451 
   5452 	/*
   5453 	 * As queues more then cpus cannot improve scaling, we limit
   5454 	 * the number of queues used actually.
   5455 	 */
   5456 	if (ncpu < sc->sc_nqueues)
   5457 		sc->sc_nqueues = ncpu;
   5458 }
   5459 
   5460 static inline bool
   5461 wm_is_using_msix(struct wm_softc *sc)
   5462 {
   5463 
   5464 	return (sc->sc_nintrs > 1);
   5465 }
   5466 
   5467 static inline bool
   5468 wm_is_using_multiqueue(struct wm_softc *sc)
   5469 {
   5470 
   5471 	return (sc->sc_nqueues > 1);
   5472 }
   5473 
   5474 static int
   5475 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5476 {
   5477 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5478 
   5479 	wmq->wmq_id = qidx;
   5480 	wmq->wmq_intr_idx = intr_idx;
   5481 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5482 	    wm_handle_queue, wmq);
   5483 	if (wmq->wmq_si != NULL)
   5484 		return 0;
   5485 
   5486 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5487 	    wmq->wmq_id);
   5488 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5489 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5490 	return ENOMEM;
   5491 }
   5492 
   5493 /*
   5494  * Both single interrupt MSI and INTx can use this function.
   5495  */
   5496 static int
   5497 wm_setup_legacy(struct wm_softc *sc)
   5498 {
   5499 	pci_chipset_tag_t pc = sc->sc_pc;
   5500 	const char *intrstr = NULL;
   5501 	char intrbuf[PCI_INTRSTR_LEN];
   5502 	int error;
   5503 
   5504 	error = wm_alloc_txrx_queues(sc);
   5505 	if (error) {
   5506 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5507 		    error);
   5508 		return ENOMEM;
   5509 	}
   5510 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5511 	    sizeof(intrbuf));
   5512 #ifdef WM_MPSAFE
   5513 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5514 #endif
   5515 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5516 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5517 	if (sc->sc_ihs[0] == NULL) {
   5518 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5519 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5520 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5521 		return ENOMEM;
   5522 	}
   5523 
   5524 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5525 	sc->sc_nintrs = 1;
   5526 
   5527 	return wm_softint_establish_queue(sc, 0, 0);
   5528 }
   5529 
   5530 static int
   5531 wm_setup_msix(struct wm_softc *sc)
   5532 {
   5533 	void *vih;
   5534 	kcpuset_t *affinity;
   5535 	int qidx, error, intr_idx, txrx_established;
   5536 	pci_chipset_tag_t pc = sc->sc_pc;
   5537 	const char *intrstr = NULL;
   5538 	char intrbuf[PCI_INTRSTR_LEN];
   5539 	char intr_xname[INTRDEVNAMEBUF];
   5540 
   5541 	if (sc->sc_nqueues < ncpu) {
   5542 		/*
   5543 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   5544 		 * interrupts start from CPU#1.
   5545 		 */
   5546 		sc->sc_affinity_offset = 1;
   5547 	} else {
   5548 		/*
   5549 		 * In this case, this device use all CPUs. So, we unify
   5550 		 * affinitied cpu_index to msix vector number for readability.
   5551 		 */
   5552 		sc->sc_affinity_offset = 0;
   5553 	}
   5554 
   5555 	error = wm_alloc_txrx_queues(sc);
   5556 	if (error) {
   5557 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5558 		    error);
   5559 		return ENOMEM;
   5560 	}
   5561 
   5562 	kcpuset_create(&affinity, false);
   5563 	intr_idx = 0;
   5564 
   5565 	/*
   5566 	 * TX and RX
   5567 	 */
   5568 	txrx_established = 0;
   5569 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5570 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5571 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5572 
   5573 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5574 		    sizeof(intrbuf));
   5575 #ifdef WM_MPSAFE
   5576 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5577 		    PCI_INTR_MPSAFE, true);
   5578 #endif
   5579 		memset(intr_xname, 0, sizeof(intr_xname));
   5580 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5581 		    device_xname(sc->sc_dev), qidx);
   5582 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5583 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5584 		if (vih == NULL) {
   5585 			aprint_error_dev(sc->sc_dev,
   5586 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5587 			    intrstr ? " at " : "",
   5588 			    intrstr ? intrstr : "");
   5589 
   5590 			goto fail;
   5591 		}
   5592 		kcpuset_zero(affinity);
   5593 		/* Round-robin affinity */
   5594 		kcpuset_set(affinity, affinity_to);
   5595 		error = interrupt_distribute(vih, affinity, NULL);
   5596 		if (error == 0) {
   5597 			aprint_normal_dev(sc->sc_dev,
   5598 			    "for TX and RX interrupting at %s affinity to %u\n",
   5599 			    intrstr, affinity_to);
   5600 		} else {
   5601 			aprint_normal_dev(sc->sc_dev,
   5602 			    "for TX and RX interrupting at %s\n", intrstr);
   5603 		}
   5604 		sc->sc_ihs[intr_idx] = vih;
   5605 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5606 			goto fail;
   5607 		txrx_established++;
   5608 		intr_idx++;
   5609 	}
   5610 
   5611 	/* LINK */
   5612 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5613 	    sizeof(intrbuf));
   5614 #ifdef WM_MPSAFE
   5615 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5616 #endif
   5617 	memset(intr_xname, 0, sizeof(intr_xname));
   5618 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5619 	    device_xname(sc->sc_dev));
   5620 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5621 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5622 	if (vih == NULL) {
   5623 		aprint_error_dev(sc->sc_dev,
   5624 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5625 		    intrstr ? " at " : "",
   5626 		    intrstr ? intrstr : "");
   5627 
   5628 		goto fail;
   5629 	}
   5630 	/* Keep default affinity to LINK interrupt */
   5631 	aprint_normal_dev(sc->sc_dev,
   5632 	    "for LINK interrupting at %s\n", intrstr);
   5633 	sc->sc_ihs[intr_idx] = vih;
   5634 	sc->sc_link_intr_idx = intr_idx;
   5635 
   5636 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5637 	kcpuset_destroy(affinity);
   5638 	return 0;
   5639 
   5640  fail:
   5641 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5642 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5643 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5644 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5645 	}
   5646 
   5647 	kcpuset_destroy(affinity);
   5648 	return ENOMEM;
   5649 }
   5650 
   5651 static void
   5652 wm_unset_stopping_flags(struct wm_softc *sc)
   5653 {
   5654 	int i;
   5655 
   5656 	KASSERT(WM_CORE_LOCKED(sc));
   5657 
   5658 	/* Must unset stopping flags in ascending order. */
   5659 	for (i = 0; i < sc->sc_nqueues; i++) {
   5660 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5661 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5662 
   5663 		mutex_enter(txq->txq_lock);
   5664 		txq->txq_stopping = false;
   5665 		mutex_exit(txq->txq_lock);
   5666 
   5667 		mutex_enter(rxq->rxq_lock);
   5668 		rxq->rxq_stopping = false;
   5669 		mutex_exit(rxq->rxq_lock);
   5670 	}
   5671 
   5672 	sc->sc_core_stopping = false;
   5673 }
   5674 
   5675 static void
   5676 wm_set_stopping_flags(struct wm_softc *sc)
   5677 {
   5678 	int i;
   5679 
   5680 	KASSERT(WM_CORE_LOCKED(sc));
   5681 
   5682 	sc->sc_core_stopping = true;
   5683 
   5684 	/* Must set stopping flags in ascending order. */
   5685 	for (i = 0; i < sc->sc_nqueues; i++) {
   5686 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5687 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5688 
   5689 		mutex_enter(rxq->rxq_lock);
   5690 		rxq->rxq_stopping = true;
   5691 		mutex_exit(rxq->rxq_lock);
   5692 
   5693 		mutex_enter(txq->txq_lock);
   5694 		txq->txq_stopping = true;
   5695 		mutex_exit(txq->txq_lock);
   5696 	}
   5697 }
   5698 
   5699 /*
   5700  * Write interrupt interval value to ITR or EITR
   5701  */
   5702 static void
   5703 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5704 {
   5705 
   5706 	if (!wmq->wmq_set_itr)
   5707 		return;
   5708 
   5709 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5710 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5711 
   5712 		/*
   5713 		 * 82575 doesn't have CNT_INGR field.
   5714 		 * So, overwrite counter field by software.
   5715 		 */
   5716 		if (sc->sc_type == WM_T_82575)
   5717 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5718 		else
   5719 			eitr |= EITR_CNT_INGR;
   5720 
   5721 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5722 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5723 		/*
   5724 		 * 82574 has both ITR and EITR. SET EITR when we use
   5725 		 * the multi queue function with MSI-X.
   5726 		 */
   5727 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5728 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5729 	} else {
   5730 		KASSERT(wmq->wmq_id == 0);
   5731 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5732 	}
   5733 
   5734 	wmq->wmq_set_itr = false;
   5735 }
   5736 
   5737 /*
   5738  * TODO
   5739  * Below dynamic calculation of itr is almost the same as linux igb,
   5740  * however it does not fit to wm(4). So, we will have been disable AIM
   5741  * until we will find appropriate calculation of itr.
   5742  */
   5743 /*
   5744  * calculate interrupt interval value to be going to write register in
   5745  * wm_itrs_writereg(). This function does not write ITR/EITR register.
   5746  */
   5747 static void
   5748 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5749 {
   5750 #ifdef NOTYET
   5751 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5752 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5753 	uint32_t avg_size = 0;
   5754 	uint32_t new_itr;
   5755 
   5756 	if (rxq->rxq_packets)
   5757 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5758 	if (txq->txq_packets)
   5759 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5760 
   5761 	if (avg_size == 0) {
   5762 		new_itr = 450; /* restore default value */
   5763 		goto out;
   5764 	}
   5765 
   5766 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5767 	avg_size += 24;
   5768 
   5769 	/* Don't starve jumbo frames */
   5770 	avg_size = uimin(avg_size, 3000);
   5771 
   5772 	/* Give a little boost to mid-size frames */
   5773 	if ((avg_size > 300) && (avg_size < 1200))
   5774 		new_itr = avg_size / 3;
   5775 	else
   5776 		new_itr = avg_size / 2;
   5777 
   5778 out:
   5779 	/*
   5780 	 * The usage of 82574 and 82575 EITR is different from otther NEWQUEUE
   5781 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5782 	 */
   5783 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5784 		new_itr *= 4;
   5785 
   5786 	if (new_itr != wmq->wmq_itr) {
   5787 		wmq->wmq_itr = new_itr;
   5788 		wmq->wmq_set_itr = true;
   5789 	} else
   5790 		wmq->wmq_set_itr = false;
   5791 
   5792 	rxq->rxq_packets = 0;
   5793 	rxq->rxq_bytes = 0;
   5794 	txq->txq_packets = 0;
   5795 	txq->txq_bytes = 0;
   5796 #endif
   5797 }
   5798 
   5799 static void
   5800 wm_init_sysctls(struct wm_softc *sc)
   5801 {
   5802 	struct sysctllog **log;
   5803 	const struct sysctlnode *rnode, *cnode;
   5804 	int rv;
   5805 	const char *dvname;
   5806 
   5807 	log = &sc->sc_sysctllog;
   5808 	dvname = device_xname(sc->sc_dev);
   5809 
   5810 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5811 	    0, CTLTYPE_NODE, dvname,
   5812 	    SYSCTL_DESCR("wm information and settings"),
   5813 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5814 	if (rv != 0)
   5815 		goto err;
   5816 
   5817 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5818 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5819 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5820 	if (rv != 0)
   5821 		goto teardown;
   5822 
   5823 	return;
   5824 
   5825 teardown:
   5826 	sysctl_teardown(log);
   5827 err:
   5828 	sc->sc_sysctllog = NULL;
   5829 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5830 	    __func__, rv);
   5831 }
   5832 
   5833 /*
   5834  * wm_init:		[ifnet interface function]
   5835  *
   5836  *	Initialize the interface.
   5837  */
   5838 static int
   5839 wm_init(struct ifnet *ifp)
   5840 {
   5841 	struct wm_softc *sc = ifp->if_softc;
   5842 	int ret;
   5843 
   5844 	WM_CORE_LOCK(sc);
   5845 	ret = wm_init_locked(ifp);
   5846 	WM_CORE_UNLOCK(sc);
   5847 
   5848 	return ret;
   5849 }
   5850 
   5851 static int
   5852 wm_init_locked(struct ifnet *ifp)
   5853 {
   5854 	struct wm_softc *sc = ifp->if_softc;
   5855 	struct ethercom *ec = &sc->sc_ethercom;
   5856 	int i, j, trynum, error = 0;
   5857 	uint32_t reg, sfp_mask = 0;
   5858 
   5859 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5860 		device_xname(sc->sc_dev), __func__));
   5861 	KASSERT(WM_CORE_LOCKED(sc));
   5862 
   5863 	/*
   5864 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
   5865 	 * There is a small but measurable benefit to avoiding the adjusment
   5866 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5867 	 * on such platforms.  One possibility is that the DMA itself is
   5868 	 * slightly more efficient if the front of the entire packet (instead
   5869 	 * of the front of the headers) is aligned.
   5870 	 *
   5871 	 * Note we must always set align_tweak to 0 if we are using
   5872 	 * jumbo frames.
   5873 	 */
   5874 #ifdef __NO_STRICT_ALIGNMENT
   5875 	sc->sc_align_tweak = 0;
   5876 #else
   5877 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5878 		sc->sc_align_tweak = 0;
   5879 	else
   5880 		sc->sc_align_tweak = 2;
   5881 #endif /* __NO_STRICT_ALIGNMENT */
   5882 
   5883 	/* Cancel any pending I/O. */
   5884 	wm_stop_locked(ifp, false, false);
   5885 
   5886 	/* Update statistics before reset */
   5887 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   5888 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   5889 
   5890 	/* PCH_SPT hardware workaround */
   5891 	if (sc->sc_type == WM_T_PCH_SPT)
   5892 		wm_flush_desc_rings(sc);
   5893 
   5894 	/* Reset the chip to a known state. */
   5895 	wm_reset(sc);
   5896 
   5897 	/*
   5898 	 * AMT based hardware can now take control from firmware
   5899 	 * Do this after reset.
   5900 	 */
   5901 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5902 		wm_get_hw_control(sc);
   5903 
   5904 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5905 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5906 		wm_legacy_irq_quirk_spt(sc);
   5907 
   5908 	/* Init hardware bits */
   5909 	wm_initialize_hardware_bits(sc);
   5910 
   5911 	/* Reset the PHY. */
   5912 	if (sc->sc_flags & WM_F_HAS_MII)
   5913 		wm_gmii_reset(sc);
   5914 
   5915 	if (sc->sc_type >= WM_T_ICH8) {
   5916 		reg = CSR_READ(sc, WMREG_GCR);
   5917 		/*
   5918 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5919 		 * default after reset.
   5920 		 */
   5921 		if (sc->sc_type == WM_T_ICH8)
   5922 			reg |= GCR_NO_SNOOP_ALL;
   5923 		else
   5924 			reg &= ~GCR_NO_SNOOP_ALL;
   5925 		CSR_WRITE(sc, WMREG_GCR, reg);
   5926 	}
   5927 
   5928 	if ((sc->sc_type >= WM_T_ICH8)
   5929 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5930 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5931 
   5932 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5933 		reg |= CTRL_EXT_RO_DIS;
   5934 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5935 	}
   5936 
   5937 	/* Calculate (E)ITR value */
   5938 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5939 		/*
   5940 		 * For NEWQUEUE's EITR (except for 82575).
   5941 		 * 82575's EITR should be set same throttling value as other
   5942 		 * old controllers' ITR because the interrupt/sec calculation
   5943 		 * is the same, that is, 1,000,000,000 / (N * 256).
   5944 		 *
   5945 		 * 82574's EITR should be set same throttling value as ITR.
   5946 		 *
   5947 		 * For N interrupts/sec, set this value to:
   5948 		 * 1,000,000 / N in contrast to ITR throttoling value.
   5949 		 */
   5950 		sc->sc_itr_init = 450;
   5951 	} else if (sc->sc_type >= WM_T_82543) {
   5952 		/*
   5953 		 * Set up the interrupt throttling register (units of 256ns)
   5954 		 * Note that a footnote in Intel's documentation says this
   5955 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5956 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5957 		 * that that is also true for the 1024ns units of the other
   5958 		 * interrupt-related timer registers -- so, really, we ought
   5959 		 * to divide this value by 4 when the link speed is low.
   5960 		 *
   5961 		 * XXX implement this division at link speed change!
   5962 		 */
   5963 
   5964 		/*
   5965 		 * For N interrupts/sec, set this value to:
   5966 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5967 		 * absolute and packet timer values to this value
   5968 		 * divided by 4 to get "simple timer" behavior.
   5969 		 */
   5970 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5971 	}
   5972 
   5973 	error = wm_init_txrx_queues(sc);
   5974 	if (error)
   5975 		goto out;
   5976 
   5977 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   5978 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   5979 	    (sc->sc_type >= WM_T_82575))
   5980 		wm_serdes_power_up_link_82575(sc);
   5981 
   5982 	/* Clear out the VLAN table -- we don't use it (yet). */
   5983 	CSR_WRITE(sc, WMREG_VET, 0);
   5984 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5985 		trynum = 10; /* Due to hw errata */
   5986 	else
   5987 		trynum = 1;
   5988 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5989 		for (j = 0; j < trynum; j++)
   5990 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5991 
   5992 	/*
   5993 	 * Set up flow-control parameters.
   5994 	 *
   5995 	 * XXX Values could probably stand some tuning.
   5996 	 */
   5997 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5998 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5999 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6000 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6001 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6002 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6003 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6004 	}
   6005 
   6006 	sc->sc_fcrtl = FCRTL_DFLT;
   6007 	if (sc->sc_type < WM_T_82543) {
   6008 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6009 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6010 	} else {
   6011 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6012 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6013 	}
   6014 
   6015 	if (sc->sc_type == WM_T_80003)
   6016 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6017 	else
   6018 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6019 
   6020 	/* Writes the control register. */
   6021 	wm_set_vlan(sc);
   6022 
   6023 	if (sc->sc_flags & WM_F_HAS_MII) {
   6024 		uint16_t kmreg;
   6025 
   6026 		switch (sc->sc_type) {
   6027 		case WM_T_80003:
   6028 		case WM_T_ICH8:
   6029 		case WM_T_ICH9:
   6030 		case WM_T_ICH10:
   6031 		case WM_T_PCH:
   6032 		case WM_T_PCH2:
   6033 		case WM_T_PCH_LPT:
   6034 		case WM_T_PCH_SPT:
   6035 		case WM_T_PCH_CNP:
   6036 			/*
   6037 			 * Set the mac to wait the maximum time between each
   6038 			 * iteration and increase the max iterations when
   6039 			 * polling the phy; this fixes erroneous timeouts at
   6040 			 * 10Mbps.
   6041 			 */
   6042 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6043 			    0xFFFF);
   6044 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6045 			    &kmreg);
   6046 			kmreg |= 0x3F;
   6047 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6048 			    kmreg);
   6049 			break;
   6050 		default:
   6051 			break;
   6052 		}
   6053 
   6054 		if (sc->sc_type == WM_T_80003) {
   6055 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6056 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6057 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6058 
   6059 			/* Bypass RX and TX FIFO's */
   6060 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6061 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6062 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6063 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6064 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6065 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6066 		}
   6067 	}
   6068 #if 0
   6069 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6070 #endif
   6071 
   6072 	/* Set up checksum offload parameters. */
   6073 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6074 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6075 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6076 		reg |= RXCSUM_IPOFL;
   6077 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6078 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6079 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6080 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6081 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6082 
   6083 	/* Set registers about MSI-X */
   6084 	if (wm_is_using_msix(sc)) {
   6085 		uint32_t ivar, qintr_idx;
   6086 		struct wm_queue *wmq;
   6087 		unsigned int qid;
   6088 
   6089 		if (sc->sc_type == WM_T_82575) {
   6090 			/* Interrupt control */
   6091 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6092 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6093 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6094 
   6095 			/* TX and RX */
   6096 			for (i = 0; i < sc->sc_nqueues; i++) {
   6097 				wmq = &sc->sc_queue[i];
   6098 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6099 				    EITR_TX_QUEUE(wmq->wmq_id)
   6100 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6101 			}
   6102 			/* Link status */
   6103 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6104 			    EITR_OTHER);
   6105 		} else if (sc->sc_type == WM_T_82574) {
   6106 			/* Interrupt control */
   6107 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6108 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6109 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6110 
   6111 			/*
   6112 			 * Workaround issue with spurious interrupts
   6113 			 * in MSI-X mode.
   6114 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
   6115 			 * initialized yet. So re-initialize WMREG_RFCTL here.
   6116 			 */
   6117 			reg = CSR_READ(sc, WMREG_RFCTL);
   6118 			reg |= WMREG_RFCTL_ACKDIS;
   6119 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6120 
   6121 			ivar = 0;
   6122 			/* TX and RX */
   6123 			for (i = 0; i < sc->sc_nqueues; i++) {
   6124 				wmq = &sc->sc_queue[i];
   6125 				qid = wmq->wmq_id;
   6126 				qintr_idx = wmq->wmq_intr_idx;
   6127 
   6128 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6129 				    IVAR_TX_MASK_Q_82574(qid));
   6130 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6131 				    IVAR_RX_MASK_Q_82574(qid));
   6132 			}
   6133 			/* Link status */
   6134 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6135 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6136 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6137 		} else {
   6138 			/* Interrupt control */
   6139 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6140 			    | GPIE_EIAME | GPIE_PBA);
   6141 
   6142 			switch (sc->sc_type) {
   6143 			case WM_T_82580:
   6144 			case WM_T_I350:
   6145 			case WM_T_I354:
   6146 			case WM_T_I210:
   6147 			case WM_T_I211:
   6148 				/* TX and RX */
   6149 				for (i = 0; i < sc->sc_nqueues; i++) {
   6150 					wmq = &sc->sc_queue[i];
   6151 					qid = wmq->wmq_id;
   6152 					qintr_idx = wmq->wmq_intr_idx;
   6153 
   6154 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6155 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6156 					ivar |= __SHIFTIN((qintr_idx
   6157 						| IVAR_VALID),
   6158 					    IVAR_TX_MASK_Q(qid));
   6159 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6160 					ivar |= __SHIFTIN((qintr_idx
   6161 						| IVAR_VALID),
   6162 					    IVAR_RX_MASK_Q(qid));
   6163 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6164 				}
   6165 				break;
   6166 			case WM_T_82576:
   6167 				/* TX and RX */
   6168 				for (i = 0; i < sc->sc_nqueues; i++) {
   6169 					wmq = &sc->sc_queue[i];
   6170 					qid = wmq->wmq_id;
   6171 					qintr_idx = wmq->wmq_intr_idx;
   6172 
   6173 					ivar = CSR_READ(sc,
   6174 					    WMREG_IVAR_Q_82576(qid));
   6175 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6176 					ivar |= __SHIFTIN((qintr_idx
   6177 						| IVAR_VALID),
   6178 					    IVAR_TX_MASK_Q_82576(qid));
   6179 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6180 					ivar |= __SHIFTIN((qintr_idx
   6181 						| IVAR_VALID),
   6182 					    IVAR_RX_MASK_Q_82576(qid));
   6183 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6184 					    ivar);
   6185 				}
   6186 				break;
   6187 			default:
   6188 				break;
   6189 			}
   6190 
   6191 			/* Link status */
   6192 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6193 			    IVAR_MISC_OTHER);
   6194 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6195 		}
   6196 
   6197 		if (wm_is_using_multiqueue(sc)) {
   6198 			wm_init_rss(sc);
   6199 
   6200 			/*
   6201 			** NOTE: Receive Full-Packet Checksum Offload
   6202 			** is mutually exclusive with Multiqueue. However
   6203 			** this is not the same as TCP/IP checksums which
   6204 			** still work.
   6205 			*/
   6206 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6207 			reg |= RXCSUM_PCSD;
   6208 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6209 		}
   6210 	}
   6211 
   6212 	/* Set up the interrupt registers. */
   6213 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6214 
   6215 	/* Enable SFP module insertion interrupt if it's required */
   6216 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6217 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6218 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6219 		sfp_mask = ICR_GPI(0);
   6220 	}
   6221 
   6222 	if (wm_is_using_msix(sc)) {
   6223 		uint32_t mask;
   6224 		struct wm_queue *wmq;
   6225 
   6226 		switch (sc->sc_type) {
   6227 		case WM_T_82574:
   6228 			mask = 0;
   6229 			for (i = 0; i < sc->sc_nqueues; i++) {
   6230 				wmq = &sc->sc_queue[i];
   6231 				mask |= ICR_TXQ(wmq->wmq_id);
   6232 				mask |= ICR_RXQ(wmq->wmq_id);
   6233 			}
   6234 			mask |= ICR_OTHER;
   6235 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6236 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6237 			break;
   6238 		default:
   6239 			if (sc->sc_type == WM_T_82575) {
   6240 				mask = 0;
   6241 				for (i = 0; i < sc->sc_nqueues; i++) {
   6242 					wmq = &sc->sc_queue[i];
   6243 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6244 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6245 				}
   6246 				mask |= EITR_OTHER;
   6247 			} else {
   6248 				mask = 0;
   6249 				for (i = 0; i < sc->sc_nqueues; i++) {
   6250 					wmq = &sc->sc_queue[i];
   6251 					mask |= 1 << wmq->wmq_intr_idx;
   6252 				}
   6253 				mask |= 1 << sc->sc_link_intr_idx;
   6254 			}
   6255 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6256 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6257 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6258 
   6259 			/* For other interrupts */
   6260 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6261 			break;
   6262 		}
   6263 	} else {
   6264 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6265 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6266 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6267 	}
   6268 
   6269 	/* Set up the inter-packet gap. */
   6270 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6271 
   6272 	if (sc->sc_type >= WM_T_82543) {
   6273 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6274 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6275 			wm_itrs_writereg(sc, wmq);
   6276 		}
   6277 		/*
   6278 		 * Link interrupts occur much less than TX
   6279 		 * interrupts and RX interrupts. So, we don't
   6280 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   6281 		 * FreeBSD's if_igb.
   6282 		 */
   6283 	}
   6284 
   6285 	/* Set the VLAN ethernetype. */
   6286 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6287 
   6288 	/*
   6289 	 * Set up the transmit control register; we start out with
   6290 	 * a collision distance suitable for FDX, but update it whe
   6291 	 * we resolve the media type.
   6292 	 */
   6293 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6294 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6295 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6296 	if (sc->sc_type >= WM_T_82571)
   6297 		sc->sc_tctl |= TCTL_MULR;
   6298 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6299 
   6300 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6301 		/* Write TDT after TCTL.EN is set. See the document. */
   6302 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6303 	}
   6304 
   6305 	if (sc->sc_type == WM_T_80003) {
   6306 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6307 		reg &= ~TCTL_EXT_GCEX_MASK;
   6308 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6309 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6310 	}
   6311 
   6312 	/* Set the media. */
   6313 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6314 		goto out;
   6315 
   6316 	/* Configure for OS presence */
   6317 	wm_init_manageability(sc);
   6318 
   6319 	/*
   6320 	 * Set up the receive control register; we actually program the
   6321 	 * register when we set the receive filter. Use multicast address
   6322 	 * offset type 0.
   6323 	 *
   6324 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6325 	 * don't enable that feature.
   6326 	 */
   6327 	sc->sc_mchash_type = 0;
   6328 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6329 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6330 
   6331 	/* 82574 use one buffer extended Rx descriptor. */
   6332 	if (sc->sc_type == WM_T_82574)
   6333 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6334 
   6335 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6336 		sc->sc_rctl |= RCTL_SECRC;
   6337 
   6338 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6339 	    && (ifp->if_mtu > ETHERMTU)) {
   6340 		sc->sc_rctl |= RCTL_LPE;
   6341 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6342 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6343 	}
   6344 
   6345 	if (MCLBYTES == 2048)
   6346 		sc->sc_rctl |= RCTL_2k;
   6347 	else {
   6348 		if (sc->sc_type >= WM_T_82543) {
   6349 			switch (MCLBYTES) {
   6350 			case 4096:
   6351 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6352 				break;
   6353 			case 8192:
   6354 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6355 				break;
   6356 			case 16384:
   6357 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6358 				break;
   6359 			default:
   6360 				panic("wm_init: MCLBYTES %d unsupported",
   6361 				    MCLBYTES);
   6362 				break;
   6363 			}
   6364 		} else
   6365 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6366 	}
   6367 
   6368 	/* Enable ECC */
   6369 	switch (sc->sc_type) {
   6370 	case WM_T_82571:
   6371 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6372 		reg |= PBA_ECC_CORR_EN;
   6373 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6374 		break;
   6375 	case WM_T_PCH_LPT:
   6376 	case WM_T_PCH_SPT:
   6377 	case WM_T_PCH_CNP:
   6378 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6379 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6380 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6381 
   6382 		sc->sc_ctrl |= CTRL_MEHE;
   6383 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6384 		break;
   6385 	default:
   6386 		break;
   6387 	}
   6388 
   6389 	/*
   6390 	 * Set the receive filter.
   6391 	 *
   6392 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6393 	 * the setting of RCTL.EN in wm_set_filter()
   6394 	 */
   6395 	wm_set_filter(sc);
   6396 
   6397 	/* On 575 and later set RDT only if RX enabled */
   6398 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6399 		int qidx;
   6400 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6401 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6402 			for (i = 0; i < WM_NRXDESC; i++) {
   6403 				mutex_enter(rxq->rxq_lock);
   6404 				wm_init_rxdesc(rxq, i);
   6405 				mutex_exit(rxq->rxq_lock);
   6406 
   6407 			}
   6408 		}
   6409 	}
   6410 
   6411 	wm_unset_stopping_flags(sc);
   6412 
   6413 	/* Start the one second link check clock. */
   6414 	callout_schedule(&sc->sc_tick_ch, hz);
   6415 
   6416 	/* ...all done! */
   6417 	ifp->if_flags |= IFF_RUNNING;
   6418 
   6419  out:
   6420 	/* Save last flags for the callback */
   6421 	sc->sc_if_flags = ifp->if_flags;
   6422 	sc->sc_ec_capenable = ec->ec_capenable;
   6423 	if (error)
   6424 		log(LOG_ERR, "%s: interface not running\n",
   6425 		    device_xname(sc->sc_dev));
   6426 	return error;
   6427 }
   6428 
   6429 /*
   6430  * wm_stop:		[ifnet interface function]
   6431  *
   6432  *	Stop transmission on the interface.
   6433  */
   6434 static void
   6435 wm_stop(struct ifnet *ifp, int disable)
   6436 {
   6437 	struct wm_softc *sc = ifp->if_softc;
   6438 
   6439 	ASSERT_SLEEPABLE();
   6440 
   6441 	WM_CORE_LOCK(sc);
   6442 	wm_stop_locked(ifp, disable ? true : false, true);
   6443 	WM_CORE_UNLOCK(sc);
   6444 
   6445 	/*
   6446 	 * After wm_set_stopping_flags(), it is guaranteed
   6447 	 * wm_handle_queue_work() does not call workqueue_enqueue().
   6448 	 * However, workqueue_wait() cannot call in wm_stop_locked()
   6449 	 * because it can sleep...
   6450 	 * so, call workqueue_wait() here.
   6451 	 */
   6452 	for (int i = 0; i < sc->sc_nqueues; i++)
   6453 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6454 }
   6455 
   6456 static void
   6457 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6458 {
   6459 	struct wm_softc *sc = ifp->if_softc;
   6460 	struct wm_txsoft *txs;
   6461 	int i, qidx;
   6462 
   6463 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6464 		device_xname(sc->sc_dev), __func__));
   6465 	KASSERT(WM_CORE_LOCKED(sc));
   6466 
   6467 	wm_set_stopping_flags(sc);
   6468 
   6469 	if (sc->sc_flags & WM_F_HAS_MII) {
   6470 		/* Down the MII. */
   6471 		mii_down(&sc->sc_mii);
   6472 	} else {
   6473 #if 0
   6474 		/* Should we clear PHY's status properly? */
   6475 		wm_reset(sc);
   6476 #endif
   6477 	}
   6478 
   6479 	/* Stop the transmit and receive processes. */
   6480 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6481 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6482 	sc->sc_rctl &= ~RCTL_EN;
   6483 
   6484 	/*
   6485 	 * Clear the interrupt mask to ensure the device cannot assert its
   6486 	 * interrupt line.
   6487 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6488 	 * service any currently pending or shared interrupt.
   6489 	 */
   6490 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6491 	sc->sc_icr = 0;
   6492 	if (wm_is_using_msix(sc)) {
   6493 		if (sc->sc_type != WM_T_82574) {
   6494 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6495 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6496 		} else
   6497 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6498 	}
   6499 
   6500 	/*
   6501 	 * Stop callouts after interrupts are disabled; if we have
   6502 	 * to wait for them, we will be releasing the CORE_LOCK
   6503 	 * briefly, which will unblock interrupts on the current CPU.
   6504 	 */
   6505 
   6506 	/* Stop the one second clock. */
   6507 	if (wait)
   6508 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6509 	else
   6510 		callout_stop(&sc->sc_tick_ch);
   6511 
   6512 	/* Stop the 82547 Tx FIFO stall check timer. */
   6513 	if (sc->sc_type == WM_T_82547) {
   6514 		if (wait)
   6515 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6516 		else
   6517 			callout_stop(&sc->sc_txfifo_ch);
   6518 	}
   6519 
   6520 	/* Release any queued transmit buffers. */
   6521 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6522 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6523 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6524 		mutex_enter(txq->txq_lock);
   6525 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6526 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6527 			txs = &txq->txq_soft[i];
   6528 			if (txs->txs_mbuf != NULL) {
   6529 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6530 				m_freem(txs->txs_mbuf);
   6531 				txs->txs_mbuf = NULL;
   6532 			}
   6533 		}
   6534 		mutex_exit(txq->txq_lock);
   6535 	}
   6536 
   6537 	/* Mark the interface as down and cancel the watchdog timer. */
   6538 	ifp->if_flags &= ~IFF_RUNNING;
   6539 
   6540 	if (disable) {
   6541 		for (i = 0; i < sc->sc_nqueues; i++) {
   6542 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6543 			mutex_enter(rxq->rxq_lock);
   6544 			wm_rxdrain(rxq);
   6545 			mutex_exit(rxq->rxq_lock);
   6546 		}
   6547 	}
   6548 
   6549 #if 0 /* notyet */
   6550 	if (sc->sc_type >= WM_T_82544)
   6551 		CSR_WRITE(sc, WMREG_WUC, 0);
   6552 #endif
   6553 }
   6554 
   6555 static void
   6556 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6557 {
   6558 	struct mbuf *m;
   6559 	int i;
   6560 
   6561 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6562 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6563 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6564 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6565 		    m->m_data, m->m_len, m->m_flags);
   6566 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6567 	    i, i == 1 ? "" : "s");
   6568 }
   6569 
   6570 /*
   6571  * wm_82547_txfifo_stall:
   6572  *
   6573  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6574  *	reset the FIFO pointers, and restart packet transmission.
   6575  */
   6576 static void
   6577 wm_82547_txfifo_stall(void *arg)
   6578 {
   6579 	struct wm_softc *sc = arg;
   6580 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6581 
   6582 	mutex_enter(txq->txq_lock);
   6583 
   6584 	if (txq->txq_stopping)
   6585 		goto out;
   6586 
   6587 	if (txq->txq_fifo_stall) {
   6588 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6589 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6590 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6591 			/*
   6592 			 * Packets have drained.  Stop transmitter, reset
   6593 			 * FIFO pointers, restart transmitter, and kick
   6594 			 * the packet queue.
   6595 			 */
   6596 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6597 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6598 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6599 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6600 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6601 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6602 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6603 			CSR_WRITE_FLUSH(sc);
   6604 
   6605 			txq->txq_fifo_head = 0;
   6606 			txq->txq_fifo_stall = 0;
   6607 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6608 		} else {
   6609 			/*
   6610 			 * Still waiting for packets to drain; try again in
   6611 			 * another tick.
   6612 			 */
   6613 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6614 		}
   6615 	}
   6616 
   6617 out:
   6618 	mutex_exit(txq->txq_lock);
   6619 }
   6620 
   6621 /*
   6622  * wm_82547_txfifo_bugchk:
   6623  *
   6624  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6625  *	prevent enqueueing a packet that would wrap around the end
   6626  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   6627  *
   6628  *	We do this by checking the amount of space before the end
   6629  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6630  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6631  *	the internal FIFO pointers to the beginning, and restart
   6632  *	transmission on the interface.
   6633  */
   6634 #define	WM_FIFO_HDR		0x10
   6635 #define	WM_82547_PAD_LEN	0x3e0
   6636 static int
   6637 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6638 {
   6639 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6640 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6641 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6642 
   6643 	/* Just return if already stalled. */
   6644 	if (txq->txq_fifo_stall)
   6645 		return 1;
   6646 
   6647 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6648 		/* Stall only occurs in half-duplex mode. */
   6649 		goto send_packet;
   6650 	}
   6651 
   6652 	if (len >= WM_82547_PAD_LEN + space) {
   6653 		txq->txq_fifo_stall = 1;
   6654 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6655 		return 1;
   6656 	}
   6657 
   6658  send_packet:
   6659 	txq->txq_fifo_head += len;
   6660 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6661 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6662 
   6663 	return 0;
   6664 }
   6665 
   6666 static int
   6667 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6668 {
   6669 	int error;
   6670 
   6671 	/*
   6672 	 * Allocate the control data structures, and create and load the
   6673 	 * DMA map for it.
   6674 	 *
   6675 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6676 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6677 	 * both sets within the same 4G segment.
   6678 	 */
   6679 	if (sc->sc_type < WM_T_82544)
   6680 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6681 	else
   6682 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6683 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6684 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6685 	else
   6686 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6687 
   6688 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6689 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6690 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6691 		aprint_error_dev(sc->sc_dev,
   6692 		    "unable to allocate TX control data, error = %d\n",
   6693 		    error);
   6694 		goto fail_0;
   6695 	}
   6696 
   6697 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6698 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6699 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6700 		aprint_error_dev(sc->sc_dev,
   6701 		    "unable to map TX control data, error = %d\n", error);
   6702 		goto fail_1;
   6703 	}
   6704 
   6705 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6706 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6707 		aprint_error_dev(sc->sc_dev,
   6708 		    "unable to create TX control data DMA map, error = %d\n",
   6709 		    error);
   6710 		goto fail_2;
   6711 	}
   6712 
   6713 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6714 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6715 		aprint_error_dev(sc->sc_dev,
   6716 		    "unable to load TX control data DMA map, error = %d\n",
   6717 		    error);
   6718 		goto fail_3;
   6719 	}
   6720 
   6721 	return 0;
   6722 
   6723  fail_3:
   6724 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6725  fail_2:
   6726 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6727 	    WM_TXDESCS_SIZE(txq));
   6728  fail_1:
   6729 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6730  fail_0:
   6731 	return error;
   6732 }
   6733 
   6734 static void
   6735 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6736 {
   6737 
   6738 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6739 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6740 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6741 	    WM_TXDESCS_SIZE(txq));
   6742 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6743 }
   6744 
   6745 static int
   6746 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6747 {
   6748 	int error;
   6749 	size_t rxq_descs_size;
   6750 
   6751 	/*
   6752 	 * Allocate the control data structures, and create and load the
   6753 	 * DMA map for it.
   6754 	 *
   6755 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6756 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6757 	 * both sets within the same 4G segment.
   6758 	 */
   6759 	rxq->rxq_ndesc = WM_NRXDESC;
   6760 	if (sc->sc_type == WM_T_82574)
   6761 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6762 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6763 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6764 	else
   6765 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6766 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6767 
   6768 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6769 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6770 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6771 		aprint_error_dev(sc->sc_dev,
   6772 		    "unable to allocate RX control data, error = %d\n",
   6773 		    error);
   6774 		goto fail_0;
   6775 	}
   6776 
   6777 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6778 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6779 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6780 		aprint_error_dev(sc->sc_dev,
   6781 		    "unable to map RX control data, error = %d\n", error);
   6782 		goto fail_1;
   6783 	}
   6784 
   6785 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6786 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6787 		aprint_error_dev(sc->sc_dev,
   6788 		    "unable to create RX control data DMA map, error = %d\n",
   6789 		    error);
   6790 		goto fail_2;
   6791 	}
   6792 
   6793 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6794 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6795 		aprint_error_dev(sc->sc_dev,
   6796 		    "unable to load RX control data DMA map, error = %d\n",
   6797 		    error);
   6798 		goto fail_3;
   6799 	}
   6800 
   6801 	return 0;
   6802 
   6803  fail_3:
   6804 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6805  fail_2:
   6806 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6807 	    rxq_descs_size);
   6808  fail_1:
   6809 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6810  fail_0:
   6811 	return error;
   6812 }
   6813 
   6814 static void
   6815 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6816 {
   6817 
   6818 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6819 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6820 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6821 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6822 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6823 }
   6824 
   6825 
   6826 static int
   6827 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6828 {
   6829 	int i, error;
   6830 
   6831 	/* Create the transmit buffer DMA maps. */
   6832 	WM_TXQUEUELEN(txq) =
   6833 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6834 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6835 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6836 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6837 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6838 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6839 			aprint_error_dev(sc->sc_dev,
   6840 			    "unable to create Tx DMA map %d, error = %d\n",
   6841 			    i, error);
   6842 			goto fail;
   6843 		}
   6844 	}
   6845 
   6846 	return 0;
   6847 
   6848  fail:
   6849 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6850 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6851 			bus_dmamap_destroy(sc->sc_dmat,
   6852 			    txq->txq_soft[i].txs_dmamap);
   6853 	}
   6854 	return error;
   6855 }
   6856 
   6857 static void
   6858 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6859 {
   6860 	int i;
   6861 
   6862 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6863 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6864 			bus_dmamap_destroy(sc->sc_dmat,
   6865 			    txq->txq_soft[i].txs_dmamap);
   6866 	}
   6867 }
   6868 
   6869 static int
   6870 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6871 {
   6872 	int i, error;
   6873 
   6874 	/* Create the receive buffer DMA maps. */
   6875 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6876 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6877 			    MCLBYTES, 0, 0,
   6878 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6879 			aprint_error_dev(sc->sc_dev,
   6880 			    "unable to create Rx DMA map %d error = %d\n",
   6881 			    i, error);
   6882 			goto fail;
   6883 		}
   6884 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6885 	}
   6886 
   6887 	return 0;
   6888 
   6889  fail:
   6890 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6891 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6892 			bus_dmamap_destroy(sc->sc_dmat,
   6893 			    rxq->rxq_soft[i].rxs_dmamap);
   6894 	}
   6895 	return error;
   6896 }
   6897 
   6898 static void
   6899 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6900 {
   6901 	int i;
   6902 
   6903 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6904 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6905 			bus_dmamap_destroy(sc->sc_dmat,
   6906 			    rxq->rxq_soft[i].rxs_dmamap);
   6907 	}
   6908 }
   6909 
   6910 /*
   6911  * wm_alloc_quques:
   6912  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6913  */
   6914 static int
   6915 wm_alloc_txrx_queues(struct wm_softc *sc)
   6916 {
   6917 	int i, error, tx_done, rx_done;
   6918 
   6919 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6920 	    KM_SLEEP);
   6921 	if (sc->sc_queue == NULL) {
   6922 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6923 		error = ENOMEM;
   6924 		goto fail_0;
   6925 	}
   6926 
   6927 	/* For transmission */
   6928 	error = 0;
   6929 	tx_done = 0;
   6930 	for (i = 0; i < sc->sc_nqueues; i++) {
   6931 #ifdef WM_EVENT_COUNTERS
   6932 		int j;
   6933 		const char *xname;
   6934 #endif
   6935 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6936 		txq->txq_sc = sc;
   6937 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6938 
   6939 		error = wm_alloc_tx_descs(sc, txq);
   6940 		if (error)
   6941 			break;
   6942 		error = wm_alloc_tx_buffer(sc, txq);
   6943 		if (error) {
   6944 			wm_free_tx_descs(sc, txq);
   6945 			break;
   6946 		}
   6947 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6948 		if (txq->txq_interq == NULL) {
   6949 			wm_free_tx_descs(sc, txq);
   6950 			wm_free_tx_buffer(sc, txq);
   6951 			error = ENOMEM;
   6952 			break;
   6953 		}
   6954 
   6955 #ifdef WM_EVENT_COUNTERS
   6956 		xname = device_xname(sc->sc_dev);
   6957 
   6958 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6959 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6960 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6961 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6962 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6963 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6964 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6965 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6966 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6967 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6968 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6969 
   6970 		for (j = 0; j < WM_NTXSEGS; j++) {
   6971 			snprintf(txq->txq_txseg_evcnt_names[j],
   6972 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6973 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6974 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6975 		}
   6976 
   6977 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6978 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6979 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6980 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6981 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6982 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   6983 #endif /* WM_EVENT_COUNTERS */
   6984 
   6985 		tx_done++;
   6986 	}
   6987 	if (error)
   6988 		goto fail_1;
   6989 
   6990 	/* For receive */
   6991 	error = 0;
   6992 	rx_done = 0;
   6993 	for (i = 0; i < sc->sc_nqueues; i++) {
   6994 #ifdef WM_EVENT_COUNTERS
   6995 		const char *xname;
   6996 #endif
   6997 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6998 		rxq->rxq_sc = sc;
   6999 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7000 
   7001 		error = wm_alloc_rx_descs(sc, rxq);
   7002 		if (error)
   7003 			break;
   7004 
   7005 		error = wm_alloc_rx_buffer(sc, rxq);
   7006 		if (error) {
   7007 			wm_free_rx_descs(sc, rxq);
   7008 			break;
   7009 		}
   7010 
   7011 #ifdef WM_EVENT_COUNTERS
   7012 		xname = device_xname(sc->sc_dev);
   7013 
   7014 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7015 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7016 
   7017 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7018 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7019 #endif /* WM_EVENT_COUNTERS */
   7020 
   7021 		rx_done++;
   7022 	}
   7023 	if (error)
   7024 		goto fail_2;
   7025 
   7026 	return 0;
   7027 
   7028  fail_2:
   7029 	for (i = 0; i < rx_done; i++) {
   7030 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7031 		wm_free_rx_buffer(sc, rxq);
   7032 		wm_free_rx_descs(sc, rxq);
   7033 		if (rxq->rxq_lock)
   7034 			mutex_obj_free(rxq->rxq_lock);
   7035 	}
   7036  fail_1:
   7037 	for (i = 0; i < tx_done; i++) {
   7038 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7039 		pcq_destroy(txq->txq_interq);
   7040 		wm_free_tx_buffer(sc, txq);
   7041 		wm_free_tx_descs(sc, txq);
   7042 		if (txq->txq_lock)
   7043 			mutex_obj_free(txq->txq_lock);
   7044 	}
   7045 
   7046 	kmem_free(sc->sc_queue,
   7047 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7048  fail_0:
   7049 	return error;
   7050 }
   7051 
   7052 /*
   7053  * wm_free_quques:
   7054  *	Free {tx,rx}descs and {tx,rx} buffers
   7055  */
   7056 static void
   7057 wm_free_txrx_queues(struct wm_softc *sc)
   7058 {
   7059 	int i;
   7060 
   7061 	for (i = 0; i < sc->sc_nqueues; i++) {
   7062 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7063 
   7064 #ifdef WM_EVENT_COUNTERS
   7065 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7066 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7067 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7068 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7069 #endif /* WM_EVENT_COUNTERS */
   7070 
   7071 		wm_free_rx_buffer(sc, rxq);
   7072 		wm_free_rx_descs(sc, rxq);
   7073 		if (rxq->rxq_lock)
   7074 			mutex_obj_free(rxq->rxq_lock);
   7075 	}
   7076 
   7077 	for (i = 0; i < sc->sc_nqueues; i++) {
   7078 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7079 		struct mbuf *m;
   7080 #ifdef WM_EVENT_COUNTERS
   7081 		int j;
   7082 
   7083 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7084 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7085 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7086 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7087 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7088 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7089 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7090 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7091 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7092 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7093 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7094 
   7095 		for (j = 0; j < WM_NTXSEGS; j++)
   7096 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7097 
   7098 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7099 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7100 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7101 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7102 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7103 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7104 #endif /* WM_EVENT_COUNTERS */
   7105 
   7106 		/* Drain txq_interq */
   7107 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7108 			m_freem(m);
   7109 		pcq_destroy(txq->txq_interq);
   7110 
   7111 		wm_free_tx_buffer(sc, txq);
   7112 		wm_free_tx_descs(sc, txq);
   7113 		if (txq->txq_lock)
   7114 			mutex_obj_free(txq->txq_lock);
   7115 	}
   7116 
   7117 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7118 }
   7119 
   7120 static void
   7121 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7122 {
   7123 
   7124 	KASSERT(mutex_owned(txq->txq_lock));
   7125 
   7126 	/* Initialize the transmit descriptor ring. */
   7127 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7128 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7129 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7130 	txq->txq_free = WM_NTXDESC(txq);
   7131 	txq->txq_next = 0;
   7132 }
   7133 
   7134 static void
   7135 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7136     struct wm_txqueue *txq)
   7137 {
   7138 
   7139 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7140 		device_xname(sc->sc_dev), __func__));
   7141 	KASSERT(mutex_owned(txq->txq_lock));
   7142 
   7143 	if (sc->sc_type < WM_T_82543) {
   7144 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7145 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7146 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7147 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7148 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7149 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7150 	} else {
   7151 		int qid = wmq->wmq_id;
   7152 
   7153 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7154 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7155 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7156 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7157 
   7158 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7159 			/*
   7160 			 * Don't write TDT before TCTL.EN is set.
   7161 			 * See the document.
   7162 			 */
   7163 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7164 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7165 			    | TXDCTL_WTHRESH(0));
   7166 		else {
   7167 			/* XXX should update with AIM? */
   7168 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7169 			if (sc->sc_type >= WM_T_82540) {
   7170 				/* Should be the same */
   7171 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7172 			}
   7173 
   7174 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7175 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7176 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7177 		}
   7178 	}
   7179 }
   7180 
   7181 static void
   7182 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7183 {
   7184 	int i;
   7185 
   7186 	KASSERT(mutex_owned(txq->txq_lock));
   7187 
   7188 	/* Initialize the transmit job descriptors. */
   7189 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7190 		txq->txq_soft[i].txs_mbuf = NULL;
   7191 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7192 	txq->txq_snext = 0;
   7193 	txq->txq_sdirty = 0;
   7194 }
   7195 
   7196 static void
   7197 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7198     struct wm_txqueue *txq)
   7199 {
   7200 
   7201 	KASSERT(mutex_owned(txq->txq_lock));
   7202 
   7203 	/*
   7204 	 * Set up some register offsets that are different between
   7205 	 * the i82542 and the i82543 and later chips.
   7206 	 */
   7207 	if (sc->sc_type < WM_T_82543)
   7208 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7209 	else
   7210 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7211 
   7212 	wm_init_tx_descs(sc, txq);
   7213 	wm_init_tx_regs(sc, wmq, txq);
   7214 	wm_init_tx_buffer(sc, txq);
   7215 
   7216 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7217 	txq->txq_sending = false;
   7218 }
   7219 
   7220 static void
   7221 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7222     struct wm_rxqueue *rxq)
   7223 {
   7224 
   7225 	KASSERT(mutex_owned(rxq->rxq_lock));
   7226 
   7227 	/*
   7228 	 * Initialize the receive descriptor and receive job
   7229 	 * descriptor rings.
   7230 	 */
   7231 	if (sc->sc_type < WM_T_82543) {
   7232 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7233 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7234 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7235 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7236 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7237 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7238 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7239 
   7240 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7241 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7242 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7243 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7244 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7245 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7246 	} else {
   7247 		int qid = wmq->wmq_id;
   7248 
   7249 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7250 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7251 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7252 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7253 
   7254 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7255 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7256 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7257 
   7258 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   7259 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7260 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7261 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7262 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7263 			    | RXDCTL_WTHRESH(1));
   7264 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7265 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7266 		} else {
   7267 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7268 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7269 			/* XXX should update with AIM? */
   7270 			CSR_WRITE(sc, WMREG_RDTR,
   7271 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7272 			/* MUST be same */
   7273 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7274 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7275 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7276 		}
   7277 	}
   7278 }
   7279 
   7280 static int
   7281 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7282 {
   7283 	struct wm_rxsoft *rxs;
   7284 	int error, i;
   7285 
   7286 	KASSERT(mutex_owned(rxq->rxq_lock));
   7287 
   7288 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7289 		rxs = &rxq->rxq_soft[i];
   7290 		if (rxs->rxs_mbuf == NULL) {
   7291 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7292 				log(LOG_ERR, "%s: unable to allocate or map "
   7293 				    "rx buffer %d, error = %d\n",
   7294 				    device_xname(sc->sc_dev), i, error);
   7295 				/*
   7296 				 * XXX Should attempt to run with fewer receive
   7297 				 * XXX buffers instead of just failing.
   7298 				 */
   7299 				wm_rxdrain(rxq);
   7300 				return ENOMEM;
   7301 			}
   7302 		} else {
   7303 			/*
   7304 			 * For 82575 and 82576, the RX descriptors must be
   7305 			 * initialized after the setting of RCTL.EN in
   7306 			 * wm_set_filter()
   7307 			 */
   7308 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7309 				wm_init_rxdesc(rxq, i);
   7310 		}
   7311 	}
   7312 	rxq->rxq_ptr = 0;
   7313 	rxq->rxq_discard = 0;
   7314 	WM_RXCHAIN_RESET(rxq);
   7315 
   7316 	return 0;
   7317 }
   7318 
   7319 static int
   7320 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7321     struct wm_rxqueue *rxq)
   7322 {
   7323 
   7324 	KASSERT(mutex_owned(rxq->rxq_lock));
   7325 
   7326 	/*
   7327 	 * Set up some register offsets that are different between
   7328 	 * the i82542 and the i82543 and later chips.
   7329 	 */
   7330 	if (sc->sc_type < WM_T_82543)
   7331 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7332 	else
   7333 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7334 
   7335 	wm_init_rx_regs(sc, wmq, rxq);
   7336 	return wm_init_rx_buffer(sc, rxq);
   7337 }
   7338 
   7339 /*
   7340  * wm_init_quques:
   7341  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7342  */
   7343 static int
   7344 wm_init_txrx_queues(struct wm_softc *sc)
   7345 {
   7346 	int i, error = 0;
   7347 
   7348 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7349 		device_xname(sc->sc_dev), __func__));
   7350 
   7351 	for (i = 0; i < sc->sc_nqueues; i++) {
   7352 		struct wm_queue *wmq = &sc->sc_queue[i];
   7353 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7354 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7355 
   7356 		/*
   7357 		 * TODO
   7358 		 * Currently, use constant variable instead of AIM.
   7359 		 * Furthermore, the interrupt interval of multiqueue which use
   7360 		 * polling mode is less than default value.
   7361 		 * More tuning and AIM are required.
   7362 		 */
   7363 		if (wm_is_using_multiqueue(sc))
   7364 			wmq->wmq_itr = 50;
   7365 		else
   7366 			wmq->wmq_itr = sc->sc_itr_init;
   7367 		wmq->wmq_set_itr = true;
   7368 
   7369 		mutex_enter(txq->txq_lock);
   7370 		wm_init_tx_queue(sc, wmq, txq);
   7371 		mutex_exit(txq->txq_lock);
   7372 
   7373 		mutex_enter(rxq->rxq_lock);
   7374 		error = wm_init_rx_queue(sc, wmq, rxq);
   7375 		mutex_exit(rxq->rxq_lock);
   7376 		if (error)
   7377 			break;
   7378 	}
   7379 
   7380 	return error;
   7381 }
   7382 
   7383 /*
   7384  * wm_tx_offload:
   7385  *
   7386  *	Set up TCP/IP checksumming parameters for the
   7387  *	specified packet.
   7388  */
   7389 static void
   7390 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7391     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7392 {
   7393 	struct mbuf *m0 = txs->txs_mbuf;
   7394 	struct livengood_tcpip_ctxdesc *t;
   7395 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7396 	uint32_t ipcse;
   7397 	struct ether_header *eh;
   7398 	int offset, iphl;
   7399 	uint8_t fields;
   7400 
   7401 	/*
   7402 	 * XXX It would be nice if the mbuf pkthdr had offset
   7403 	 * fields for the protocol headers.
   7404 	 */
   7405 
   7406 	eh = mtod(m0, struct ether_header *);
   7407 	switch (htons(eh->ether_type)) {
   7408 	case ETHERTYPE_IP:
   7409 	case ETHERTYPE_IPV6:
   7410 		offset = ETHER_HDR_LEN;
   7411 		break;
   7412 
   7413 	case ETHERTYPE_VLAN:
   7414 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7415 		break;
   7416 
   7417 	default:
   7418 		/* Don't support this protocol or encapsulation. */
   7419  		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7420  		txq->txq_last_hw_ipcs = 0;
   7421  		txq->txq_last_hw_tucs = 0;
   7422 		*fieldsp = 0;
   7423 		*cmdp = 0;
   7424 		return;
   7425 	}
   7426 
   7427 	if ((m0->m_pkthdr.csum_flags &
   7428 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7429 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7430 	} else
   7431 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7432 
   7433 	ipcse = offset + iphl - 1;
   7434 
   7435 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7436 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7437 	seg = 0;
   7438 	fields = 0;
   7439 
   7440 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7441 		int hlen = offset + iphl;
   7442 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7443 
   7444 		if (__predict_false(m0->m_len <
   7445 				    (hlen + sizeof(struct tcphdr)))) {
   7446 			/*
   7447 			 * TCP/IP headers are not in the first mbuf; we need
   7448 			 * to do this the slow and painful way. Let's just
   7449 			 * hope this doesn't happen very often.
   7450 			 */
   7451 			struct tcphdr th;
   7452 
   7453 			WM_Q_EVCNT_INCR(txq, tsopain);
   7454 
   7455 			m_copydata(m0, hlen, sizeof(th), &th);
   7456 			if (v4) {
   7457 				struct ip ip;
   7458 
   7459 				m_copydata(m0, offset, sizeof(ip), &ip);
   7460 				ip.ip_len = 0;
   7461 				m_copyback(m0,
   7462 				    offset + offsetof(struct ip, ip_len),
   7463 				    sizeof(ip.ip_len), &ip.ip_len);
   7464 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7465 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7466 			} else {
   7467 				struct ip6_hdr ip6;
   7468 
   7469 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7470 				ip6.ip6_plen = 0;
   7471 				m_copyback(m0,
   7472 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7473 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7474 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7475 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7476 			}
   7477 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7478 			    sizeof(th.th_sum), &th.th_sum);
   7479 
   7480 			hlen += th.th_off << 2;
   7481 		} else {
   7482 			/*
   7483 			 * TCP/IP headers are in the first mbuf; we can do
   7484 			 * this the easy way.
   7485 			 */
   7486 			struct tcphdr *th;
   7487 
   7488 			if (v4) {
   7489 				struct ip *ip =
   7490 				    (void *)(mtod(m0, char *) + offset);
   7491 				th = (void *)(mtod(m0, char *) + hlen);
   7492 
   7493 				ip->ip_len = 0;
   7494 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7495 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7496 			} else {
   7497 				struct ip6_hdr *ip6 =
   7498 				    (void *)(mtod(m0, char *) + offset);
   7499 				th = (void *)(mtod(m0, char *) + hlen);
   7500 
   7501 				ip6->ip6_plen = 0;
   7502 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7503 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7504 			}
   7505 			hlen += th->th_off << 2;
   7506 		}
   7507 
   7508 		if (v4) {
   7509 			WM_Q_EVCNT_INCR(txq, tso);
   7510 			cmdlen |= WTX_TCPIP_CMD_IP;
   7511 		} else {
   7512 			WM_Q_EVCNT_INCR(txq, tso6);
   7513 			ipcse = 0;
   7514 		}
   7515 		cmd |= WTX_TCPIP_CMD_TSE;
   7516 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7517 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7518 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7519 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7520 	}
   7521 
   7522 	/*
   7523 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7524 	 * offload feature, if we load the context descriptor, we
   7525 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7526 	 */
   7527 
   7528 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7529 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7530 	    WTX_TCPIP_IPCSE(ipcse);
   7531 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7532 		WM_Q_EVCNT_INCR(txq, ipsum);
   7533 		fields |= WTX_IXSM;
   7534 	}
   7535 
   7536 	offset += iphl;
   7537 
   7538 	if (m0->m_pkthdr.csum_flags &
   7539 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7540 		WM_Q_EVCNT_INCR(txq, tusum);
   7541 		fields |= WTX_TXSM;
   7542 		tucs = WTX_TCPIP_TUCSS(offset) |
   7543 		    WTX_TCPIP_TUCSO(offset +
   7544 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7545 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7546 	} else if ((m0->m_pkthdr.csum_flags &
   7547 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7548 		WM_Q_EVCNT_INCR(txq, tusum6);
   7549 		fields |= WTX_TXSM;
   7550 		tucs = WTX_TCPIP_TUCSS(offset) |
   7551 		    WTX_TCPIP_TUCSO(offset +
   7552 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7553 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7554 	} else {
   7555 		/* Just initialize it to a valid TCP context. */
   7556 		tucs = WTX_TCPIP_TUCSS(offset) |
   7557 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7558 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7559 	}
   7560 
   7561 	*cmdp = cmd;
   7562 	*fieldsp = fields;
   7563 
   7564 	/*
   7565 	 * We don't have to write context descriptor for every packet
   7566 	 * except for 82574. For 82574, we must write context descriptor
   7567 	 * for every packet when we use two descriptor queues.
   7568 	 *
   7569 	 * The 82574L can only remember the *last* context used
   7570 	 * regardless of queue that it was use for.  We cannot reuse
   7571 	 * contexts on this hardware platform and must generate a new
   7572 	 * context every time.  82574L hardware spec, section 7.2.6,
   7573 	 * second note.
   7574 	 */
   7575 	if (sc->sc_nqueues < 2) {
   7576 		/*
   7577 	 	 *
   7578 	  	 * Setting up new checksum offload context for every
   7579 		 * frames takes a lot of processing time for hardware.
   7580 		 * This also reduces performance a lot for small sized
   7581 		 * frames so avoid it if driver can use previously
   7582 		 * configured checksum offload context.
   7583 		 * For TSO, in theory we can use the same TSO context only if
   7584 		 * frame is the same type(IP/TCP) and the same MSS. However
   7585 		 * checking whether a frame has the same IP/TCP structure is
   7586 		 * hard thing so just ignore that and always restablish a
   7587 		 * new TSO context.
   7588 	  	 */
   7589 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7590 		    == 0) {
   7591 			if (txq->txq_last_hw_cmd == cmd &&
   7592 			    txq->txq_last_hw_fields == fields &&
   7593 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7594 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7595 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7596 				return;
   7597 			}
   7598 		}
   7599 
   7600 	 	txq->txq_last_hw_cmd = cmd;
   7601  		txq->txq_last_hw_fields = fields;
   7602  		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7603 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7604 	}
   7605 
   7606 	/* Fill in the context descriptor. */
   7607 	t = (struct livengood_tcpip_ctxdesc *)
   7608 	    &txq->txq_descs[txq->txq_next];
   7609 	t->tcpip_ipcs = htole32(ipcs);
   7610 	t->tcpip_tucs = htole32(tucs);
   7611 	t->tcpip_cmdlen = htole32(cmdlen);
   7612 	t->tcpip_seg = htole32(seg);
   7613 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7614 
   7615 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7616 	txs->txs_ndesc++;
   7617 }
   7618 
   7619 static inline int
   7620 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7621 {
   7622 	struct wm_softc *sc = ifp->if_softc;
   7623 	u_int cpuid = cpu_index(curcpu());
   7624 
   7625 	/*
   7626 	 * Currently, simple distribute strategy.
   7627 	 * TODO:
   7628 	 * distribute by flowid(RSS has value).
   7629 	 */
   7630 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7631 }
   7632 
   7633 /*
   7634  * wm_start:		[ifnet interface function]
   7635  *
   7636  *	Start packet transmission on the interface.
   7637  */
   7638 static void
   7639 wm_start(struct ifnet *ifp)
   7640 {
   7641 	struct wm_softc *sc = ifp->if_softc;
   7642 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7643 
   7644 #ifdef WM_MPSAFE
   7645 	KASSERT(if_is_mpsafe(ifp));
   7646 #endif
   7647 	/*
   7648 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7649 	 */
   7650 
   7651 	mutex_enter(txq->txq_lock);
   7652 	if (!txq->txq_stopping)
   7653 		wm_start_locked(ifp);
   7654 	mutex_exit(txq->txq_lock);
   7655 }
   7656 
   7657 static void
   7658 wm_start_locked(struct ifnet *ifp)
   7659 {
   7660 	struct wm_softc *sc = ifp->if_softc;
   7661 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7662 
   7663 	wm_send_common_locked(ifp, txq, false);
   7664 }
   7665 
   7666 static int
   7667 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7668 {
   7669 	int qid;
   7670 	struct wm_softc *sc = ifp->if_softc;
   7671 	struct wm_txqueue *txq;
   7672 
   7673 	qid = wm_select_txqueue(ifp, m);
   7674 	txq = &sc->sc_queue[qid].wmq_txq;
   7675 
   7676 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7677 		m_freem(m);
   7678 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7679 		return ENOBUFS;
   7680 	}
   7681 
   7682 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7683 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7684 	if (m->m_flags & M_MCAST)
   7685 		if_statinc_ref(nsr, if_omcasts);
   7686 	IF_STAT_PUTREF(ifp);
   7687 
   7688 	if (mutex_tryenter(txq->txq_lock)) {
   7689 		if (!txq->txq_stopping)
   7690 			wm_transmit_locked(ifp, txq);
   7691 		mutex_exit(txq->txq_lock);
   7692 	}
   7693 
   7694 	return 0;
   7695 }
   7696 
   7697 static void
   7698 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7699 {
   7700 
   7701 	wm_send_common_locked(ifp, txq, true);
   7702 }
   7703 
   7704 static void
   7705 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7706     bool is_transmit)
   7707 {
   7708 	struct wm_softc *sc = ifp->if_softc;
   7709 	struct mbuf *m0;
   7710 	struct wm_txsoft *txs;
   7711 	bus_dmamap_t dmamap;
   7712 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7713 	bus_addr_t curaddr;
   7714 	bus_size_t seglen, curlen;
   7715 	uint32_t cksumcmd;
   7716 	uint8_t cksumfields;
   7717 	bool remap = true;
   7718 
   7719 	KASSERT(mutex_owned(txq->txq_lock));
   7720 
   7721 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7722 		return;
   7723 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7724 		return;
   7725 
   7726 	/* Remember the previous number of free descriptors. */
   7727 	ofree = txq->txq_free;
   7728 
   7729 	/*
   7730 	 * Loop through the send queue, setting up transmit descriptors
   7731 	 * until we drain the queue, or use up all available transmit
   7732 	 * descriptors.
   7733 	 */
   7734 	for (;;) {
   7735 		m0 = NULL;
   7736 
   7737 		/* Get a work queue entry. */
   7738 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7739 			wm_txeof(txq, UINT_MAX);
   7740 			if (txq->txq_sfree == 0) {
   7741 				DPRINTF(WM_DEBUG_TX,
   7742 				    ("%s: TX: no free job descriptors\n",
   7743 					device_xname(sc->sc_dev)));
   7744 				WM_Q_EVCNT_INCR(txq, txsstall);
   7745 				break;
   7746 			}
   7747 		}
   7748 
   7749 		/* Grab a packet off the queue. */
   7750 		if (is_transmit)
   7751 			m0 = pcq_get(txq->txq_interq);
   7752 		else
   7753 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7754 		if (m0 == NULL)
   7755 			break;
   7756 
   7757 		DPRINTF(WM_DEBUG_TX,
   7758 		    ("%s: TX: have packet to transmit: %p\n",
   7759 			device_xname(sc->sc_dev), m0));
   7760 
   7761 		txs = &txq->txq_soft[txq->txq_snext];
   7762 		dmamap = txs->txs_dmamap;
   7763 
   7764 		use_tso = (m0->m_pkthdr.csum_flags &
   7765 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7766 
   7767 		/*
   7768 		 * So says the Linux driver:
   7769 		 * The controller does a simple calculation to make sure
   7770 		 * there is enough room in the FIFO before initiating the
   7771 		 * DMA for each buffer. The calc is:
   7772 		 *	4 = ceil(buffer len / MSS)
   7773 		 * To make sure we don't overrun the FIFO, adjust the max
   7774 		 * buffer len if the MSS drops.
   7775 		 */
   7776 		dmamap->dm_maxsegsz =
   7777 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7778 		    ? m0->m_pkthdr.segsz << 2
   7779 		    : WTX_MAX_LEN;
   7780 
   7781 		/*
   7782 		 * Load the DMA map.  If this fails, the packet either
   7783 		 * didn't fit in the allotted number of segments, or we
   7784 		 * were short on resources.  For the too-many-segments
   7785 		 * case, we simply report an error and drop the packet,
   7786 		 * since we can't sanely copy a jumbo packet to a single
   7787 		 * buffer.
   7788 		 */
   7789 retry:
   7790 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7791 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7792 		if (__predict_false(error)) {
   7793 			if (error == EFBIG) {
   7794 				if (remap == true) {
   7795 					struct mbuf *m;
   7796 
   7797 					remap = false;
   7798 					m = m_defrag(m0, M_NOWAIT);
   7799 					if (m != NULL) {
   7800 						WM_Q_EVCNT_INCR(txq, defrag);
   7801 						m0 = m;
   7802 						goto retry;
   7803 					}
   7804 				}
   7805 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7806 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7807 				    "DMA segments, dropping...\n",
   7808 				    device_xname(sc->sc_dev));
   7809 				wm_dump_mbuf_chain(sc, m0);
   7810 				m_freem(m0);
   7811 				continue;
   7812 			}
   7813 			/* Short on resources, just stop for now. */
   7814 			DPRINTF(WM_DEBUG_TX,
   7815 			    ("%s: TX: dmamap load failed: %d\n",
   7816 				device_xname(sc->sc_dev), error));
   7817 			break;
   7818 		}
   7819 
   7820 		segs_needed = dmamap->dm_nsegs;
   7821 		if (use_tso) {
   7822 			/* For sentinel descriptor; see below. */
   7823 			segs_needed++;
   7824 		}
   7825 
   7826 		/*
   7827 		 * Ensure we have enough descriptors free to describe
   7828 		 * the packet. Note, we always reserve one descriptor
   7829 		 * at the end of the ring due to the semantics of the
   7830 		 * TDT register, plus one more in the event we need
   7831 		 * to load offload context.
   7832 		 */
   7833 		if (segs_needed > txq->txq_free - 2) {
   7834 			/*
   7835 			 * Not enough free descriptors to transmit this
   7836 			 * packet.  We haven't committed anything yet,
   7837 			 * so just unload the DMA map, put the packet
   7838 			 * pack on the queue, and punt. Notify the upper
   7839 			 * layer that there are no more slots left.
   7840 			 */
   7841 			DPRINTF(WM_DEBUG_TX,
   7842 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7843 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7844 				segs_needed, txq->txq_free - 1));
   7845 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7846 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7847 			WM_Q_EVCNT_INCR(txq, txdstall);
   7848 			break;
   7849 		}
   7850 
   7851 		/*
   7852 		 * Check for 82547 Tx FIFO bug. We need to do this
   7853 		 * once we know we can transmit the packet, since we
   7854 		 * do some internal FIFO space accounting here.
   7855 		 */
   7856 		if (sc->sc_type == WM_T_82547 &&
   7857 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7858 			DPRINTF(WM_DEBUG_TX,
   7859 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7860 				device_xname(sc->sc_dev)));
   7861 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7862 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7863 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7864 			break;
   7865 		}
   7866 
   7867 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7868 
   7869 		DPRINTF(WM_DEBUG_TX,
   7870 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7871 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7872 
   7873 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7874 
   7875 		/*
   7876 		 * Store a pointer to the packet so that we can free it
   7877 		 * later.
   7878 		 *
   7879 		 * Initially, we consider the number of descriptors the
   7880 		 * packet uses the number of DMA segments.  This may be
   7881 		 * incremented by 1 if we do checksum offload (a descriptor
   7882 		 * is used to set the checksum context).
   7883 		 */
   7884 		txs->txs_mbuf = m0;
   7885 		txs->txs_firstdesc = txq->txq_next;
   7886 		txs->txs_ndesc = segs_needed;
   7887 
   7888 		/* Set up offload parameters for this packet. */
   7889 		if (m0->m_pkthdr.csum_flags &
   7890 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7891 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7892 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7893 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   7894 		} else {
   7895  			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7896  			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   7897 			cksumcmd = 0;
   7898 			cksumfields = 0;
   7899 		}
   7900 
   7901 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7902 
   7903 		/* Sync the DMA map. */
   7904 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7905 		    BUS_DMASYNC_PREWRITE);
   7906 
   7907 		/* Initialize the transmit descriptor. */
   7908 		for (nexttx = txq->txq_next, seg = 0;
   7909 		     seg < dmamap->dm_nsegs; seg++) {
   7910 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7911 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7912 			     seglen != 0;
   7913 			     curaddr += curlen, seglen -= curlen,
   7914 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7915 				curlen = seglen;
   7916 
   7917 				/*
   7918 				 * So says the Linux driver:
   7919 				 * Work around for premature descriptor
   7920 				 * write-backs in TSO mode.  Append a
   7921 				 * 4-byte sentinel descriptor.
   7922 				 */
   7923 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7924 				    curlen > 8)
   7925 					curlen -= 4;
   7926 
   7927 				wm_set_dma_addr(
   7928 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7929 				txq->txq_descs[nexttx].wtx_cmdlen
   7930 				    = htole32(cksumcmd | curlen);
   7931 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7932 				    = 0;
   7933 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7934 				    = cksumfields;
   7935 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7936 				lasttx = nexttx;
   7937 
   7938 				DPRINTF(WM_DEBUG_TX,
   7939 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7940 					"len %#04zx\n",
   7941 					device_xname(sc->sc_dev), nexttx,
   7942 					(uint64_t)curaddr, curlen));
   7943 			}
   7944 		}
   7945 
   7946 		KASSERT(lasttx != -1);
   7947 
   7948 		/*
   7949 		 * Set up the command byte on the last descriptor of
   7950 		 * the packet. If we're in the interrupt delay window,
   7951 		 * delay the interrupt.
   7952 		 */
   7953 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7954 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7955 
   7956 		/*
   7957 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7958 		 * up the descriptor to encapsulate the packet for us.
   7959 		 *
   7960 		 * This is only valid on the last descriptor of the packet.
   7961 		 */
   7962 		if (vlan_has_tag(m0)) {
   7963 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7964 			    htole32(WTX_CMD_VLE);
   7965 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7966 			    = htole16(vlan_get_tag(m0));
   7967 		}
   7968 
   7969 		txs->txs_lastdesc = lasttx;
   7970 
   7971 		DPRINTF(WM_DEBUG_TX,
   7972 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7973 			device_xname(sc->sc_dev),
   7974 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7975 
   7976 		/* Sync the descriptors we're using. */
   7977 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7978 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7979 
   7980 		/* Give the packet to the chip. */
   7981 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7982 
   7983 		DPRINTF(WM_DEBUG_TX,
   7984 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7985 
   7986 		DPRINTF(WM_DEBUG_TX,
   7987 		    ("%s: TX: finished transmitting packet, job %d\n",
   7988 			device_xname(sc->sc_dev), txq->txq_snext));
   7989 
   7990 		/* Advance the tx pointer. */
   7991 		txq->txq_free -= txs->txs_ndesc;
   7992 		txq->txq_next = nexttx;
   7993 
   7994 		txq->txq_sfree--;
   7995 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7996 
   7997 		/* Pass the packet to any BPF listeners. */
   7998 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7999 	}
   8000 
   8001 	if (m0 != NULL) {
   8002 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8003 		WM_Q_EVCNT_INCR(txq, descdrop);
   8004 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8005 			__func__));
   8006 		m_freem(m0);
   8007 	}
   8008 
   8009 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8010 		/* No more slots; notify upper layer. */
   8011 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8012 	}
   8013 
   8014 	if (txq->txq_free != ofree) {
   8015 		/* Set a watchdog timer in case the chip flakes out. */
   8016 		txq->txq_lastsent = time_uptime;
   8017 		txq->txq_sending = true;
   8018 	}
   8019 }
   8020 
   8021 /*
   8022  * wm_nq_tx_offload:
   8023  *
   8024  *	Set up TCP/IP checksumming parameters for the
   8025  *	specified packet, for NEWQUEUE devices
   8026  */
   8027 static void
   8028 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8029     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8030 {
   8031 	struct mbuf *m0 = txs->txs_mbuf;
   8032 	uint32_t vl_len, mssidx, cmdc;
   8033 	struct ether_header *eh;
   8034 	int offset, iphl;
   8035 
   8036 	/*
   8037 	 * XXX It would be nice if the mbuf pkthdr had offset
   8038 	 * fields for the protocol headers.
   8039 	 */
   8040 	*cmdlenp = 0;
   8041 	*fieldsp = 0;
   8042 
   8043 	eh = mtod(m0, struct ether_header *);
   8044 	switch (htons(eh->ether_type)) {
   8045 	case ETHERTYPE_IP:
   8046 	case ETHERTYPE_IPV6:
   8047 		offset = ETHER_HDR_LEN;
   8048 		break;
   8049 
   8050 	case ETHERTYPE_VLAN:
   8051 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8052 		break;
   8053 
   8054 	default:
   8055 		/* Don't support this protocol or encapsulation. */
   8056 		*do_csum = false;
   8057 		return;
   8058 	}
   8059 	*do_csum = true;
   8060 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8061 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8062 
   8063 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8064 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8065 
   8066 	if ((m0->m_pkthdr.csum_flags &
   8067 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8068 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8069 	} else {
   8070 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8071 	}
   8072 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8073 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8074 
   8075 	if (vlan_has_tag(m0)) {
   8076 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8077 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8078 		*cmdlenp |= NQTX_CMD_VLE;
   8079 	}
   8080 
   8081 	mssidx = 0;
   8082 
   8083 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8084 		int hlen = offset + iphl;
   8085 		int tcp_hlen;
   8086 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8087 
   8088 		if (__predict_false(m0->m_len <
   8089 				    (hlen + sizeof(struct tcphdr)))) {
   8090 			/*
   8091 			 * TCP/IP headers are not in the first mbuf; we need
   8092 			 * to do this the slow and painful way. Let's just
   8093 			 * hope this doesn't happen very often.
   8094 			 */
   8095 			struct tcphdr th;
   8096 
   8097 			WM_Q_EVCNT_INCR(txq, tsopain);
   8098 
   8099 			m_copydata(m0, hlen, sizeof(th), &th);
   8100 			if (v4) {
   8101 				struct ip ip;
   8102 
   8103 				m_copydata(m0, offset, sizeof(ip), &ip);
   8104 				ip.ip_len = 0;
   8105 				m_copyback(m0,
   8106 				    offset + offsetof(struct ip, ip_len),
   8107 				    sizeof(ip.ip_len), &ip.ip_len);
   8108 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8109 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8110 			} else {
   8111 				struct ip6_hdr ip6;
   8112 
   8113 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8114 				ip6.ip6_plen = 0;
   8115 				m_copyback(m0,
   8116 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8117 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8118 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8119 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8120 			}
   8121 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8122 			    sizeof(th.th_sum), &th.th_sum);
   8123 
   8124 			tcp_hlen = th.th_off << 2;
   8125 		} else {
   8126 			/*
   8127 			 * TCP/IP headers are in the first mbuf; we can do
   8128 			 * this the easy way.
   8129 			 */
   8130 			struct tcphdr *th;
   8131 
   8132 			if (v4) {
   8133 				struct ip *ip =
   8134 				    (void *)(mtod(m0, char *) + offset);
   8135 				th = (void *)(mtod(m0, char *) + hlen);
   8136 
   8137 				ip->ip_len = 0;
   8138 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8139 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8140 			} else {
   8141 				struct ip6_hdr *ip6 =
   8142 				    (void *)(mtod(m0, char *) + offset);
   8143 				th = (void *)(mtod(m0, char *) + hlen);
   8144 
   8145 				ip6->ip6_plen = 0;
   8146 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8147 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8148 			}
   8149 			tcp_hlen = th->th_off << 2;
   8150 		}
   8151 		hlen += tcp_hlen;
   8152 		*cmdlenp |= NQTX_CMD_TSE;
   8153 
   8154 		if (v4) {
   8155 			WM_Q_EVCNT_INCR(txq, tso);
   8156 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8157 		} else {
   8158 			WM_Q_EVCNT_INCR(txq, tso6);
   8159 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8160 		}
   8161 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8162 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8163 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8164 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8165 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8166 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8167 	} else {
   8168 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8169 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8170 	}
   8171 
   8172 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8173 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8174 		cmdc |= NQTXC_CMD_IP4;
   8175 	}
   8176 
   8177 	if (m0->m_pkthdr.csum_flags &
   8178 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8179 		WM_Q_EVCNT_INCR(txq, tusum);
   8180 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8181 			cmdc |= NQTXC_CMD_TCP;
   8182 		else
   8183 			cmdc |= NQTXC_CMD_UDP;
   8184 
   8185 		cmdc |= NQTXC_CMD_IP4;
   8186 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8187 	}
   8188 	if (m0->m_pkthdr.csum_flags &
   8189 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8190 		WM_Q_EVCNT_INCR(txq, tusum6);
   8191 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8192 			cmdc |= NQTXC_CMD_TCP;
   8193 		else
   8194 			cmdc |= NQTXC_CMD_UDP;
   8195 
   8196 		cmdc |= NQTXC_CMD_IP6;
   8197 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8198 	}
   8199 
   8200 	/*
   8201 	 * We don't have to write context descriptor for every packet to
   8202 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
   8203 	 * I210 and I211. It is enough to write once per a Tx queue for these
   8204 	 * controllers.
   8205 	 * It would be overhead to write context descriptor for every packet,
   8206 	 * however it does not cause problems.
   8207 	 */
   8208 	/* Fill in the context descriptor. */
   8209 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8210 	    htole32(vl_len);
   8211 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8212 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8213 	    htole32(cmdc);
   8214 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8215 	    htole32(mssidx);
   8216 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8217 	DPRINTF(WM_DEBUG_TX,
   8218 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8219 		txq->txq_next, 0, vl_len));
   8220 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8221 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8222 	txs->txs_ndesc++;
   8223 }
   8224 
   8225 /*
   8226  * wm_nq_start:		[ifnet interface function]
   8227  *
   8228  *	Start packet transmission on the interface for NEWQUEUE devices
   8229  */
   8230 static void
   8231 wm_nq_start(struct ifnet *ifp)
   8232 {
   8233 	struct wm_softc *sc = ifp->if_softc;
   8234 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8235 
   8236 #ifdef WM_MPSAFE
   8237 	KASSERT(if_is_mpsafe(ifp));
   8238 #endif
   8239 	/*
   8240 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8241 	 */
   8242 
   8243 	mutex_enter(txq->txq_lock);
   8244 	if (!txq->txq_stopping)
   8245 		wm_nq_start_locked(ifp);
   8246 	mutex_exit(txq->txq_lock);
   8247 }
   8248 
   8249 static void
   8250 wm_nq_start_locked(struct ifnet *ifp)
   8251 {
   8252 	struct wm_softc *sc = ifp->if_softc;
   8253 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8254 
   8255 	wm_nq_send_common_locked(ifp, txq, false);
   8256 }
   8257 
   8258 static int
   8259 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8260 {
   8261 	int qid;
   8262 	struct wm_softc *sc = ifp->if_softc;
   8263 	struct wm_txqueue *txq;
   8264 
   8265 	qid = wm_select_txqueue(ifp, m);
   8266 	txq = &sc->sc_queue[qid].wmq_txq;
   8267 
   8268 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8269 		m_freem(m);
   8270 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8271 		return ENOBUFS;
   8272 	}
   8273 
   8274 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8275 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8276 	if (m->m_flags & M_MCAST)
   8277 		if_statinc_ref(nsr, if_omcasts);
   8278 	IF_STAT_PUTREF(ifp);
   8279 
   8280 	/*
   8281 	 * The situations which this mutex_tryenter() fails at running time
   8282 	 * are below two patterns.
   8283 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
   8284 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
   8285 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   8286 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   8287 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
   8288 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
   8289 	 * stuck, either.
   8290 	 */
   8291 	if (mutex_tryenter(txq->txq_lock)) {
   8292 		if (!txq->txq_stopping)
   8293 			wm_nq_transmit_locked(ifp, txq);
   8294 		mutex_exit(txq->txq_lock);
   8295 	}
   8296 
   8297 	return 0;
   8298 }
   8299 
   8300 static void
   8301 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8302 {
   8303 
   8304 	wm_nq_send_common_locked(ifp, txq, true);
   8305 }
   8306 
   8307 static void
   8308 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8309     bool is_transmit)
   8310 {
   8311 	struct wm_softc *sc = ifp->if_softc;
   8312 	struct mbuf *m0;
   8313 	struct wm_txsoft *txs;
   8314 	bus_dmamap_t dmamap;
   8315 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8316 	bool do_csum, sent;
   8317 	bool remap = true;
   8318 
   8319 	KASSERT(mutex_owned(txq->txq_lock));
   8320 
   8321 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8322 		return;
   8323 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8324 		return;
   8325 
   8326 	sent = false;
   8327 
   8328 	/*
   8329 	 * Loop through the send queue, setting up transmit descriptors
   8330 	 * until we drain the queue, or use up all available transmit
   8331 	 * descriptors.
   8332 	 */
   8333 	for (;;) {
   8334 		m0 = NULL;
   8335 
   8336 		/* Get a work queue entry. */
   8337 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8338 			wm_txeof(txq, UINT_MAX);
   8339 			if (txq->txq_sfree == 0) {
   8340 				DPRINTF(WM_DEBUG_TX,
   8341 				    ("%s: TX: no free job descriptors\n",
   8342 					device_xname(sc->sc_dev)));
   8343 				WM_Q_EVCNT_INCR(txq, txsstall);
   8344 				break;
   8345 			}
   8346 		}
   8347 
   8348 		/* Grab a packet off the queue. */
   8349 		if (is_transmit)
   8350 			m0 = pcq_get(txq->txq_interq);
   8351 		else
   8352 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8353 		if (m0 == NULL)
   8354 			break;
   8355 
   8356 		DPRINTF(WM_DEBUG_TX,
   8357 		    ("%s: TX: have packet to transmit: %p\n",
   8358 		    device_xname(sc->sc_dev), m0));
   8359 
   8360 		txs = &txq->txq_soft[txq->txq_snext];
   8361 		dmamap = txs->txs_dmamap;
   8362 
   8363 		/*
   8364 		 * Load the DMA map.  If this fails, the packet either
   8365 		 * didn't fit in the allotted number of segments, or we
   8366 		 * were short on resources.  For the too-many-segments
   8367 		 * case, we simply report an error and drop the packet,
   8368 		 * since we can't sanely copy a jumbo packet to a single
   8369 		 * buffer.
   8370 		 */
   8371 retry:
   8372 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8373 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8374 		if (__predict_false(error)) {
   8375 			if (error == EFBIG) {
   8376 				if (remap == true) {
   8377 					struct mbuf *m;
   8378 
   8379 					remap = false;
   8380 					m = m_defrag(m0, M_NOWAIT);
   8381 					if (m != NULL) {
   8382 						WM_Q_EVCNT_INCR(txq, defrag);
   8383 						m0 = m;
   8384 						goto retry;
   8385 					}
   8386 				}
   8387 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8388 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8389 				    "DMA segments, dropping...\n",
   8390 				    device_xname(sc->sc_dev));
   8391 				wm_dump_mbuf_chain(sc, m0);
   8392 				m_freem(m0);
   8393 				continue;
   8394 			}
   8395 			/* Short on resources, just stop for now. */
   8396 			DPRINTF(WM_DEBUG_TX,
   8397 			    ("%s: TX: dmamap load failed: %d\n",
   8398 				device_xname(sc->sc_dev), error));
   8399 			break;
   8400 		}
   8401 
   8402 		segs_needed = dmamap->dm_nsegs;
   8403 
   8404 		/*
   8405 		 * Ensure we have enough descriptors free to describe
   8406 		 * the packet. Note, we always reserve one descriptor
   8407 		 * at the end of the ring due to the semantics of the
   8408 		 * TDT register, plus one more in the event we need
   8409 		 * to load offload context.
   8410 		 */
   8411 		if (segs_needed > txq->txq_free - 2) {
   8412 			/*
   8413 			 * Not enough free descriptors to transmit this
   8414 			 * packet.  We haven't committed anything yet,
   8415 			 * so just unload the DMA map, put the packet
   8416 			 * pack on the queue, and punt. Notify the upper
   8417 			 * layer that there are no more slots left.
   8418 			 */
   8419 			DPRINTF(WM_DEBUG_TX,
   8420 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8421 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8422 				segs_needed, txq->txq_free - 1));
   8423 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8424 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8425 			WM_Q_EVCNT_INCR(txq, txdstall);
   8426 			break;
   8427 		}
   8428 
   8429 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8430 
   8431 		DPRINTF(WM_DEBUG_TX,
   8432 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8433 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8434 
   8435 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8436 
   8437 		/*
   8438 		 * Store a pointer to the packet so that we can free it
   8439 		 * later.
   8440 		 *
   8441 		 * Initially, we consider the number of descriptors the
   8442 		 * packet uses the number of DMA segments.  This may be
   8443 		 * incremented by 1 if we do checksum offload (a descriptor
   8444 		 * is used to set the checksum context).
   8445 		 */
   8446 		txs->txs_mbuf = m0;
   8447 		txs->txs_firstdesc = txq->txq_next;
   8448 		txs->txs_ndesc = segs_needed;
   8449 
   8450 		/* Set up offload parameters for this packet. */
   8451 		uint32_t cmdlen, fields, dcmdlen;
   8452 		if (m0->m_pkthdr.csum_flags &
   8453 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8454 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8455 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8456 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8457 			    &do_csum);
   8458 		} else {
   8459 			do_csum = false;
   8460 			cmdlen = 0;
   8461 			fields = 0;
   8462 		}
   8463 
   8464 		/* Sync the DMA map. */
   8465 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8466 		    BUS_DMASYNC_PREWRITE);
   8467 
   8468 		/* Initialize the first transmit descriptor. */
   8469 		nexttx = txq->txq_next;
   8470 		if (!do_csum) {
   8471 			/* Setup a legacy descriptor */
   8472 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8473 			    dmamap->dm_segs[0].ds_addr);
   8474 			txq->txq_descs[nexttx].wtx_cmdlen =
   8475 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8476 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8477 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8478 			if (vlan_has_tag(m0)) {
   8479 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8480 				    htole32(WTX_CMD_VLE);
   8481 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8482 				    htole16(vlan_get_tag(m0));
   8483 			} else
   8484 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8485 
   8486 			dcmdlen = 0;
   8487 		} else {
   8488 			/* Setup an advanced data descriptor */
   8489 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8490 			    htole64(dmamap->dm_segs[0].ds_addr);
   8491 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8492 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8493 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8494 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8495 			    htole32(fields);
   8496 			DPRINTF(WM_DEBUG_TX,
   8497 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8498 				device_xname(sc->sc_dev), nexttx,
   8499 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8500 			DPRINTF(WM_DEBUG_TX,
   8501 			    ("\t 0x%08x%08x\n", fields,
   8502 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8503 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8504 		}
   8505 
   8506 		lasttx = nexttx;
   8507 		nexttx = WM_NEXTTX(txq, nexttx);
   8508 		/*
   8509 		 * Fill in the next descriptors. legacy or advanced format
   8510 		 * is the same here
   8511 		 */
   8512 		for (seg = 1; seg < dmamap->dm_nsegs;
   8513 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8514 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8515 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8516 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8517 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8518 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8519 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8520 			lasttx = nexttx;
   8521 
   8522 			DPRINTF(WM_DEBUG_TX,
   8523 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8524 				device_xname(sc->sc_dev), nexttx,
   8525 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8526 				dmamap->dm_segs[seg].ds_len));
   8527 		}
   8528 
   8529 		KASSERT(lasttx != -1);
   8530 
   8531 		/*
   8532 		 * Set up the command byte on the last descriptor of
   8533 		 * the packet. If we're in the interrupt delay window,
   8534 		 * delay the interrupt.
   8535 		 */
   8536 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8537 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8538 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8539 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8540 
   8541 		txs->txs_lastdesc = lasttx;
   8542 
   8543 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8544 		    device_xname(sc->sc_dev),
   8545 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8546 
   8547 		/* Sync the descriptors we're using. */
   8548 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8549 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8550 
   8551 		/* Give the packet to the chip. */
   8552 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8553 		sent = true;
   8554 
   8555 		DPRINTF(WM_DEBUG_TX,
   8556 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8557 
   8558 		DPRINTF(WM_DEBUG_TX,
   8559 		    ("%s: TX: finished transmitting packet, job %d\n",
   8560 			device_xname(sc->sc_dev), txq->txq_snext));
   8561 
   8562 		/* Advance the tx pointer. */
   8563 		txq->txq_free -= txs->txs_ndesc;
   8564 		txq->txq_next = nexttx;
   8565 
   8566 		txq->txq_sfree--;
   8567 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8568 
   8569 		/* Pass the packet to any BPF listeners. */
   8570 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8571 	}
   8572 
   8573 	if (m0 != NULL) {
   8574 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8575 		WM_Q_EVCNT_INCR(txq, descdrop);
   8576 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8577 			__func__));
   8578 		m_freem(m0);
   8579 	}
   8580 
   8581 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8582 		/* No more slots; notify upper layer. */
   8583 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8584 	}
   8585 
   8586 	if (sent) {
   8587 		/* Set a watchdog timer in case the chip flakes out. */
   8588 		txq->txq_lastsent = time_uptime;
   8589 		txq->txq_sending = true;
   8590 	}
   8591 }
   8592 
   8593 static void
   8594 wm_deferred_start_locked(struct wm_txqueue *txq)
   8595 {
   8596 	struct wm_softc *sc = txq->txq_sc;
   8597 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8598 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8599 	int qid = wmq->wmq_id;
   8600 
   8601 	KASSERT(mutex_owned(txq->txq_lock));
   8602 
   8603 	if (txq->txq_stopping) {
   8604 		mutex_exit(txq->txq_lock);
   8605 		return;
   8606 	}
   8607 
   8608 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8609 		/* XXX need for ALTQ or one CPU system */
   8610 		if (qid == 0)
   8611 			wm_nq_start_locked(ifp);
   8612 		wm_nq_transmit_locked(ifp, txq);
   8613 	} else {
   8614 		/* XXX need for ALTQ or one CPU system */
   8615 		if (qid == 0)
   8616 			wm_start_locked(ifp);
   8617 		wm_transmit_locked(ifp, txq);
   8618 	}
   8619 }
   8620 
   8621 /* Interrupt */
   8622 
   8623 /*
   8624  * wm_txeof:
   8625  *
   8626  *	Helper; handle transmit interrupts.
   8627  */
   8628 static bool
   8629 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8630 {
   8631 	struct wm_softc *sc = txq->txq_sc;
   8632 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8633 	struct wm_txsoft *txs;
   8634 	int count = 0;
   8635 	int i;
   8636 	uint8_t status;
   8637 	bool more = false;
   8638 
   8639 	KASSERT(mutex_owned(txq->txq_lock));
   8640 
   8641 	if (txq->txq_stopping)
   8642 		return false;
   8643 
   8644 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8645 
   8646 	/*
   8647 	 * Go through the Tx list and free mbufs for those
   8648 	 * frames which have been transmitted.
   8649 	 */
   8650 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8651 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8652 		if (limit-- == 0) {
   8653 			more = true;
   8654 			DPRINTF(WM_DEBUG_TX,
   8655 			    ("%s: TX: loop limited, job %d is not processed\n",
   8656 				device_xname(sc->sc_dev), i));
   8657 			break;
   8658 		}
   8659 
   8660 		txs = &txq->txq_soft[i];
   8661 
   8662 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8663 			device_xname(sc->sc_dev), i));
   8664 
   8665 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8666 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8667 
   8668 		status =
   8669 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8670 		if ((status & WTX_ST_DD) == 0) {
   8671 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8672 			    BUS_DMASYNC_PREREAD);
   8673 			break;
   8674 		}
   8675 
   8676 		count++;
   8677 		DPRINTF(WM_DEBUG_TX,
   8678 		    ("%s: TX: job %d done: descs %d..%d\n",
   8679 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8680 		    txs->txs_lastdesc));
   8681 
   8682 		/*
   8683 		 * XXX We should probably be using the statistics
   8684 		 * XXX registers, but I don't know if they exist
   8685 		 * XXX on chips before the i82544.
   8686 		 */
   8687 
   8688 #ifdef WM_EVENT_COUNTERS
   8689 		if (status & WTX_ST_TU)
   8690 			WM_Q_EVCNT_INCR(txq, underrun);
   8691 #endif /* WM_EVENT_COUNTERS */
   8692 
   8693 		/*
   8694 		 * 82574 and newer's document says the status field has neither
   8695 		 * EC (Excessive Collision) bit nor LC (Late Collision) bit
   8696 		 * (reserved). Refer "PCIe GbE Controller Open Source Software
   8697 		 * Developer's Manual", 82574 datasheet and newer.
   8698 		 *
   8699 		 * XXX I saw the LC bit was set on I218 even though the media
   8700 		 * was full duplex, so the bit might be used for other
   8701 		 * meaning ...(I have no document).
   8702 		 */
   8703 
   8704 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8705 		    && ((sc->sc_type < WM_T_82574)
   8706 			|| (sc->sc_type == WM_T_80003))) {
   8707 			if_statinc(ifp, if_oerrors);
   8708 			if (status & WTX_ST_LC)
   8709 				log(LOG_WARNING, "%s: late collision\n",
   8710 				    device_xname(sc->sc_dev));
   8711 			else if (status & WTX_ST_EC) {
   8712 				if_statadd(ifp, if_collisions,
   8713 				    TX_COLLISION_THRESHOLD + 1);
   8714 				log(LOG_WARNING, "%s: excessive collisions\n",
   8715 				    device_xname(sc->sc_dev));
   8716 			}
   8717 		} else
   8718 			if_statinc(ifp, if_opackets);
   8719 
   8720 		txq->txq_packets++;
   8721 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8722 
   8723 		txq->txq_free += txs->txs_ndesc;
   8724 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8725 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8726 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8727 		m_freem(txs->txs_mbuf);
   8728 		txs->txs_mbuf = NULL;
   8729 	}
   8730 
   8731 	/* Update the dirty transmit buffer pointer. */
   8732 	txq->txq_sdirty = i;
   8733 	DPRINTF(WM_DEBUG_TX,
   8734 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8735 
   8736 	if (count != 0)
   8737 		rnd_add_uint32(&sc->rnd_source, count);
   8738 
   8739 	/*
   8740 	 * If there are no more pending transmissions, cancel the watchdog
   8741 	 * timer.
   8742 	 */
   8743 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8744 		txq->txq_sending = false;
   8745 
   8746 	return more;
   8747 }
   8748 
   8749 static inline uint32_t
   8750 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8751 {
   8752 	struct wm_softc *sc = rxq->rxq_sc;
   8753 
   8754 	if (sc->sc_type == WM_T_82574)
   8755 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8756 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8757 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8758 	else
   8759 		return rxq->rxq_descs[idx].wrx_status;
   8760 }
   8761 
   8762 static inline uint32_t
   8763 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8764 {
   8765 	struct wm_softc *sc = rxq->rxq_sc;
   8766 
   8767 	if (sc->sc_type == WM_T_82574)
   8768 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8769 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8770 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8771 	else
   8772 		return rxq->rxq_descs[idx].wrx_errors;
   8773 }
   8774 
   8775 static inline uint16_t
   8776 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8777 {
   8778 	struct wm_softc *sc = rxq->rxq_sc;
   8779 
   8780 	if (sc->sc_type == WM_T_82574)
   8781 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8782 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8783 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8784 	else
   8785 		return rxq->rxq_descs[idx].wrx_special;
   8786 }
   8787 
   8788 static inline int
   8789 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8790 {
   8791 	struct wm_softc *sc = rxq->rxq_sc;
   8792 
   8793 	if (sc->sc_type == WM_T_82574)
   8794 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8795 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8796 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8797 	else
   8798 		return rxq->rxq_descs[idx].wrx_len;
   8799 }
   8800 
   8801 #ifdef WM_DEBUG
   8802 static inline uint32_t
   8803 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8804 {
   8805 	struct wm_softc *sc = rxq->rxq_sc;
   8806 
   8807 	if (sc->sc_type == WM_T_82574)
   8808 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8809 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8810 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8811 	else
   8812 		return 0;
   8813 }
   8814 
   8815 static inline uint8_t
   8816 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8817 {
   8818 	struct wm_softc *sc = rxq->rxq_sc;
   8819 
   8820 	if (sc->sc_type == WM_T_82574)
   8821 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8822 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8823 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8824 	else
   8825 		return 0;
   8826 }
   8827 #endif /* WM_DEBUG */
   8828 
   8829 static inline bool
   8830 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8831     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8832 {
   8833 
   8834 	if (sc->sc_type == WM_T_82574)
   8835 		return (status & ext_bit) != 0;
   8836 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8837 		return (status & nq_bit) != 0;
   8838 	else
   8839 		return (status & legacy_bit) != 0;
   8840 }
   8841 
   8842 static inline bool
   8843 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8844     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8845 {
   8846 
   8847 	if (sc->sc_type == WM_T_82574)
   8848 		return (error & ext_bit) != 0;
   8849 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8850 		return (error & nq_bit) != 0;
   8851 	else
   8852 		return (error & legacy_bit) != 0;
   8853 }
   8854 
   8855 static inline bool
   8856 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8857 {
   8858 
   8859 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8860 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8861 		return true;
   8862 	else
   8863 		return false;
   8864 }
   8865 
   8866 static inline bool
   8867 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8868 {
   8869 	struct wm_softc *sc = rxq->rxq_sc;
   8870 
   8871 	/* XXX missing error bit for newqueue? */
   8872 	if (wm_rxdesc_is_set_error(sc, errors,
   8873 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8874 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8875 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8876 		NQRXC_ERROR_RXE)) {
   8877 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8878 		    EXTRXC_ERROR_SE, 0))
   8879 			log(LOG_WARNING, "%s: symbol error\n",
   8880 			    device_xname(sc->sc_dev));
   8881 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8882 		    EXTRXC_ERROR_SEQ, 0))
   8883 			log(LOG_WARNING, "%s: receive sequence error\n",
   8884 			    device_xname(sc->sc_dev));
   8885 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8886 		    EXTRXC_ERROR_CE, 0))
   8887 			log(LOG_WARNING, "%s: CRC error\n",
   8888 			    device_xname(sc->sc_dev));
   8889 		return true;
   8890 	}
   8891 
   8892 	return false;
   8893 }
   8894 
   8895 static inline bool
   8896 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8897 {
   8898 	struct wm_softc *sc = rxq->rxq_sc;
   8899 
   8900 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8901 		NQRXC_STATUS_DD)) {
   8902 		/* We have processed all of the receive descriptors. */
   8903 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8904 		return false;
   8905 	}
   8906 
   8907 	return true;
   8908 }
   8909 
   8910 static inline bool
   8911 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8912     uint16_t vlantag, struct mbuf *m)
   8913 {
   8914 
   8915 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8916 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8917 		vlan_set_tag(m, le16toh(vlantag));
   8918 	}
   8919 
   8920 	return true;
   8921 }
   8922 
   8923 static inline void
   8924 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8925     uint32_t errors, struct mbuf *m)
   8926 {
   8927 	struct wm_softc *sc = rxq->rxq_sc;
   8928 
   8929 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8930 		if (wm_rxdesc_is_set_status(sc, status,
   8931 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8932 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8933 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8934 			if (wm_rxdesc_is_set_error(sc, errors,
   8935 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8936 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8937 		}
   8938 		if (wm_rxdesc_is_set_status(sc, status,
   8939 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8940 			/*
   8941 			 * Note: we don't know if this was TCP or UDP,
   8942 			 * so we just set both bits, and expect the
   8943 			 * upper layers to deal.
   8944 			 */
   8945 			WM_Q_EVCNT_INCR(rxq, tusum);
   8946 			m->m_pkthdr.csum_flags |=
   8947 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8948 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8949 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8950 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8951 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8952 		}
   8953 	}
   8954 }
   8955 
   8956 /*
   8957  * wm_rxeof:
   8958  *
   8959  *	Helper; handle receive interrupts.
   8960  */
   8961 static bool
   8962 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8963 {
   8964 	struct wm_softc *sc = rxq->rxq_sc;
   8965 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8966 	struct wm_rxsoft *rxs;
   8967 	struct mbuf *m;
   8968 	int i, len;
   8969 	int count = 0;
   8970 	uint32_t status, errors;
   8971 	uint16_t vlantag;
   8972 	bool more = false;
   8973 
   8974 	KASSERT(mutex_owned(rxq->rxq_lock));
   8975 
   8976 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8977 		if (limit-- == 0) {
   8978 			rxq->rxq_ptr = i;
   8979 			more = true;
   8980 			DPRINTF(WM_DEBUG_RX,
   8981 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8982 				device_xname(sc->sc_dev), i));
   8983 			break;
   8984 		}
   8985 
   8986 		rxs = &rxq->rxq_soft[i];
   8987 
   8988 		DPRINTF(WM_DEBUG_RX,
   8989 		    ("%s: RX: checking descriptor %d\n",
   8990 			device_xname(sc->sc_dev), i));
   8991 		wm_cdrxsync(rxq, i,
   8992 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8993 
   8994 		status = wm_rxdesc_get_status(rxq, i);
   8995 		errors = wm_rxdesc_get_errors(rxq, i);
   8996 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8997 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8998 #ifdef WM_DEBUG
   8999 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9000 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9001 #endif
   9002 
   9003 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9004 			/*
   9005 			 * Update the receive pointer holding rxq_lock
   9006 			 * consistent with increment counter.
   9007 			 */
   9008 			rxq->rxq_ptr = i;
   9009 			break;
   9010 		}
   9011 
   9012 		count++;
   9013 		if (__predict_false(rxq->rxq_discard)) {
   9014 			DPRINTF(WM_DEBUG_RX,
   9015 			    ("%s: RX: discarding contents of descriptor %d\n",
   9016 				device_xname(sc->sc_dev), i));
   9017 			wm_init_rxdesc(rxq, i);
   9018 			if (wm_rxdesc_is_eop(rxq, status)) {
   9019 				/* Reset our state. */
   9020 				DPRINTF(WM_DEBUG_RX,
   9021 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9022 					device_xname(sc->sc_dev)));
   9023 				rxq->rxq_discard = 0;
   9024 			}
   9025 			continue;
   9026 		}
   9027 
   9028 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9029 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9030 
   9031 		m = rxs->rxs_mbuf;
   9032 
   9033 		/*
   9034 		 * Add a new receive buffer to the ring, unless of
   9035 		 * course the length is zero. Treat the latter as a
   9036 		 * failed mapping.
   9037 		 */
   9038 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9039 			/*
   9040 			 * Failed, throw away what we've done so
   9041 			 * far, and discard the rest of the packet.
   9042 			 */
   9043 			if_statinc(ifp, if_ierrors);
   9044 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9045 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9046 			wm_init_rxdesc(rxq, i);
   9047 			if (!wm_rxdesc_is_eop(rxq, status))
   9048 				rxq->rxq_discard = 1;
   9049 			if (rxq->rxq_head != NULL)
   9050 				m_freem(rxq->rxq_head);
   9051 			WM_RXCHAIN_RESET(rxq);
   9052 			DPRINTF(WM_DEBUG_RX,
   9053 			    ("%s: RX: Rx buffer allocation failed, "
   9054 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9055 				rxq->rxq_discard ? " (discard)" : ""));
   9056 			continue;
   9057 		}
   9058 
   9059 		m->m_len = len;
   9060 		rxq->rxq_len += len;
   9061 		DPRINTF(WM_DEBUG_RX,
   9062 		    ("%s: RX: buffer at %p len %d\n",
   9063 			device_xname(sc->sc_dev), m->m_data, len));
   9064 
   9065 		/* If this is not the end of the packet, keep looking. */
   9066 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9067 			WM_RXCHAIN_LINK(rxq, m);
   9068 			DPRINTF(WM_DEBUG_RX,
   9069 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9070 				device_xname(sc->sc_dev), rxq->rxq_len));
   9071 			continue;
   9072 		}
   9073 
   9074 		/*
   9075 		 * Okay, we have the entire packet now. The chip is
   9076 		 * configured to include the FCS except I35[05], I21[01].
   9077 		 * (not all chips can be configured to strip it), so we need
   9078 		 * to trim it. Those chips have an eratta, the RCTL_SECRC bit
   9079 		 * in RCTL register is always set, so we don't trim it.
   9080 		 * PCH2 and newer chip also not include FCS when jumbo
   9081 		 * frame is used to do workaround an errata.
   9082 		 * May need to adjust length of previous mbuf in the
   9083 		 * chain if the current mbuf is too short.
   9084 		 */
   9085 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9086 			if (m->m_len < ETHER_CRC_LEN) {
   9087 				rxq->rxq_tail->m_len
   9088 				    -= (ETHER_CRC_LEN - m->m_len);
   9089 				m->m_len = 0;
   9090 			} else
   9091 				m->m_len -= ETHER_CRC_LEN;
   9092 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9093 		} else
   9094 			len = rxq->rxq_len;
   9095 
   9096 		WM_RXCHAIN_LINK(rxq, m);
   9097 
   9098 		*rxq->rxq_tailp = NULL;
   9099 		m = rxq->rxq_head;
   9100 
   9101 		WM_RXCHAIN_RESET(rxq);
   9102 
   9103 		DPRINTF(WM_DEBUG_RX,
   9104 		    ("%s: RX: have entire packet, len -> %d\n",
   9105 			device_xname(sc->sc_dev), len));
   9106 
   9107 		/* If an error occurred, update stats and drop the packet. */
   9108 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9109 			m_freem(m);
   9110 			continue;
   9111 		}
   9112 
   9113 		/* No errors.  Receive the packet. */
   9114 		m_set_rcvif(m, ifp);
   9115 		m->m_pkthdr.len = len;
   9116 		/*
   9117 		 * TODO
   9118 		 * should be save rsshash and rsstype to this mbuf.
   9119 		 */
   9120 		DPRINTF(WM_DEBUG_RX,
   9121 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9122 			device_xname(sc->sc_dev), rsstype, rsshash));
   9123 
   9124 		/*
   9125 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9126 		 * for us.  Associate the tag with the packet.
   9127 		 */
   9128 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9129 			continue;
   9130 
   9131 		/* Set up checksum info for this packet. */
   9132 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9133 		/*
   9134 		 * Update the receive pointer holding rxq_lock consistent with
   9135 		 * increment counter.
   9136 		 */
   9137 		rxq->rxq_ptr = i;
   9138 		rxq->rxq_packets++;
   9139 		rxq->rxq_bytes += len;
   9140 		mutex_exit(rxq->rxq_lock);
   9141 
   9142 		/* Pass it on. */
   9143 		if_percpuq_enqueue(sc->sc_ipq, m);
   9144 
   9145 		mutex_enter(rxq->rxq_lock);
   9146 
   9147 		if (rxq->rxq_stopping)
   9148 			break;
   9149 	}
   9150 
   9151 	if (count != 0)
   9152 		rnd_add_uint32(&sc->rnd_source, count);
   9153 
   9154 	DPRINTF(WM_DEBUG_RX,
   9155 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9156 
   9157 	return more;
   9158 }
   9159 
   9160 /*
   9161  * wm_linkintr_gmii:
   9162  *
   9163  *	Helper; handle link interrupts for GMII.
   9164  */
   9165 static void
   9166 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9167 {
   9168 	device_t dev = sc->sc_dev;
   9169 	uint32_t status, reg;
   9170 	bool link;
   9171 	int rv;
   9172 
   9173 	KASSERT(WM_CORE_LOCKED(sc));
   9174 
   9175 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9176 		__func__));
   9177 
   9178 	if ((icr & ICR_LSC) == 0) {
   9179 		if (icr & ICR_RXSEQ)
   9180 			DPRINTF(WM_DEBUG_LINK,
   9181 			    ("%s: LINK Receive sequence error\n",
   9182 				device_xname(dev)));
   9183 		return;
   9184 	}
   9185 
   9186 	/* Link status changed */
   9187 	status = CSR_READ(sc, WMREG_STATUS);
   9188 	link = status & STATUS_LU;
   9189 	if (link) {
   9190 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9191 			device_xname(dev),
   9192 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9193 	} else {
   9194 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9195 			device_xname(dev)));
   9196 	}
   9197 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9198 		wm_gig_downshift_workaround_ich8lan(sc);
   9199 
   9200 	if ((sc->sc_type == WM_T_ICH8)
   9201 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9202 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9203 	}
   9204 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9205 		device_xname(dev)));
   9206 	mii_pollstat(&sc->sc_mii);
   9207 	if (sc->sc_type == WM_T_82543) {
   9208 		int miistatus, active;
   9209 
   9210 		/*
   9211 		 * With 82543, we need to force speed and
   9212 		 * duplex on the MAC equal to what the PHY
   9213 		 * speed and duplex configuration is.
   9214 		 */
   9215 		miistatus = sc->sc_mii.mii_media_status;
   9216 
   9217 		if (miistatus & IFM_ACTIVE) {
   9218 			active = sc->sc_mii.mii_media_active;
   9219 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9220 			switch (IFM_SUBTYPE(active)) {
   9221 			case IFM_10_T:
   9222 				sc->sc_ctrl |= CTRL_SPEED_10;
   9223 				break;
   9224 			case IFM_100_TX:
   9225 				sc->sc_ctrl |= CTRL_SPEED_100;
   9226 				break;
   9227 			case IFM_1000_T:
   9228 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9229 				break;
   9230 			default:
   9231 				/*
   9232 				 * Fiber?
   9233 				 * Shoud not enter here.
   9234 				 */
   9235 				device_printf(dev, "unknown media (%x)\n",
   9236 				    active);
   9237 				break;
   9238 			}
   9239 			if (active & IFM_FDX)
   9240 				sc->sc_ctrl |= CTRL_FD;
   9241 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9242 		}
   9243 	} else if (sc->sc_type == WM_T_PCH) {
   9244 		wm_k1_gig_workaround_hv(sc,
   9245 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9246 	}
   9247 
   9248 	/*
   9249 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9250 	 * aggressive resulting in many collisions. To avoid this, increase
   9251 	 * the IPG and reduce Rx latency in the PHY.
   9252 	 */
   9253 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9254 	    && link) {
   9255 		uint32_t tipg_reg;
   9256 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9257 		bool fdx;
   9258 		uint16_t emi_addr, emi_val;
   9259 
   9260 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9261 		tipg_reg &= ~TIPG_IPGT_MASK;
   9262 		fdx = status & STATUS_FD;
   9263 
   9264 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9265 			tipg_reg |= 0xff;
   9266 			/* Reduce Rx latency in analog PHY */
   9267 			emi_val = 0;
   9268 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9269 		    fdx && speed != STATUS_SPEED_1000) {
   9270 			tipg_reg |= 0xc;
   9271 			emi_val = 1;
   9272 		} else {
   9273 			/* Roll back the default values */
   9274 			tipg_reg |= 0x08;
   9275 			emi_val = 1;
   9276 		}
   9277 
   9278 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9279 
   9280 		rv = sc->phy.acquire(sc);
   9281 		if (rv)
   9282 			return;
   9283 
   9284 		if (sc->sc_type == WM_T_PCH2)
   9285 			emi_addr = I82579_RX_CONFIG;
   9286 		else
   9287 			emi_addr = I217_RX_CONFIG;
   9288 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9289 
   9290 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9291 			uint16_t phy_reg;
   9292 
   9293 			sc->phy.readreg_locked(dev, 2,
   9294 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9295 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9296 			if (speed == STATUS_SPEED_100
   9297 			    || speed == STATUS_SPEED_10)
   9298 				phy_reg |= 0x3e8;
   9299 			else
   9300 				phy_reg |= 0xfa;
   9301 			sc->phy.writereg_locked(dev, 2,
   9302 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9303 
   9304 			if (speed == STATUS_SPEED_1000) {
   9305 				sc->phy.readreg_locked(dev, 2,
   9306 				    HV_PM_CTRL, &phy_reg);
   9307 
   9308 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9309 
   9310 				sc->phy.writereg_locked(dev, 2,
   9311 				    HV_PM_CTRL, phy_reg);
   9312 			}
   9313 		}
   9314 		sc->phy.release(sc);
   9315 
   9316 		if (rv)
   9317 			return;
   9318 
   9319 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9320 			uint16_t data, ptr_gap;
   9321 
   9322 			if (speed == STATUS_SPEED_1000) {
   9323 				rv = sc->phy.acquire(sc);
   9324 				if (rv)
   9325 					return;
   9326 
   9327 				rv = sc->phy.readreg_locked(dev, 2,
   9328 				    I82579_UNKNOWN1, &data);
   9329 				if (rv) {
   9330 					sc->phy.release(sc);
   9331 					return;
   9332 				}
   9333 
   9334 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9335 				if (ptr_gap < 0x18) {
   9336 					data &= ~(0x3ff << 2);
   9337 					data |= (0x18 << 2);
   9338 					rv = sc->phy.writereg_locked(dev,
   9339 					    2, I82579_UNKNOWN1, data);
   9340 				}
   9341 				sc->phy.release(sc);
   9342 				if (rv)
   9343 					return;
   9344 			} else {
   9345 				rv = sc->phy.acquire(sc);
   9346 				if (rv)
   9347 					return;
   9348 
   9349 				rv = sc->phy.writereg_locked(dev, 2,
   9350 				    I82579_UNKNOWN1, 0xc023);
   9351 				sc->phy.release(sc);
   9352 				if (rv)
   9353 					return;
   9354 
   9355 			}
   9356 		}
   9357 	}
   9358 
   9359 	/*
   9360 	 * I217 Packet Loss issue:
   9361 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9362 	 * on power up.
   9363 	 * Set the Beacon Duration for I217 to 8 usec
   9364 	 */
   9365 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9366 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9367 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9368 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9369 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9370 	}
   9371 
   9372 	/* Work-around I218 hang issue */
   9373 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9374 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9375 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9376 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9377 		wm_k1_workaround_lpt_lp(sc, link);
   9378 
   9379 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9380 		/*
   9381 		 * Set platform power management values for Latency
   9382 		 * Tolerance Reporting (LTR)
   9383 		 */
   9384 		wm_platform_pm_pch_lpt(sc,
   9385 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9386 	}
   9387 
   9388 	/* Clear link partner's EEE ability */
   9389 	sc->eee_lp_ability = 0;
   9390 
   9391 	/* FEXTNVM6 K1-off workaround */
   9392 	if (sc->sc_type == WM_T_PCH_SPT) {
   9393 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9394 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9395 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9396 		else
   9397 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9398 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9399 	}
   9400 
   9401 	if (!link)
   9402 		return;
   9403 
   9404 	switch (sc->sc_type) {
   9405 	case WM_T_PCH2:
   9406 		wm_k1_workaround_lv(sc);
   9407 		/* FALLTHROUGH */
   9408 	case WM_T_PCH:
   9409 		if (sc->sc_phytype == WMPHY_82578)
   9410 			wm_link_stall_workaround_hv(sc);
   9411 		break;
   9412 	default:
   9413 		break;
   9414 	}
   9415 
   9416 	/* Enable/Disable EEE after link up */
   9417 	if (sc->sc_phytype > WMPHY_82579)
   9418 		wm_set_eee_pchlan(sc);
   9419 }
   9420 
   9421 /*
   9422  * wm_linkintr_tbi:
   9423  *
   9424  *	Helper; handle link interrupts for TBI mode.
   9425  */
   9426 static void
   9427 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9428 {
   9429 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9430 	uint32_t status;
   9431 
   9432 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9433 		__func__));
   9434 
   9435 	status = CSR_READ(sc, WMREG_STATUS);
   9436 	if (icr & ICR_LSC) {
   9437 		wm_check_for_link(sc);
   9438 		if (status & STATUS_LU) {
   9439 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9440 				device_xname(sc->sc_dev),
   9441 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9442 			/*
   9443 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9444 			 * so we should update sc->sc_ctrl
   9445 			 */
   9446 
   9447 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9448 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9449 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9450 			if (status & STATUS_FD)
   9451 				sc->sc_tctl |=
   9452 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9453 			else
   9454 				sc->sc_tctl |=
   9455 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9456 			if (sc->sc_ctrl & CTRL_TFCE)
   9457 				sc->sc_fcrtl |= FCRTL_XONE;
   9458 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9459 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9460 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9461 			sc->sc_tbi_linkup = 1;
   9462 			if_link_state_change(ifp, LINK_STATE_UP);
   9463 		} else {
   9464 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9465 				device_xname(sc->sc_dev)));
   9466 			sc->sc_tbi_linkup = 0;
   9467 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9468 		}
   9469 		/* Update LED */
   9470 		wm_tbi_serdes_set_linkled(sc);
   9471 	} else if (icr & ICR_RXSEQ)
   9472 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9473 			device_xname(sc->sc_dev)));
   9474 }
   9475 
   9476 /*
   9477  * wm_linkintr_serdes:
   9478  *
   9479  *	Helper; handle link interrupts for TBI mode.
   9480  */
   9481 static void
   9482 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9483 {
   9484 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9485 	struct mii_data *mii = &sc->sc_mii;
   9486 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9487 	uint32_t pcs_adv, pcs_lpab, reg;
   9488 
   9489 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9490 		__func__));
   9491 
   9492 	if (icr & ICR_LSC) {
   9493 		/* Check PCS */
   9494 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9495 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9496 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9497 				device_xname(sc->sc_dev)));
   9498 			mii->mii_media_status |= IFM_ACTIVE;
   9499 			sc->sc_tbi_linkup = 1;
   9500 			if_link_state_change(ifp, LINK_STATE_UP);
   9501 		} else {
   9502 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9503 				device_xname(sc->sc_dev)));
   9504 			mii->mii_media_status |= IFM_NONE;
   9505 			sc->sc_tbi_linkup = 0;
   9506 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9507 			wm_tbi_serdes_set_linkled(sc);
   9508 			return;
   9509 		}
   9510 		mii->mii_media_active |= IFM_1000_SX;
   9511 		if ((reg & PCS_LSTS_FDX) != 0)
   9512 			mii->mii_media_active |= IFM_FDX;
   9513 		else
   9514 			mii->mii_media_active |= IFM_HDX;
   9515 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9516 			/* Check flow */
   9517 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9518 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9519 				DPRINTF(WM_DEBUG_LINK,
   9520 				    ("XXX LINKOK but not ACOMP\n"));
   9521 				return;
   9522 			}
   9523 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9524 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9525 			DPRINTF(WM_DEBUG_LINK,
   9526 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9527 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9528 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9529 				mii->mii_media_active |= IFM_FLOW
   9530 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9531 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9532 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9533 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9534 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9535 				mii->mii_media_active |= IFM_FLOW
   9536 				    | IFM_ETH_TXPAUSE;
   9537 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9538 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9539 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9540 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9541 				mii->mii_media_active |= IFM_FLOW
   9542 				    | IFM_ETH_RXPAUSE;
   9543 		}
   9544 		/* Update LED */
   9545 		wm_tbi_serdes_set_linkled(sc);
   9546 	} else
   9547 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9548 		    device_xname(sc->sc_dev)));
   9549 }
   9550 
   9551 /*
   9552  * wm_linkintr:
   9553  *
   9554  *	Helper; handle link interrupts.
   9555  */
   9556 static void
   9557 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9558 {
   9559 
   9560 	KASSERT(WM_CORE_LOCKED(sc));
   9561 
   9562 	if (sc->sc_flags & WM_F_HAS_MII)
   9563 		wm_linkintr_gmii(sc, icr);
   9564 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9565 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9566 		wm_linkintr_serdes(sc, icr);
   9567 	else
   9568 		wm_linkintr_tbi(sc, icr);
   9569 }
   9570 
   9571 
   9572 static inline void
   9573 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9574 {
   9575 
   9576 	if (wmq->wmq_txrx_use_workqueue)
   9577 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9578 	else
   9579 		softint_schedule(wmq->wmq_si);
   9580 }
   9581 
   9582 /*
   9583  * wm_intr_legacy:
   9584  *
   9585  *	Interrupt service routine for INTx and MSI.
   9586  */
   9587 static int
   9588 wm_intr_legacy(void *arg)
   9589 {
   9590 	struct wm_softc *sc = arg;
   9591 	struct wm_queue *wmq = &sc->sc_queue[0];
   9592 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9593 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9594 	uint32_t icr, rndval = 0;
   9595 	int handled = 0;
   9596 
   9597 	while (1 /* CONSTCOND */) {
   9598 		icr = CSR_READ(sc, WMREG_ICR);
   9599 		if ((icr & sc->sc_icr) == 0)
   9600 			break;
   9601 		if (handled == 0)
   9602 			DPRINTF(WM_DEBUG_TX,
   9603 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9604 		if (rndval == 0)
   9605 			rndval = icr;
   9606 
   9607 		mutex_enter(rxq->rxq_lock);
   9608 
   9609 		if (rxq->rxq_stopping) {
   9610 			mutex_exit(rxq->rxq_lock);
   9611 			break;
   9612 		}
   9613 
   9614 		handled = 1;
   9615 
   9616 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9617 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9618 			DPRINTF(WM_DEBUG_RX,
   9619 			    ("%s: RX: got Rx intr 0x%08x\n",
   9620 				device_xname(sc->sc_dev),
   9621 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9622 			WM_Q_EVCNT_INCR(rxq, intr);
   9623 		}
   9624 #endif
   9625 		/*
   9626 		 * wm_rxeof() does *not* call upper layer functions directly,
   9627 		 * as if_percpuq_enqueue() just call softint_schedule().
   9628 		 * So, we can call wm_rxeof() in interrupt context.
   9629 		 */
   9630 		wm_rxeof(rxq, UINT_MAX);
   9631 
   9632 		mutex_exit(rxq->rxq_lock);
   9633 		mutex_enter(txq->txq_lock);
   9634 
   9635 		if (txq->txq_stopping) {
   9636 			mutex_exit(txq->txq_lock);
   9637 			break;
   9638 		}
   9639 
   9640 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9641 		if (icr & ICR_TXDW) {
   9642 			DPRINTF(WM_DEBUG_TX,
   9643 			    ("%s: TX: got TXDW interrupt\n",
   9644 				device_xname(sc->sc_dev)));
   9645 			WM_Q_EVCNT_INCR(txq, txdw);
   9646 		}
   9647 #endif
   9648 		wm_txeof(txq, UINT_MAX);
   9649 
   9650 		mutex_exit(txq->txq_lock);
   9651 		WM_CORE_LOCK(sc);
   9652 
   9653 		if (sc->sc_core_stopping) {
   9654 			WM_CORE_UNLOCK(sc);
   9655 			break;
   9656 		}
   9657 
   9658 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9659 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9660 			wm_linkintr(sc, icr);
   9661 		}
   9662 		if ((icr & ICR_GPI(0)) != 0)
   9663 			device_printf(sc->sc_dev, "got module interrupt\n");
   9664 
   9665 		WM_CORE_UNLOCK(sc);
   9666 
   9667 		if (icr & ICR_RXO) {
   9668 #if defined(WM_DEBUG)
   9669 			log(LOG_WARNING, "%s: Receive overrun\n",
   9670 			    device_xname(sc->sc_dev));
   9671 #endif /* defined(WM_DEBUG) */
   9672 		}
   9673 	}
   9674 
   9675 	rnd_add_uint32(&sc->rnd_source, rndval);
   9676 
   9677 	if (handled) {
   9678 		/* Try to get more packets going. */
   9679 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9680 		wm_sched_handle_queue(sc, wmq);
   9681 	}
   9682 
   9683 	return handled;
   9684 }
   9685 
   9686 static inline void
   9687 wm_txrxintr_disable(struct wm_queue *wmq)
   9688 {
   9689 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9690 
   9691 	if (sc->sc_type == WM_T_82574)
   9692 		CSR_WRITE(sc, WMREG_IMC,
   9693 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9694 	else if (sc->sc_type == WM_T_82575)
   9695 		CSR_WRITE(sc, WMREG_EIMC,
   9696 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9697 	else
   9698 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9699 }
   9700 
   9701 static inline void
   9702 wm_txrxintr_enable(struct wm_queue *wmq)
   9703 {
   9704 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9705 
   9706 	wm_itrs_calculate(sc, wmq);
   9707 
   9708 	/*
   9709 	 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here.
   9710 	 * There is no need to care about which of RXQ(0) and RXQ(1) enable
   9711 	 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled
   9712 	 * while each wm_handle_queue(wmq) is runnig.
   9713 	 */
   9714 	if (sc->sc_type == WM_T_82574)
   9715 		CSR_WRITE(sc, WMREG_IMS,
   9716 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9717 	else if (sc->sc_type == WM_T_82575)
   9718 		CSR_WRITE(sc, WMREG_EIMS,
   9719 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9720 	else
   9721 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9722 }
   9723 
   9724 static int
   9725 wm_txrxintr_msix(void *arg)
   9726 {
   9727 	struct wm_queue *wmq = arg;
   9728 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9729 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9730 	struct wm_softc *sc = txq->txq_sc;
   9731 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9732 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9733 	bool txmore;
   9734 	bool rxmore;
   9735 
   9736 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9737 
   9738 	DPRINTF(WM_DEBUG_TX,
   9739 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9740 
   9741 	wm_txrxintr_disable(wmq);
   9742 
   9743 	mutex_enter(txq->txq_lock);
   9744 
   9745 	if (txq->txq_stopping) {
   9746 		mutex_exit(txq->txq_lock);
   9747 		return 0;
   9748 	}
   9749 
   9750 	WM_Q_EVCNT_INCR(txq, txdw);
   9751 	txmore = wm_txeof(txq, txlimit);
   9752 	/* wm_deferred start() is done in wm_handle_queue(). */
   9753 	mutex_exit(txq->txq_lock);
   9754 
   9755 	DPRINTF(WM_DEBUG_RX,
   9756 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9757 	mutex_enter(rxq->rxq_lock);
   9758 
   9759 	if (rxq->rxq_stopping) {
   9760 		mutex_exit(rxq->rxq_lock);
   9761 		return 0;
   9762 	}
   9763 
   9764 	WM_Q_EVCNT_INCR(rxq, intr);
   9765 	rxmore = wm_rxeof(rxq, rxlimit);
   9766 	mutex_exit(rxq->rxq_lock);
   9767 
   9768 	wm_itrs_writereg(sc, wmq);
   9769 
   9770 	if (txmore || rxmore) {
   9771 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9772 		wm_sched_handle_queue(sc, wmq);
   9773 	} else
   9774 		wm_txrxintr_enable(wmq);
   9775 
   9776 	return 1;
   9777 }
   9778 
   9779 static void
   9780 wm_handle_queue(void *arg)
   9781 {
   9782 	struct wm_queue *wmq = arg;
   9783 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9784 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9785 	struct wm_softc *sc = txq->txq_sc;
   9786 	u_int txlimit = sc->sc_tx_process_limit;
   9787 	u_int rxlimit = sc->sc_rx_process_limit;
   9788 	bool txmore;
   9789 	bool rxmore;
   9790 
   9791 	mutex_enter(txq->txq_lock);
   9792 	if (txq->txq_stopping) {
   9793 		mutex_exit(txq->txq_lock);
   9794 		return;
   9795 	}
   9796 	txmore = wm_txeof(txq, txlimit);
   9797 	wm_deferred_start_locked(txq);
   9798 	mutex_exit(txq->txq_lock);
   9799 
   9800 	mutex_enter(rxq->rxq_lock);
   9801 	if (rxq->rxq_stopping) {
   9802 		mutex_exit(rxq->rxq_lock);
   9803 		return;
   9804 	}
   9805 	WM_Q_EVCNT_INCR(rxq, defer);
   9806 	rxmore = wm_rxeof(rxq, rxlimit);
   9807 	mutex_exit(rxq->rxq_lock);
   9808 
   9809 	if (txmore || rxmore) {
   9810 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9811 		wm_sched_handle_queue(sc, wmq);
   9812 	} else
   9813 		wm_txrxintr_enable(wmq);
   9814 }
   9815 
   9816 static void
   9817 wm_handle_queue_work(struct work *wk, void *context)
   9818 {
   9819 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9820 
   9821 	/*
   9822 	 * "enqueued flag" is not required here.
   9823 	 */
   9824 	wm_handle_queue(wmq);
   9825 }
   9826 
   9827 /*
   9828  * wm_linkintr_msix:
   9829  *
   9830  *	Interrupt service routine for link status change for MSI-X.
   9831  */
   9832 static int
   9833 wm_linkintr_msix(void *arg)
   9834 {
   9835 	struct wm_softc *sc = arg;
   9836 	uint32_t reg;
   9837 	bool has_rxo;
   9838 
   9839 	reg = CSR_READ(sc, WMREG_ICR);
   9840 	WM_CORE_LOCK(sc);
   9841 	DPRINTF(WM_DEBUG_LINK,
   9842 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9843 		device_xname(sc->sc_dev), reg));
   9844 
   9845 	if (sc->sc_core_stopping)
   9846 		goto out;
   9847 
   9848 	if ((reg & ICR_LSC) != 0) {
   9849 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9850 		wm_linkintr(sc, ICR_LSC);
   9851 	}
   9852 	if ((reg & ICR_GPI(0)) != 0)
   9853 		device_printf(sc->sc_dev, "got module interrupt\n");
   9854 
   9855 	/*
   9856 	 * XXX 82574 MSI-X mode workaround
   9857 	 *
   9858 	 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
   9859 	 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor
   9860 	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
   9861 	 * interrupts by writing WMREG_ICS to process receive packets.
   9862 	 */
   9863 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9864 #if defined(WM_DEBUG)
   9865 		log(LOG_WARNING, "%s: Receive overrun\n",
   9866 		    device_xname(sc->sc_dev));
   9867 #endif /* defined(WM_DEBUG) */
   9868 
   9869 		has_rxo = true;
   9870 		/*
   9871 		 * The RXO interrupt is very high rate when receive traffic is
   9872 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   9873 		 * interrupts. ICR_OTHER will be enabled at the end of
   9874 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   9875 		 * ICR_RXQ(1) interrupts.
   9876 		 */
   9877 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9878 
   9879 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9880 	}
   9881 
   9882 
   9883 
   9884 out:
   9885 	WM_CORE_UNLOCK(sc);
   9886 
   9887 	if (sc->sc_type == WM_T_82574) {
   9888 		if (!has_rxo)
   9889 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9890 		else
   9891 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9892 	} else if (sc->sc_type == WM_T_82575)
   9893 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9894 	else
   9895 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9896 
   9897 	return 1;
   9898 }
   9899 
   9900 /*
   9901  * Media related.
   9902  * GMII, SGMII, TBI (and SERDES)
   9903  */
   9904 
   9905 /* Common */
   9906 
   9907 /*
   9908  * wm_tbi_serdes_set_linkled:
   9909  *
   9910  *	Update the link LED on TBI and SERDES devices.
   9911  */
   9912 static void
   9913 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9914 {
   9915 
   9916 	if (sc->sc_tbi_linkup)
   9917 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9918 	else
   9919 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9920 
   9921 	/* 82540 or newer devices are active low */
   9922 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9923 
   9924 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9925 }
   9926 
   9927 /* GMII related */
   9928 
   9929 /*
   9930  * wm_gmii_reset:
   9931  *
   9932  *	Reset the PHY.
   9933  */
   9934 static void
   9935 wm_gmii_reset(struct wm_softc *sc)
   9936 {
   9937 	uint32_t reg;
   9938 	int rv;
   9939 
   9940 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9941 		device_xname(sc->sc_dev), __func__));
   9942 
   9943 	rv = sc->phy.acquire(sc);
   9944 	if (rv != 0) {
   9945 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9946 		    __func__);
   9947 		return;
   9948 	}
   9949 
   9950 	switch (sc->sc_type) {
   9951 	case WM_T_82542_2_0:
   9952 	case WM_T_82542_2_1:
   9953 		/* null */
   9954 		break;
   9955 	case WM_T_82543:
   9956 		/*
   9957 		 * With 82543, we need to force speed and duplex on the MAC
   9958 		 * equal to what the PHY speed and duplex configuration is.
   9959 		 * In addition, we need to perform a hardware reset on the PHY
   9960 		 * to take it out of reset.
   9961 		 */
   9962 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9963 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9964 
   9965 		/* The PHY reset pin is active-low. */
   9966 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9967 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9968 		    CTRL_EXT_SWDPIN(4));
   9969 		reg |= CTRL_EXT_SWDPIO(4);
   9970 
   9971 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9972 		CSR_WRITE_FLUSH(sc);
   9973 		delay(10*1000);
   9974 
   9975 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9976 		CSR_WRITE_FLUSH(sc);
   9977 		delay(150);
   9978 #if 0
   9979 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9980 #endif
   9981 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9982 		break;
   9983 	case WM_T_82544:	/* Reset 10000us */
   9984 	case WM_T_82540:
   9985 	case WM_T_82545:
   9986 	case WM_T_82545_3:
   9987 	case WM_T_82546:
   9988 	case WM_T_82546_3:
   9989 	case WM_T_82541:
   9990 	case WM_T_82541_2:
   9991 	case WM_T_82547:
   9992 	case WM_T_82547_2:
   9993 	case WM_T_82571:	/* Reset 100us */
   9994 	case WM_T_82572:
   9995 	case WM_T_82573:
   9996 	case WM_T_82574:
   9997 	case WM_T_82575:
   9998 	case WM_T_82576:
   9999 	case WM_T_82580:
   10000 	case WM_T_I350:
   10001 	case WM_T_I354:
   10002 	case WM_T_I210:
   10003 	case WM_T_I211:
   10004 	case WM_T_82583:
   10005 	case WM_T_80003:
   10006 		/* Generic reset */
   10007 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10008 		CSR_WRITE_FLUSH(sc);
   10009 		delay(20000);
   10010 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10011 		CSR_WRITE_FLUSH(sc);
   10012 		delay(20000);
   10013 
   10014 		if ((sc->sc_type == WM_T_82541)
   10015 		    || (sc->sc_type == WM_T_82541_2)
   10016 		    || (sc->sc_type == WM_T_82547)
   10017 		    || (sc->sc_type == WM_T_82547_2)) {
   10018 			/* Workaround for igp are done in igp_reset() */
   10019 			/* XXX add code to set LED after phy reset */
   10020 		}
   10021 		break;
   10022 	case WM_T_ICH8:
   10023 	case WM_T_ICH9:
   10024 	case WM_T_ICH10:
   10025 	case WM_T_PCH:
   10026 	case WM_T_PCH2:
   10027 	case WM_T_PCH_LPT:
   10028 	case WM_T_PCH_SPT:
   10029 	case WM_T_PCH_CNP:
   10030 		/* Generic reset */
   10031 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10032 		CSR_WRITE_FLUSH(sc);
   10033 		delay(100);
   10034 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10035 		CSR_WRITE_FLUSH(sc);
   10036 		delay(150);
   10037 		break;
   10038 	default:
   10039 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10040 		    __func__);
   10041 		break;
   10042 	}
   10043 
   10044 	sc->phy.release(sc);
   10045 
   10046 	/* get_cfg_done */
   10047 	wm_get_cfg_done(sc);
   10048 
   10049 	/* Extra setup */
   10050 	switch (sc->sc_type) {
   10051 	case WM_T_82542_2_0:
   10052 	case WM_T_82542_2_1:
   10053 	case WM_T_82543:
   10054 	case WM_T_82544:
   10055 	case WM_T_82540:
   10056 	case WM_T_82545:
   10057 	case WM_T_82545_3:
   10058 	case WM_T_82546:
   10059 	case WM_T_82546_3:
   10060 	case WM_T_82541_2:
   10061 	case WM_T_82547_2:
   10062 	case WM_T_82571:
   10063 	case WM_T_82572:
   10064 	case WM_T_82573:
   10065 	case WM_T_82574:
   10066 	case WM_T_82583:
   10067 	case WM_T_82575:
   10068 	case WM_T_82576:
   10069 	case WM_T_82580:
   10070 	case WM_T_I350:
   10071 	case WM_T_I354:
   10072 	case WM_T_I210:
   10073 	case WM_T_I211:
   10074 	case WM_T_80003:
   10075 		/* Null */
   10076 		break;
   10077 	case WM_T_82541:
   10078 	case WM_T_82547:
   10079 		/* XXX Configure actively LED after PHY reset */
   10080 		break;
   10081 	case WM_T_ICH8:
   10082 	case WM_T_ICH9:
   10083 	case WM_T_ICH10:
   10084 	case WM_T_PCH:
   10085 	case WM_T_PCH2:
   10086 	case WM_T_PCH_LPT:
   10087 	case WM_T_PCH_SPT:
   10088 	case WM_T_PCH_CNP:
   10089 		wm_phy_post_reset(sc);
   10090 		break;
   10091 	default:
   10092 		panic("%s: unknown type\n", __func__);
   10093 		break;
   10094 	}
   10095 }
   10096 
   10097 /*
   10098  * Setup sc_phytype and mii_{read|write}reg.
   10099  *
   10100  *  To identify PHY type, correct read/write function should be selected.
   10101  * To select correct read/write function, PCI ID or MAC type are required
   10102  * without accessing PHY registers.
   10103  *
   10104  *  On the first call of this function, PHY ID is not known yet. Check
   10105  * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
   10106  * result might be incorrect.
   10107  *
   10108  *  In the second call, PHY OUI and model is used to identify PHY type.
   10109  * It might not be perfect because of the lack of compared entry, but it
   10110  * would be better than the first call.
   10111  *
   10112  *  If the detected new result and previous assumption is different,
   10113  * diagnous message will be printed.
   10114  */
   10115 static void
   10116 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10117     uint16_t phy_model)
   10118 {
   10119 	device_t dev = sc->sc_dev;
   10120 	struct mii_data *mii = &sc->sc_mii;
   10121 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10122 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10123 	mii_readreg_t new_readreg;
   10124 	mii_writereg_t new_writereg;
   10125 	bool dodiag = true;
   10126 
   10127 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10128 		device_xname(sc->sc_dev), __func__));
   10129 
   10130 	/*
   10131 	 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always
   10132 	 * incorrect. So don't print diag output when it's 2nd call.
   10133 	 */
   10134 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10135 		dodiag = false;
   10136 
   10137 	if (mii->mii_readreg == NULL) {
   10138 		/*
   10139 		 *  This is the first call of this function. For ICH and PCH
   10140 		 * variants, it's difficult to determine the PHY access method
   10141 		 * by sc_type, so use the PCI product ID for some devices.
   10142 		 */
   10143 
   10144 		switch (sc->sc_pcidevid) {
   10145 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10146 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10147 			/* 82577 */
   10148 			new_phytype = WMPHY_82577;
   10149 			break;
   10150 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10151 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10152 			/* 82578 */
   10153 			new_phytype = WMPHY_82578;
   10154 			break;
   10155 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10156 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10157 			/* 82579 */
   10158 			new_phytype = WMPHY_82579;
   10159 			break;
   10160 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10161 		case PCI_PRODUCT_INTEL_82801I_BM:
   10162 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10163 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10164 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10165 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10166 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10167 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10168 			/* ICH8, 9, 10 with 82567 */
   10169 			new_phytype = WMPHY_BM;
   10170 			break;
   10171 		default:
   10172 			break;
   10173 		}
   10174 	} else {
   10175 		/* It's not the first call. Use PHY OUI and model */
   10176 		switch (phy_oui) {
   10177 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10178 			switch (phy_model) {
   10179 			case 0x0004: /* XXX */
   10180 				new_phytype = WMPHY_82578;
   10181 				break;
   10182 			default:
   10183 				break;
   10184 			}
   10185 			break;
   10186 		case MII_OUI_xxMARVELL:
   10187 			switch (phy_model) {
   10188 			case MII_MODEL_xxMARVELL_I210:
   10189 				new_phytype = WMPHY_I210;
   10190 				break;
   10191 			case MII_MODEL_xxMARVELL_E1011:
   10192 			case MII_MODEL_xxMARVELL_E1000_3:
   10193 			case MII_MODEL_xxMARVELL_E1000_5:
   10194 			case MII_MODEL_xxMARVELL_E1112:
   10195 				new_phytype = WMPHY_M88;
   10196 				break;
   10197 			case MII_MODEL_xxMARVELL_E1149:
   10198 				new_phytype = WMPHY_BM;
   10199 				break;
   10200 			case MII_MODEL_xxMARVELL_E1111:
   10201 			case MII_MODEL_xxMARVELL_I347:
   10202 			case MII_MODEL_xxMARVELL_E1512:
   10203 			case MII_MODEL_xxMARVELL_E1340M:
   10204 			case MII_MODEL_xxMARVELL_E1543:
   10205 				new_phytype = WMPHY_M88;
   10206 				break;
   10207 			case MII_MODEL_xxMARVELL_I82563:
   10208 				new_phytype = WMPHY_GG82563;
   10209 				break;
   10210 			default:
   10211 				break;
   10212 			}
   10213 			break;
   10214 		case MII_OUI_INTEL:
   10215 			switch (phy_model) {
   10216 			case MII_MODEL_INTEL_I82577:
   10217 				new_phytype = WMPHY_82577;
   10218 				break;
   10219 			case MII_MODEL_INTEL_I82579:
   10220 				new_phytype = WMPHY_82579;
   10221 				break;
   10222 			case MII_MODEL_INTEL_I217:
   10223 				new_phytype = WMPHY_I217;
   10224 				break;
   10225 			case MII_MODEL_INTEL_I82580:
   10226 			case MII_MODEL_INTEL_I350:
   10227 				new_phytype = WMPHY_82580;
   10228 				break;
   10229 			default:
   10230 				break;
   10231 			}
   10232 			break;
   10233 		case MII_OUI_yyINTEL:
   10234 			switch (phy_model) {
   10235 			case MII_MODEL_yyINTEL_I82562G:
   10236 			case MII_MODEL_yyINTEL_I82562EM:
   10237 			case MII_MODEL_yyINTEL_I82562ET:
   10238 				new_phytype = WMPHY_IFE;
   10239 				break;
   10240 			case MII_MODEL_yyINTEL_IGP01E1000:
   10241 				new_phytype = WMPHY_IGP;
   10242 				break;
   10243 			case MII_MODEL_yyINTEL_I82566:
   10244 				new_phytype = WMPHY_IGP_3;
   10245 				break;
   10246 			default:
   10247 				break;
   10248 			}
   10249 			break;
   10250 		default:
   10251 			break;
   10252 		}
   10253 
   10254 		if (dodiag) {
   10255 			if (new_phytype == WMPHY_UNKNOWN)
   10256 				aprint_verbose_dev(dev,
   10257 				    "%s: Unknown PHY model. OUI=%06x, "
   10258 				    "model=%04x\n", __func__, phy_oui,
   10259 				    phy_model);
   10260 
   10261 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10262 			    && (sc->sc_phytype != new_phytype)) {
   10263 				aprint_error_dev(dev, "Previously assumed PHY "
   10264 				    "type(%u) was incorrect. PHY type from PHY"
   10265 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10266 			}
   10267 		}
   10268 	}
   10269 
   10270 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10271 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10272 		/* SGMII */
   10273 		new_readreg = wm_sgmii_readreg;
   10274 		new_writereg = wm_sgmii_writereg;
   10275 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10276 		/* BM2 (phyaddr == 1) */
   10277 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10278 		    && (new_phytype != WMPHY_BM)
   10279 		    && (new_phytype != WMPHY_UNKNOWN))
   10280 			doubt_phytype = new_phytype;
   10281 		new_phytype = WMPHY_BM;
   10282 		new_readreg = wm_gmii_bm_readreg;
   10283 		new_writereg = wm_gmii_bm_writereg;
   10284 	} else if (sc->sc_type >= WM_T_PCH) {
   10285 		/* All PCH* use _hv_ */
   10286 		new_readreg = wm_gmii_hv_readreg;
   10287 		new_writereg = wm_gmii_hv_writereg;
   10288 	} else if (sc->sc_type >= WM_T_ICH8) {
   10289 		/* non-82567 ICH8, 9 and 10 */
   10290 		new_readreg = wm_gmii_i82544_readreg;
   10291 		new_writereg = wm_gmii_i82544_writereg;
   10292 	} else if (sc->sc_type >= WM_T_80003) {
   10293 		/* 80003 */
   10294 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10295 		    && (new_phytype != WMPHY_GG82563)
   10296 		    && (new_phytype != WMPHY_UNKNOWN))
   10297 			doubt_phytype = new_phytype;
   10298 		new_phytype = WMPHY_GG82563;
   10299 		new_readreg = wm_gmii_i80003_readreg;
   10300 		new_writereg = wm_gmii_i80003_writereg;
   10301 	} else if (sc->sc_type >= WM_T_I210) {
   10302 		/* I210 and I211 */
   10303 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10304 		    && (new_phytype != WMPHY_I210)
   10305 		    && (new_phytype != WMPHY_UNKNOWN))
   10306 			doubt_phytype = new_phytype;
   10307 		new_phytype = WMPHY_I210;
   10308 		new_readreg = wm_gmii_gs40g_readreg;
   10309 		new_writereg = wm_gmii_gs40g_writereg;
   10310 	} else if (sc->sc_type >= WM_T_82580) {
   10311 		/* 82580, I350 and I354 */
   10312 		new_readreg = wm_gmii_82580_readreg;
   10313 		new_writereg = wm_gmii_82580_writereg;
   10314 	} else if (sc->sc_type >= WM_T_82544) {
   10315 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   10316 		new_readreg = wm_gmii_i82544_readreg;
   10317 		new_writereg = wm_gmii_i82544_writereg;
   10318 	} else {
   10319 		new_readreg = wm_gmii_i82543_readreg;
   10320 		new_writereg = wm_gmii_i82543_writereg;
   10321 	}
   10322 
   10323 	if (new_phytype == WMPHY_BM) {
   10324 		/* All BM use _bm_ */
   10325 		new_readreg = wm_gmii_bm_readreg;
   10326 		new_writereg = wm_gmii_bm_writereg;
   10327 	}
   10328 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10329 		/* All PCH* use _hv_ */
   10330 		new_readreg = wm_gmii_hv_readreg;
   10331 		new_writereg = wm_gmii_hv_writereg;
   10332 	}
   10333 
   10334 	/* Diag output */
   10335 	if (dodiag) {
   10336 		if (doubt_phytype != WMPHY_UNKNOWN)
   10337 			aprint_error_dev(dev, "Assumed new PHY type was "
   10338 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10339 			    new_phytype);
   10340 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10341 		    && (sc->sc_phytype != new_phytype))
   10342 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   10343 			    "was incorrect. New PHY type = %u\n",
   10344 			    sc->sc_phytype, new_phytype);
   10345 
   10346 		if ((mii->mii_readreg != NULL) &&
   10347 		    (new_phytype == WMPHY_UNKNOWN))
   10348 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10349 
   10350 		if ((mii->mii_readreg != NULL) &&
   10351 		    (mii->mii_readreg != new_readreg))
   10352 			aprint_error_dev(dev, "Previously assumed PHY "
   10353 			    "read/write function was incorrect.\n");
   10354 	}
   10355 
   10356 	/* Update now */
   10357 	sc->sc_phytype = new_phytype;
   10358 	mii->mii_readreg = new_readreg;
   10359 	mii->mii_writereg = new_writereg;
   10360 	if (new_readreg == wm_gmii_hv_readreg) {
   10361 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10362 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10363 	} else if (new_readreg == wm_sgmii_readreg) {
   10364 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10365 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10366 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10367 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10368 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10369 	}
   10370 }
   10371 
   10372 /*
   10373  * wm_get_phy_id_82575:
   10374  *
   10375  * Return PHY ID. Return -1 if it failed.
   10376  */
   10377 static int
   10378 wm_get_phy_id_82575(struct wm_softc *sc)
   10379 {
   10380 	uint32_t reg;
   10381 	int phyid = -1;
   10382 
   10383 	/* XXX */
   10384 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10385 		return -1;
   10386 
   10387 	if (wm_sgmii_uses_mdio(sc)) {
   10388 		switch (sc->sc_type) {
   10389 		case WM_T_82575:
   10390 		case WM_T_82576:
   10391 			reg = CSR_READ(sc, WMREG_MDIC);
   10392 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10393 			break;
   10394 		case WM_T_82580:
   10395 		case WM_T_I350:
   10396 		case WM_T_I354:
   10397 		case WM_T_I210:
   10398 		case WM_T_I211:
   10399 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10400 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10401 			break;
   10402 		default:
   10403 			return -1;
   10404 		}
   10405 	}
   10406 
   10407 	return phyid;
   10408 }
   10409 
   10410 /*
   10411  * wm_gmii_mediainit:
   10412  *
   10413  *	Initialize media for use on 1000BASE-T devices.
   10414  */
   10415 static void
   10416 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10417 {
   10418 	device_t dev = sc->sc_dev;
   10419 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10420 	struct mii_data *mii = &sc->sc_mii;
   10421 
   10422 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10423 		device_xname(sc->sc_dev), __func__));
   10424 
   10425 	/* We have GMII. */
   10426 	sc->sc_flags |= WM_F_HAS_MII;
   10427 
   10428 	if (sc->sc_type == WM_T_80003)
   10429 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10430 	else
   10431 		sc->sc_tipg = TIPG_1000T_DFLT;
   10432 
   10433 	/*
   10434 	 * Let the chip set speed/duplex on its own based on
   10435 	 * signals from the PHY.
   10436 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10437 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10438 	 */
   10439 	sc->sc_ctrl |= CTRL_SLU;
   10440 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10441 
   10442 	/* Initialize our media structures and probe the GMII. */
   10443 	mii->mii_ifp = ifp;
   10444 
   10445 	mii->mii_statchg = wm_gmii_statchg;
   10446 
   10447 	/* get PHY control from SMBus to PCIe */
   10448 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10449 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10450 	    || (sc->sc_type == WM_T_PCH_CNP))
   10451 		wm_init_phy_workarounds_pchlan(sc);
   10452 
   10453 	wm_gmii_reset(sc);
   10454 
   10455 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10456 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10457 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10458 
   10459 	/* Setup internal SGMII PHY for SFP */
   10460 	wm_sgmii_sfp_preconfig(sc);
   10461 
   10462 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10463 	    || (sc->sc_type == WM_T_82580)
   10464 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10465 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10466 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10467 			/* Attach only one port */
   10468 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10469 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10470 		} else {
   10471 			int i, id;
   10472 			uint32_t ctrl_ext;
   10473 
   10474 			id = wm_get_phy_id_82575(sc);
   10475 			if (id != -1) {
   10476 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10477 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10478 			}
   10479 			if ((id == -1)
   10480 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10481 				/* Power on sgmii phy if it is disabled */
   10482 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10483 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10484 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10485 				CSR_WRITE_FLUSH(sc);
   10486 				delay(300*1000); /* XXX too long */
   10487 
   10488 				/*
   10489 				 * From 1 to 8.
   10490 				 *
   10491 				 * I2C access fails with I2C register's ERROR
   10492 				 * bit set, so prevent error message while
   10493 				 * scanning.
   10494 				 */
   10495 				sc->phy.no_errprint = true;
   10496 				for (i = 1; i < 8; i++)
   10497 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10498 					    0xffffffff, i, MII_OFFSET_ANY,
   10499 					    MIIF_DOPAUSE);
   10500 				sc->phy.no_errprint = false;
   10501 
   10502 				/* Restore previous sfp cage power state */
   10503 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10504 			}
   10505 		}
   10506 	} else
   10507 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10508 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10509 
   10510 	/*
   10511 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10512 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10513 	 */
   10514 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10515 		|| (sc->sc_type == WM_T_PCH_SPT)
   10516 		|| (sc->sc_type == WM_T_PCH_CNP))
   10517 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10518 		wm_set_mdio_slow_mode_hv(sc);
   10519 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10520 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10521 	}
   10522 
   10523 	/*
   10524 	 * (For ICH8 variants)
   10525 	 * If PHY detection failed, use BM's r/w function and retry.
   10526 	 */
   10527 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10528 		/* if failed, retry with *_bm_* */
   10529 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10530 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10531 		    sc->sc_phytype);
   10532 		sc->sc_phytype = WMPHY_BM;
   10533 		mii->mii_readreg = wm_gmii_bm_readreg;
   10534 		mii->mii_writereg = wm_gmii_bm_writereg;
   10535 
   10536 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10537 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10538 	}
   10539 
   10540 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10541 		/* Any PHY wasn't find */
   10542 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10543 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10544 		sc->sc_phytype = WMPHY_NONE;
   10545 	} else {
   10546 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10547 
   10548 		/*
   10549 		 * PHY Found! Check PHY type again by the second call of
   10550 		 * wm_gmii_setup_phytype.
   10551 		 */
   10552 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10553 		    child->mii_mpd_model);
   10554 
   10555 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10556 	}
   10557 }
   10558 
   10559 /*
   10560  * wm_gmii_mediachange:	[ifmedia interface function]
   10561  *
   10562  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10563  */
   10564 static int
   10565 wm_gmii_mediachange(struct ifnet *ifp)
   10566 {
   10567 	struct wm_softc *sc = ifp->if_softc;
   10568 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10569 	uint32_t reg;
   10570 	int rc;
   10571 
   10572 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10573 		device_xname(sc->sc_dev), __func__));
   10574 	if ((ifp->if_flags & IFF_UP) == 0)
   10575 		return 0;
   10576 
   10577 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10578 	if ((sc->sc_type == WM_T_82580)
   10579 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10580 	    || (sc->sc_type == WM_T_I211)) {
   10581 		reg = CSR_READ(sc, WMREG_PHPM);
   10582 		reg &= ~PHPM_GO_LINK_D;
   10583 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10584 	}
   10585 
   10586 	/* Disable D0 LPLU. */
   10587 	wm_lplu_d0_disable(sc);
   10588 
   10589 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10590 	sc->sc_ctrl |= CTRL_SLU;
   10591 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10592 	    || (sc->sc_type > WM_T_82543)) {
   10593 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10594 	} else {
   10595 		sc->sc_ctrl &= ~CTRL_ASDE;
   10596 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10597 		if (ife->ifm_media & IFM_FDX)
   10598 			sc->sc_ctrl |= CTRL_FD;
   10599 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10600 		case IFM_10_T:
   10601 			sc->sc_ctrl |= CTRL_SPEED_10;
   10602 			break;
   10603 		case IFM_100_TX:
   10604 			sc->sc_ctrl |= CTRL_SPEED_100;
   10605 			break;
   10606 		case IFM_1000_T:
   10607 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10608 			break;
   10609 		case IFM_NONE:
   10610 			/* There is no specific setting for IFM_NONE */
   10611 			break;
   10612 		default:
   10613 			panic("wm_gmii_mediachange: bad media 0x%x",
   10614 			    ife->ifm_media);
   10615 		}
   10616 	}
   10617 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10618 	CSR_WRITE_FLUSH(sc);
   10619 
   10620 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10621 		wm_serdes_mediachange(ifp);
   10622 
   10623 	if (sc->sc_type <= WM_T_82543)
   10624 		wm_gmii_reset(sc);
   10625 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10626 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
   10627 		/* allow time for SFP cage time to power up phy */
   10628 		delay(300 * 1000);
   10629 		wm_gmii_reset(sc);
   10630 	}
   10631 
   10632 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10633 		return 0;
   10634 	return rc;
   10635 }
   10636 
   10637 /*
   10638  * wm_gmii_mediastatus:	[ifmedia interface function]
   10639  *
   10640  *	Get the current interface media status on a 1000BASE-T device.
   10641  */
   10642 static void
   10643 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10644 {
   10645 	struct wm_softc *sc = ifp->if_softc;
   10646 
   10647 	ether_mediastatus(ifp, ifmr);
   10648 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10649 	    | sc->sc_flowflags;
   10650 }
   10651 
   10652 #define	MDI_IO		CTRL_SWDPIN(2)
   10653 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10654 #define	MDI_CLK		CTRL_SWDPIN(3)
   10655 
   10656 static void
   10657 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10658 {
   10659 	uint32_t i, v;
   10660 
   10661 	v = CSR_READ(sc, WMREG_CTRL);
   10662 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10663 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10664 
   10665 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10666 		if (data & i)
   10667 			v |= MDI_IO;
   10668 		else
   10669 			v &= ~MDI_IO;
   10670 		CSR_WRITE(sc, WMREG_CTRL, v);
   10671 		CSR_WRITE_FLUSH(sc);
   10672 		delay(10);
   10673 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10674 		CSR_WRITE_FLUSH(sc);
   10675 		delay(10);
   10676 		CSR_WRITE(sc, WMREG_CTRL, v);
   10677 		CSR_WRITE_FLUSH(sc);
   10678 		delay(10);
   10679 	}
   10680 }
   10681 
   10682 static uint16_t
   10683 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10684 {
   10685 	uint32_t v, i;
   10686 	uint16_t data = 0;
   10687 
   10688 	v = CSR_READ(sc, WMREG_CTRL);
   10689 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10690 	v |= CTRL_SWDPIO(3);
   10691 
   10692 	CSR_WRITE(sc, WMREG_CTRL, v);
   10693 	CSR_WRITE_FLUSH(sc);
   10694 	delay(10);
   10695 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10696 	CSR_WRITE_FLUSH(sc);
   10697 	delay(10);
   10698 	CSR_WRITE(sc, WMREG_CTRL, v);
   10699 	CSR_WRITE_FLUSH(sc);
   10700 	delay(10);
   10701 
   10702 	for (i = 0; i < 16; i++) {
   10703 		data <<= 1;
   10704 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10705 		CSR_WRITE_FLUSH(sc);
   10706 		delay(10);
   10707 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10708 			data |= 1;
   10709 		CSR_WRITE(sc, WMREG_CTRL, v);
   10710 		CSR_WRITE_FLUSH(sc);
   10711 		delay(10);
   10712 	}
   10713 
   10714 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10715 	CSR_WRITE_FLUSH(sc);
   10716 	delay(10);
   10717 	CSR_WRITE(sc, WMREG_CTRL, v);
   10718 	CSR_WRITE_FLUSH(sc);
   10719 	delay(10);
   10720 
   10721 	return data;
   10722 }
   10723 
   10724 #undef MDI_IO
   10725 #undef MDI_DIR
   10726 #undef MDI_CLK
   10727 
   10728 /*
   10729  * wm_gmii_i82543_readreg:	[mii interface function]
   10730  *
   10731  *	Read a PHY register on the GMII (i82543 version).
   10732  */
   10733 static int
   10734 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10735 {
   10736 	struct wm_softc *sc = device_private(dev);
   10737 
   10738 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10739 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10740 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10741 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10742 
   10743 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10744 		device_xname(dev), phy, reg, *val));
   10745 
   10746 	return 0;
   10747 }
   10748 
   10749 /*
   10750  * wm_gmii_i82543_writereg:	[mii interface function]
   10751  *
   10752  *	Write a PHY register on the GMII (i82543 version).
   10753  */
   10754 static int
   10755 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10756 {
   10757 	struct wm_softc *sc = device_private(dev);
   10758 
   10759 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10760 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10761 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10762 	    (MII_COMMAND_START << 30), 32);
   10763 
   10764 	return 0;
   10765 }
   10766 
   10767 /*
   10768  * wm_gmii_mdic_readreg:	[mii interface function]
   10769  *
   10770  *	Read a PHY register on the GMII.
   10771  */
   10772 static int
   10773 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10774 {
   10775 	struct wm_softc *sc = device_private(dev);
   10776 	uint32_t mdic = 0;
   10777 	int i;
   10778 
   10779 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10780 	    && (reg > MII_ADDRMASK)) {
   10781 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10782 		    __func__, sc->sc_phytype, reg);
   10783 		reg &= MII_ADDRMASK;
   10784 	}
   10785 
   10786 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10787 	    MDIC_REGADD(reg));
   10788 
   10789 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10790 		delay(50);
   10791 		mdic = CSR_READ(sc, WMREG_MDIC);
   10792 		if (mdic & MDIC_READY)
   10793 			break;
   10794 	}
   10795 
   10796 	if ((mdic & MDIC_READY) == 0) {
   10797 		DPRINTF(WM_DEBUG_GMII,
   10798 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10799 			device_xname(dev), phy, reg));
   10800 		return ETIMEDOUT;
   10801 	} else if (mdic & MDIC_E) {
   10802 		/* This is normal if no PHY is present. */
   10803 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10804 			device_xname(sc->sc_dev), phy, reg));
   10805 		return -1;
   10806 	} else
   10807 		*val = MDIC_DATA(mdic);
   10808 
   10809 	/*
   10810 	 * Allow some time after each MDIC transaction to avoid
   10811 	 * reading duplicate data in the next MDIC transaction.
   10812 	 */
   10813 	if (sc->sc_type == WM_T_PCH2)
   10814 		delay(100);
   10815 
   10816 	return 0;
   10817 }
   10818 
   10819 /*
   10820  * wm_gmii_mdic_writereg:	[mii interface function]
   10821  *
   10822  *	Write a PHY register on the GMII.
   10823  */
   10824 static int
   10825 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10826 {
   10827 	struct wm_softc *sc = device_private(dev);
   10828 	uint32_t mdic = 0;
   10829 	int i;
   10830 
   10831 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10832 	    && (reg > MII_ADDRMASK)) {
   10833 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10834 		    __func__, sc->sc_phytype, reg);
   10835 		reg &= MII_ADDRMASK;
   10836 	}
   10837 
   10838 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10839 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10840 
   10841 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10842 		delay(50);
   10843 		mdic = CSR_READ(sc, WMREG_MDIC);
   10844 		if (mdic & MDIC_READY)
   10845 			break;
   10846 	}
   10847 
   10848 	if ((mdic & MDIC_READY) == 0) {
   10849 		DPRINTF(WM_DEBUG_GMII,
   10850 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10851 			device_xname(dev), phy, reg));
   10852 		return ETIMEDOUT;
   10853 	} else if (mdic & MDIC_E) {
   10854 		DPRINTF(WM_DEBUG_GMII,
   10855 		    ("%s: MDIC write error: phy %d reg %d\n",
   10856 			device_xname(dev), phy, reg));
   10857 		return -1;
   10858 	}
   10859 
   10860 	/*
   10861 	 * Allow some time after each MDIC transaction to avoid
   10862 	 * reading duplicate data in the next MDIC transaction.
   10863 	 */
   10864 	if (sc->sc_type == WM_T_PCH2)
   10865 		delay(100);
   10866 
   10867 	return 0;
   10868 }
   10869 
   10870 /*
   10871  * wm_gmii_i82544_readreg:	[mii interface function]
   10872  *
   10873  *	Read a PHY register on the GMII.
   10874  */
   10875 static int
   10876 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10877 {
   10878 	struct wm_softc *sc = device_private(dev);
   10879 	int rv;
   10880 
   10881 	if (sc->phy.acquire(sc)) {
   10882 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10883 		return -1;
   10884 	}
   10885 
   10886 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10887 
   10888 	sc->phy.release(sc);
   10889 
   10890 	return rv;
   10891 }
   10892 
   10893 static int
   10894 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10895 {
   10896 	struct wm_softc *sc = device_private(dev);
   10897 	int rv;
   10898 
   10899 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10900 		switch (sc->sc_phytype) {
   10901 		case WMPHY_IGP:
   10902 		case WMPHY_IGP_2:
   10903 		case WMPHY_IGP_3:
   10904 			rv = wm_gmii_mdic_writereg(dev, phy,
   10905 			    IGPHY_PAGE_SELECT, reg);
   10906 			if (rv != 0)
   10907 				return rv;
   10908 			break;
   10909 		default:
   10910 #ifdef WM_DEBUG
   10911 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10912 			    __func__, sc->sc_phytype, reg);
   10913 #endif
   10914 			break;
   10915 		}
   10916 	}
   10917 
   10918 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10919 }
   10920 
   10921 /*
   10922  * wm_gmii_i82544_writereg:	[mii interface function]
   10923  *
   10924  *	Write a PHY register on the GMII.
   10925  */
   10926 static int
   10927 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10928 {
   10929 	struct wm_softc *sc = device_private(dev);
   10930 	int rv;
   10931 
   10932 	if (sc->phy.acquire(sc)) {
   10933 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10934 		return -1;
   10935 	}
   10936 
   10937 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10938 	sc->phy.release(sc);
   10939 
   10940 	return rv;
   10941 }
   10942 
   10943 static int
   10944 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10945 {
   10946 	struct wm_softc *sc = device_private(dev);
   10947 	int rv;
   10948 
   10949 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10950 		switch (sc->sc_phytype) {
   10951 		case WMPHY_IGP:
   10952 		case WMPHY_IGP_2:
   10953 		case WMPHY_IGP_3:
   10954 			rv = wm_gmii_mdic_writereg(dev, phy,
   10955 			    IGPHY_PAGE_SELECT, reg);
   10956 			if (rv != 0)
   10957 				return rv;
   10958 			break;
   10959 		default:
   10960 #ifdef WM_DEBUG
   10961 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10962 			    __func__, sc->sc_phytype, reg);
   10963 #endif
   10964 			break;
   10965 		}
   10966 	}
   10967 
   10968 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10969 }
   10970 
   10971 /*
   10972  * wm_gmii_i80003_readreg:	[mii interface function]
   10973  *
   10974  *	Read a PHY register on the kumeran
   10975  * This could be handled by the PHY layer if we didn't have to lock the
   10976  * resource ...
   10977  */
   10978 static int
   10979 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10980 {
   10981 	struct wm_softc *sc = device_private(dev);
   10982 	int page_select;
   10983 	uint16_t temp, temp2;
   10984 	int rv = 0;
   10985 
   10986 	if (phy != 1) /* Only one PHY on kumeran bus */
   10987 		return -1;
   10988 
   10989 	if (sc->phy.acquire(sc)) {
   10990 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10991 		return -1;
   10992 	}
   10993 
   10994 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10995 		page_select = GG82563_PHY_PAGE_SELECT;
   10996 	else {
   10997 		/*
   10998 		 * Use Alternative Page Select register to access registers
   10999 		 * 30 and 31.
   11000 		 */
   11001 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11002 	}
   11003 	temp = reg >> GG82563_PAGE_SHIFT;
   11004 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11005 		goto out;
   11006 
   11007 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11008 		/*
   11009 		 * Wait more 200us for a bug of the ready bit in the MDIC
   11010 		 * register.
   11011 		 */
   11012 		delay(200);
   11013 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11014 		if ((rv != 0) || (temp2 != temp)) {
   11015 			device_printf(dev, "%s failed\n", __func__);
   11016 			rv = -1;
   11017 			goto out;
   11018 		}
   11019 		delay(200);
   11020 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11021 		delay(200);
   11022 	} else
   11023 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11024 
   11025 out:
   11026 	sc->phy.release(sc);
   11027 	return rv;
   11028 }
   11029 
   11030 /*
   11031  * wm_gmii_i80003_writereg:	[mii interface function]
   11032  *
   11033  *	Write a PHY register on the kumeran.
   11034  * This could be handled by the PHY layer if we didn't have to lock the
   11035  * resource ...
   11036  */
   11037 static int
   11038 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11039 {
   11040 	struct wm_softc *sc = device_private(dev);
   11041 	int page_select, rv;
   11042 	uint16_t temp, temp2;
   11043 
   11044 	if (phy != 1) /* Only one PHY on kumeran bus */
   11045 		return -1;
   11046 
   11047 	if (sc->phy.acquire(sc)) {
   11048 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11049 		return -1;
   11050 	}
   11051 
   11052 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11053 		page_select = GG82563_PHY_PAGE_SELECT;
   11054 	else {
   11055 		/*
   11056 		 * Use Alternative Page Select register to access registers
   11057 		 * 30 and 31.
   11058 		 */
   11059 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11060 	}
   11061 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11062 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11063 		goto out;
   11064 
   11065 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11066 		/*
   11067 		 * Wait more 200us for a bug of the ready bit in the MDIC
   11068 		 * register.
   11069 		 */
   11070 		delay(200);
   11071 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11072 		if ((rv != 0) || (temp2 != temp)) {
   11073 			device_printf(dev, "%s failed\n", __func__);
   11074 			rv = -1;
   11075 			goto out;
   11076 		}
   11077 		delay(200);
   11078 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11079 		delay(200);
   11080 	} else
   11081 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11082 
   11083 out:
   11084 	sc->phy.release(sc);
   11085 	return rv;
   11086 }
   11087 
   11088 /*
   11089  * wm_gmii_bm_readreg:	[mii interface function]
   11090  *
   11091  *	Read a PHY register on the kumeran
   11092  * This could be handled by the PHY layer if we didn't have to lock the
   11093  * resource ...
   11094  */
   11095 static int
   11096 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11097 {
   11098 	struct wm_softc *sc = device_private(dev);
   11099 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11100 	int rv;
   11101 
   11102 	if (sc->phy.acquire(sc)) {
   11103 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11104 		return -1;
   11105 	}
   11106 
   11107 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11108 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11109 		    || (reg == 31)) ? 1 : phy;
   11110 	/* Page 800 works differently than the rest so it has its own func */
   11111 	if (page == BM_WUC_PAGE) {
   11112 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11113 		goto release;
   11114 	}
   11115 
   11116 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11117 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11118 		    && (sc->sc_type != WM_T_82583))
   11119 			rv = wm_gmii_mdic_writereg(dev, phy,
   11120 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11121 		else
   11122 			rv = wm_gmii_mdic_writereg(dev, phy,
   11123 			    BME1000_PHY_PAGE_SELECT, page);
   11124 		if (rv != 0)
   11125 			goto release;
   11126 	}
   11127 
   11128 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11129 
   11130 release:
   11131 	sc->phy.release(sc);
   11132 	return rv;
   11133 }
   11134 
   11135 /*
   11136  * wm_gmii_bm_writereg:	[mii interface function]
   11137  *
   11138  *	Write a PHY register on the kumeran.
   11139  * This could be handled by the PHY layer if we didn't have to lock the
   11140  * resource ...
   11141  */
   11142 static int
   11143 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11144 {
   11145 	struct wm_softc *sc = device_private(dev);
   11146 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11147 	int rv;
   11148 
   11149 	if (sc->phy.acquire(sc)) {
   11150 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11151 		return -1;
   11152 	}
   11153 
   11154 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11155 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11156 		    || (reg == 31)) ? 1 : phy;
   11157 	/* Page 800 works differently than the rest so it has its own func */
   11158 	if (page == BM_WUC_PAGE) {
   11159 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11160 		goto release;
   11161 	}
   11162 
   11163 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11164 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11165 		    && (sc->sc_type != WM_T_82583))
   11166 			rv = wm_gmii_mdic_writereg(dev, phy,
   11167 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11168 		else
   11169 			rv = wm_gmii_mdic_writereg(dev, phy,
   11170 			    BME1000_PHY_PAGE_SELECT, page);
   11171 		if (rv != 0)
   11172 			goto release;
   11173 	}
   11174 
   11175 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11176 
   11177 release:
   11178 	sc->phy.release(sc);
   11179 	return rv;
   11180 }
   11181 
   11182 /*
   11183  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11184  *  @dev: pointer to the HW structure
   11185  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11186  *
   11187  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11188  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11189  */
   11190 static int
   11191 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11192 {
   11193 	uint16_t temp;
   11194 	int rv;
   11195 
   11196 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11197 		device_xname(dev), __func__));
   11198 
   11199 	if (!phy_regp)
   11200 		return -1;
   11201 
   11202 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11203 
   11204 	/* Select Port Control Registers page */
   11205 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11206 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11207 	if (rv != 0)
   11208 		return rv;
   11209 
   11210 	/* Read WUCE and save it */
   11211 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11212 	if (rv != 0)
   11213 		return rv;
   11214 
   11215 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11216 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11217 	 */
   11218 	temp = *phy_regp;
   11219 	temp |= BM_WUC_ENABLE_BIT;
   11220 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11221 
   11222 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11223 		return rv;
   11224 
   11225 	/* Select Host Wakeup Registers page - caller now able to write
   11226 	 * registers on the Wakeup registers page
   11227 	 */
   11228 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11229 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11230 }
   11231 
   11232 /*
   11233  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11234  *  @dev: pointer to the HW structure
   11235  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11236  *
   11237  *  Restore BM_WUC_ENABLE_REG to its original value.
   11238  *
   11239  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11240  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11241  *  caller.
   11242  */
   11243 static int
   11244 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11245 {
   11246 
   11247 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11248 		device_xname(dev), __func__));
   11249 
   11250 	if (!phy_regp)
   11251 		return -1;
   11252 
   11253 	/* Select Port Control Registers page */
   11254 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11255 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11256 
   11257 	/* Restore 769.17 to its original value */
   11258 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11259 
   11260 	return 0;
   11261 }
   11262 
   11263 /*
   11264  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11265  *  @sc: pointer to the HW structure
   11266  *  @offset: register offset to be read or written
   11267  *  @val: pointer to the data to read or write
   11268  *  @rd: determines if operation is read or write
   11269  *  @page_set: BM_WUC_PAGE already set and access enabled
   11270  *
   11271  *  Read the PHY register at offset and store the retrieved information in
   11272  *  data, or write data to PHY register at offset.  Note the procedure to
   11273  *  access the PHY wakeup registers is different than reading the other PHY
   11274  *  registers. It works as such:
   11275  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11276  *  2) Set page to 800 for host (801 if we were manageability)
   11277  *  3) Write the address using the address opcode (0x11)
   11278  *  4) Read or write the data using the data opcode (0x12)
   11279  *  5) Restore 769.17.2 to its original value
   11280  *
   11281  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11282  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11283  *
   11284  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11285  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11286  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11287  */
   11288 static int
   11289 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
   11290 	bool page_set)
   11291 {
   11292 	struct wm_softc *sc = device_private(dev);
   11293 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11294 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11295 	uint16_t wuce;
   11296 	int rv = 0;
   11297 
   11298 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11299 		device_xname(dev), __func__));
   11300 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11301 	if ((sc->sc_type == WM_T_PCH)
   11302 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11303 		device_printf(dev,
   11304 		    "Attempting to access page %d while gig enabled.\n", page);
   11305 	}
   11306 
   11307 	if (!page_set) {
   11308 		/* Enable access to PHY wakeup registers */
   11309 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11310 		if (rv != 0) {
   11311 			device_printf(dev,
   11312 			    "%s: Could not enable PHY wakeup reg access\n",
   11313 			    __func__);
   11314 			return rv;
   11315 		}
   11316 	}
   11317 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11318 		device_xname(sc->sc_dev), __func__, page, regnum));
   11319 
   11320 	/*
   11321 	 * 2) Access PHY wakeup register.
   11322 	 * See wm_access_phy_wakeup_reg_bm.
   11323 	 */
   11324 
   11325 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11326 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11327 	if (rv != 0)
   11328 		return rv;
   11329 
   11330 	if (rd) {
   11331 		/* Read the Wakeup register page value using opcode 0x12 */
   11332 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11333 	} else {
   11334 		/* Write the Wakeup register page value using opcode 0x12 */
   11335 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11336 	}
   11337 	if (rv != 0)
   11338 		return rv;
   11339 
   11340 	if (!page_set)
   11341 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11342 
   11343 	return rv;
   11344 }
   11345 
   11346 /*
   11347  * wm_gmii_hv_readreg:	[mii interface function]
   11348  *
   11349  *	Read a PHY register on the kumeran
   11350  * This could be handled by the PHY layer if we didn't have to lock the
   11351  * resource ...
   11352  */
   11353 static int
   11354 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11355 {
   11356 	struct wm_softc *sc = device_private(dev);
   11357 	int rv;
   11358 
   11359 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11360 		device_xname(dev), __func__));
   11361 	if (sc->phy.acquire(sc)) {
   11362 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11363 		return -1;
   11364 	}
   11365 
   11366 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11367 	sc->phy.release(sc);
   11368 	return rv;
   11369 }
   11370 
   11371 static int
   11372 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11373 {
   11374 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11375 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11376 	int rv;
   11377 
   11378 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11379 
   11380 	/* Page 800 works differently than the rest so it has its own func */
   11381 	if (page == BM_WUC_PAGE)
   11382 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11383 
   11384 	/*
   11385 	 * Lower than page 768 works differently than the rest so it has its
   11386 	 * own func
   11387 	 */
   11388 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11389 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11390 		return -1;
   11391 	}
   11392 
   11393 	/*
   11394 	 * XXX I21[789] documents say that the SMBus Address register is at
   11395 	 * PHY address 01, Page 0 (not 768), Register 26.
   11396 	 */
   11397 	if (page == HV_INTC_FC_PAGE_START)
   11398 		page = 0;
   11399 
   11400 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11401 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11402 		    page << BME1000_PAGE_SHIFT);
   11403 		if (rv != 0)
   11404 			return rv;
   11405 	}
   11406 
   11407 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11408 }
   11409 
   11410 /*
   11411  * wm_gmii_hv_writereg:	[mii interface function]
   11412  *
   11413  *	Write a PHY register on the kumeran.
   11414  * This could be handled by the PHY layer if we didn't have to lock the
   11415  * resource ...
   11416  */
   11417 static int
   11418 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11419 {
   11420 	struct wm_softc *sc = device_private(dev);
   11421 	int rv;
   11422 
   11423 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11424 		device_xname(dev), __func__));
   11425 
   11426 	if (sc->phy.acquire(sc)) {
   11427 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11428 		return -1;
   11429 	}
   11430 
   11431 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11432 	sc->phy.release(sc);
   11433 
   11434 	return rv;
   11435 }
   11436 
   11437 static int
   11438 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11439 {
   11440 	struct wm_softc *sc = device_private(dev);
   11441 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11442 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11443 	int rv;
   11444 
   11445 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11446 
   11447 	/* Page 800 works differently than the rest so it has its own func */
   11448 	if (page == BM_WUC_PAGE)
   11449 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11450 		    false);
   11451 
   11452 	/*
   11453 	 * Lower than page 768 works differently than the rest so it has its
   11454 	 * own func
   11455 	 */
   11456 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11457 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11458 		return -1;
   11459 	}
   11460 
   11461 	{
   11462 		/*
   11463 		 * XXX I21[789] documents say that the SMBus Address register
   11464 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11465 		 */
   11466 		if (page == HV_INTC_FC_PAGE_START)
   11467 			page = 0;
   11468 
   11469 		/*
   11470 		 * XXX Workaround MDIO accesses being disabled after entering
   11471 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11472 		 * register is set)
   11473 		 */
   11474 		if (sc->sc_phytype == WMPHY_82578) {
   11475 			struct mii_softc *child;
   11476 
   11477 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11478 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11479 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11480 			    && ((val & (1 << 11)) != 0)) {
   11481 				device_printf(dev, "XXX need workaround\n");
   11482 			}
   11483 		}
   11484 
   11485 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11486 			rv = wm_gmii_mdic_writereg(dev, 1,
   11487 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11488 			if (rv != 0)
   11489 				return rv;
   11490 		}
   11491 	}
   11492 
   11493 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11494 }
   11495 
   11496 /*
   11497  * wm_gmii_82580_readreg:	[mii interface function]
   11498  *
   11499  *	Read a PHY register on the 82580 and I350.
   11500  * This could be handled by the PHY layer if we didn't have to lock the
   11501  * resource ...
   11502  */
   11503 static int
   11504 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11505 {
   11506 	struct wm_softc *sc = device_private(dev);
   11507 	int rv;
   11508 
   11509 	if (sc->phy.acquire(sc) != 0) {
   11510 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11511 		return -1;
   11512 	}
   11513 
   11514 #ifdef DIAGNOSTIC
   11515 	if (reg > MII_ADDRMASK) {
   11516 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11517 		    __func__, sc->sc_phytype, reg);
   11518 		reg &= MII_ADDRMASK;
   11519 	}
   11520 #endif
   11521 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11522 
   11523 	sc->phy.release(sc);
   11524 	return rv;
   11525 }
   11526 
   11527 /*
   11528  * wm_gmii_82580_writereg:	[mii interface function]
   11529  *
   11530  *	Write a PHY register on the 82580 and I350.
   11531  * This could be handled by the PHY layer if we didn't have to lock the
   11532  * resource ...
   11533  */
   11534 static int
   11535 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11536 {
   11537 	struct wm_softc *sc = device_private(dev);
   11538 	int rv;
   11539 
   11540 	if (sc->phy.acquire(sc) != 0) {
   11541 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11542 		return -1;
   11543 	}
   11544 
   11545 #ifdef DIAGNOSTIC
   11546 	if (reg > MII_ADDRMASK) {
   11547 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11548 		    __func__, sc->sc_phytype, reg);
   11549 		reg &= MII_ADDRMASK;
   11550 	}
   11551 #endif
   11552 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11553 
   11554 	sc->phy.release(sc);
   11555 	return rv;
   11556 }
   11557 
   11558 /*
   11559  * wm_gmii_gs40g_readreg:	[mii interface function]
   11560  *
   11561  *	Read a PHY register on the I2100 and I211.
   11562  * This could be handled by the PHY layer if we didn't have to lock the
   11563  * resource ...
   11564  */
   11565 static int
   11566 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11567 {
   11568 	struct wm_softc *sc = device_private(dev);
   11569 	int page, offset;
   11570 	int rv;
   11571 
   11572 	/* Acquire semaphore */
   11573 	if (sc->phy.acquire(sc)) {
   11574 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11575 		return -1;
   11576 	}
   11577 
   11578 	/* Page select */
   11579 	page = reg >> GS40G_PAGE_SHIFT;
   11580 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11581 	if (rv != 0)
   11582 		goto release;
   11583 
   11584 	/* Read reg */
   11585 	offset = reg & GS40G_OFFSET_MASK;
   11586 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11587 
   11588 release:
   11589 	sc->phy.release(sc);
   11590 	return rv;
   11591 }
   11592 
   11593 /*
   11594  * wm_gmii_gs40g_writereg:	[mii interface function]
   11595  *
   11596  *	Write a PHY register on the I210 and I211.
   11597  * This could be handled by the PHY layer if we didn't have to lock the
   11598  * resource ...
   11599  */
   11600 static int
   11601 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11602 {
   11603 	struct wm_softc *sc = device_private(dev);
   11604 	uint16_t page;
   11605 	int offset, rv;
   11606 
   11607 	/* Acquire semaphore */
   11608 	if (sc->phy.acquire(sc)) {
   11609 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11610 		return -1;
   11611 	}
   11612 
   11613 	/* Page select */
   11614 	page = reg >> GS40G_PAGE_SHIFT;
   11615 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11616 	if (rv != 0)
   11617 		goto release;
   11618 
   11619 	/* Write reg */
   11620 	offset = reg & GS40G_OFFSET_MASK;
   11621 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11622 
   11623 release:
   11624 	/* Release semaphore */
   11625 	sc->phy.release(sc);
   11626 	return rv;
   11627 }
   11628 
   11629 /*
   11630  * wm_gmii_statchg:	[mii interface function]
   11631  *
   11632  *	Callback from MII layer when media changes.
   11633  */
   11634 static void
   11635 wm_gmii_statchg(struct ifnet *ifp)
   11636 {
   11637 	struct wm_softc *sc = ifp->if_softc;
   11638 	struct mii_data *mii = &sc->sc_mii;
   11639 
   11640 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11641 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11642 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11643 
   11644 	/* Get flow control negotiation result. */
   11645 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11646 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11647 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11648 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11649 	}
   11650 
   11651 	if (sc->sc_flowflags & IFM_FLOW) {
   11652 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11653 			sc->sc_ctrl |= CTRL_TFCE;
   11654 			sc->sc_fcrtl |= FCRTL_XONE;
   11655 		}
   11656 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11657 			sc->sc_ctrl |= CTRL_RFCE;
   11658 	}
   11659 
   11660 	if (mii->mii_media_active & IFM_FDX) {
   11661 		DPRINTF(WM_DEBUG_LINK,
   11662 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11663 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11664 	} else {
   11665 		DPRINTF(WM_DEBUG_LINK,
   11666 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11667 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11668 	}
   11669 
   11670 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11671 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11672 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11673 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11674 	if (sc->sc_type == WM_T_80003) {
   11675 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11676 		case IFM_1000_T:
   11677 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11678 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11679 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11680 			break;
   11681 		default:
   11682 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11683 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11684 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   11685 			break;
   11686 		}
   11687 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11688 	}
   11689 }
   11690 
   11691 /* kumeran related (80003, ICH* and PCH*) */
   11692 
   11693 /*
   11694  * wm_kmrn_readreg:
   11695  *
   11696  *	Read a kumeran register
   11697  */
   11698 static int
   11699 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11700 {
   11701 	int rv;
   11702 
   11703 	if (sc->sc_type == WM_T_80003)
   11704 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11705 	else
   11706 		rv = sc->phy.acquire(sc);
   11707 	if (rv != 0) {
   11708 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11709 		    __func__);
   11710 		return rv;
   11711 	}
   11712 
   11713 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11714 
   11715 	if (sc->sc_type == WM_T_80003)
   11716 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11717 	else
   11718 		sc->phy.release(sc);
   11719 
   11720 	return rv;
   11721 }
   11722 
   11723 static int
   11724 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11725 {
   11726 
   11727 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11728 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11729 	    KUMCTRLSTA_REN);
   11730 	CSR_WRITE_FLUSH(sc);
   11731 	delay(2);
   11732 
   11733 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11734 
   11735 	return 0;
   11736 }
   11737 
   11738 /*
   11739  * wm_kmrn_writereg:
   11740  *
   11741  *	Write a kumeran register
   11742  */
   11743 static int
   11744 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11745 {
   11746 	int rv;
   11747 
   11748 	if (sc->sc_type == WM_T_80003)
   11749 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11750 	else
   11751 		rv = sc->phy.acquire(sc);
   11752 	if (rv != 0) {
   11753 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11754 		    __func__);
   11755 		return rv;
   11756 	}
   11757 
   11758 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11759 
   11760 	if (sc->sc_type == WM_T_80003)
   11761 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11762 	else
   11763 		sc->phy.release(sc);
   11764 
   11765 	return rv;
   11766 }
   11767 
   11768 static int
   11769 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11770 {
   11771 
   11772 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11773 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11774 
   11775 	return 0;
   11776 }
   11777 
   11778 /*
   11779  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11780  * This access method is different from IEEE MMD.
   11781  */
   11782 static int
   11783 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11784 {
   11785 	struct wm_softc *sc = device_private(dev);
   11786 	int rv;
   11787 
   11788 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11789 	if (rv != 0)
   11790 		return rv;
   11791 
   11792 	if (rd)
   11793 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11794 	else
   11795 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11796 	return rv;
   11797 }
   11798 
   11799 static int
   11800 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11801 {
   11802 
   11803 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11804 }
   11805 
   11806 static int
   11807 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11808 {
   11809 
   11810 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11811 }
   11812 
   11813 /* SGMII related */
   11814 
   11815 /*
   11816  * wm_sgmii_uses_mdio
   11817  *
   11818  * Check whether the transaction is to the internal PHY or the external
   11819  * MDIO interface. Return true if it's MDIO.
   11820  */
   11821 static bool
   11822 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11823 {
   11824 	uint32_t reg;
   11825 	bool ismdio = false;
   11826 
   11827 	switch (sc->sc_type) {
   11828 	case WM_T_82575:
   11829 	case WM_T_82576:
   11830 		reg = CSR_READ(sc, WMREG_MDIC);
   11831 		ismdio = ((reg & MDIC_DEST) != 0);
   11832 		break;
   11833 	case WM_T_82580:
   11834 	case WM_T_I350:
   11835 	case WM_T_I354:
   11836 	case WM_T_I210:
   11837 	case WM_T_I211:
   11838 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11839 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11840 		break;
   11841 	default:
   11842 		break;
   11843 	}
   11844 
   11845 	return ismdio;
   11846 }
   11847 
   11848 /* Setup internal SGMII PHY for SFP */
   11849 static void
   11850 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   11851 {
   11852 	uint16_t id1, id2, phyreg;
   11853 	int i, rv;
   11854 
   11855 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   11856 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   11857 		return;
   11858 
   11859 	for (i = 0; i < MII_NPHY; i++) {
   11860 		sc->phy.no_errprint = true;
   11861 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   11862 		if (rv != 0)
   11863 			continue;
   11864 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   11865 		if (rv != 0)
   11866 			continue;
   11867 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   11868 			continue;
   11869 		sc->phy.no_errprint = false;
   11870 
   11871 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   11872 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   11873 		phyreg |= ESSR_SGMII_WOC_COPPER;
   11874 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   11875 		break;
   11876 	}
   11877 
   11878 }
   11879 
   11880 /*
   11881  * wm_sgmii_readreg:	[mii interface function]
   11882  *
   11883  *	Read a PHY register on the SGMII
   11884  * This could be handled by the PHY layer if we didn't have to lock the
   11885  * resource ...
   11886  */
   11887 static int
   11888 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11889 {
   11890 	struct wm_softc *sc = device_private(dev);
   11891 	int rv;
   11892 
   11893 	if (sc->phy.acquire(sc)) {
   11894 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11895 		return -1;
   11896 	}
   11897 
   11898 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11899 
   11900 	sc->phy.release(sc);
   11901 	return rv;
   11902 }
   11903 
   11904 static int
   11905 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11906 {
   11907 	struct wm_softc *sc = device_private(dev);
   11908 	uint32_t i2ccmd;
   11909 	int i, rv = 0;
   11910 
   11911 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11912 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11913 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11914 
   11915 	/* Poll the ready bit */
   11916 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11917 		delay(50);
   11918 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11919 		if (i2ccmd & I2CCMD_READY)
   11920 			break;
   11921 	}
   11922 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11923 		device_printf(dev, "I2CCMD Read did not complete\n");
   11924 		rv = ETIMEDOUT;
   11925 	}
   11926 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11927 		if (!sc->phy.no_errprint)
   11928 			device_printf(dev, "I2CCMD Error bit set\n");
   11929 		rv = EIO;
   11930 	}
   11931 
   11932 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11933 
   11934 	return rv;
   11935 }
   11936 
   11937 /*
   11938  * wm_sgmii_writereg:	[mii interface function]
   11939  *
   11940  *	Write a PHY register on the SGMII.
   11941  * This could be handled by the PHY layer if we didn't have to lock the
   11942  * resource ...
   11943  */
   11944 static int
   11945 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11946 {
   11947 	struct wm_softc *sc = device_private(dev);
   11948 	int rv;
   11949 
   11950 	if (sc->phy.acquire(sc) != 0) {
   11951 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11952 		return -1;
   11953 	}
   11954 
   11955 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11956 
   11957 	sc->phy.release(sc);
   11958 
   11959 	return rv;
   11960 }
   11961 
   11962 static int
   11963 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11964 {
   11965 	struct wm_softc *sc = device_private(dev);
   11966 	uint32_t i2ccmd;
   11967 	uint16_t swapdata;
   11968 	int rv = 0;
   11969 	int i;
   11970 
   11971 	/* Swap the data bytes for the I2C interface */
   11972 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11973 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11974 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11975 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11976 
   11977 	/* Poll the ready bit */
   11978 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11979 		delay(50);
   11980 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11981 		if (i2ccmd & I2CCMD_READY)
   11982 			break;
   11983 	}
   11984 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11985 		device_printf(dev, "I2CCMD Write did not complete\n");
   11986 		rv = ETIMEDOUT;
   11987 	}
   11988 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11989 		device_printf(dev, "I2CCMD Error bit set\n");
   11990 		rv = EIO;
   11991 	}
   11992 
   11993 	return rv;
   11994 }
   11995 
   11996 /* TBI related */
   11997 
   11998 static bool
   11999 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12000 {
   12001 	bool sig;
   12002 
   12003 	sig = ctrl & CTRL_SWDPIN(1);
   12004 
   12005 	/*
   12006 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12007 	 * detect a signal, 1 if they don't.
   12008 	 */
   12009 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12010 		sig = !sig;
   12011 
   12012 	return sig;
   12013 }
   12014 
   12015 /*
   12016  * wm_tbi_mediainit:
   12017  *
   12018  *	Initialize media for use on 1000BASE-X devices.
   12019  */
   12020 static void
   12021 wm_tbi_mediainit(struct wm_softc *sc)
   12022 {
   12023 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12024 	const char *sep = "";
   12025 
   12026 	if (sc->sc_type < WM_T_82543)
   12027 		sc->sc_tipg = TIPG_WM_DFLT;
   12028 	else
   12029 		sc->sc_tipg = TIPG_LG_DFLT;
   12030 
   12031 	sc->sc_tbi_serdes_anegticks = 5;
   12032 
   12033 	/* Initialize our media structures */
   12034 	sc->sc_mii.mii_ifp = ifp;
   12035 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12036 
   12037 	ifp->if_baudrate = IF_Gbps(1);
   12038 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12039 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12040 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12041 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12042 		    sc->sc_core_lock);
   12043 	} else {
   12044 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12045 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12046 	}
   12047 
   12048 	/*
   12049 	 * SWD Pins:
   12050 	 *
   12051 	 *	0 = Link LED (output)
   12052 	 *	1 = Loss Of Signal (input)
   12053 	 */
   12054 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12055 
   12056 	/* XXX Perhaps this is only for TBI */
   12057 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12058 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12059 
   12060 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12061 		sc->sc_ctrl &= ~CTRL_LRST;
   12062 
   12063 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12064 
   12065 #define	ADD(ss, mm, dd)							\
   12066 do {									\
   12067 	aprint_normal("%s%s", sep, ss);					\
   12068 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12069 	sep = ", ";							\
   12070 } while (/*CONSTCOND*/0)
   12071 
   12072 	aprint_normal_dev(sc->sc_dev, "");
   12073 
   12074 	if (sc->sc_type == WM_T_I354) {
   12075 		uint32_t status;
   12076 
   12077 		status = CSR_READ(sc, WMREG_STATUS);
   12078 		if (((status & STATUS_2P5_SKU) != 0)
   12079 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12080 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12081 		} else
   12082 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12083 	} else if (sc->sc_type == WM_T_82545) {
   12084 		/* Only 82545 is LX (XXX except SFP) */
   12085 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12086 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12087 	} else if (sc->sc_sfptype != 0) {
   12088 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12089 		switch (sc->sc_sfptype) {
   12090 		default:
   12091 		case SFF_SFP_ETH_FLAGS_1000SX:
   12092 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12093 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12094 			break;
   12095 		case SFF_SFP_ETH_FLAGS_1000LX:
   12096 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12097 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12098 			break;
   12099 		case SFF_SFP_ETH_FLAGS_1000CX:
   12100 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12101 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12102 			break;
   12103 		case SFF_SFP_ETH_FLAGS_1000T:
   12104 			ADD("1000baseT", IFM_1000_T, 0);
   12105 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12106 			break;
   12107 		case SFF_SFP_ETH_FLAGS_100FX:
   12108 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12109 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12110 			break;
   12111 		}
   12112 	} else {
   12113 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12114 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12115 	}
   12116 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12117 	aprint_normal("\n");
   12118 
   12119 #undef ADD
   12120 
   12121 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12122 }
   12123 
   12124 /*
   12125  * wm_tbi_mediachange:	[ifmedia interface function]
   12126  *
   12127  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12128  */
   12129 static int
   12130 wm_tbi_mediachange(struct ifnet *ifp)
   12131 {
   12132 	struct wm_softc *sc = ifp->if_softc;
   12133 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12134 	uint32_t status, ctrl;
   12135 	bool signal;
   12136 	int i;
   12137 
   12138 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12139 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12140 		/* XXX need some work for >= 82571 and < 82575 */
   12141 		if (sc->sc_type < WM_T_82575)
   12142 			return 0;
   12143 	}
   12144 
   12145 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12146 	    || (sc->sc_type >= WM_T_82575))
   12147 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12148 
   12149 	sc->sc_ctrl &= ~CTRL_LRST;
   12150 	sc->sc_txcw = TXCW_ANE;
   12151 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12152 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12153 	else if (ife->ifm_media & IFM_FDX)
   12154 		sc->sc_txcw |= TXCW_FD;
   12155 	else
   12156 		sc->sc_txcw |= TXCW_HD;
   12157 
   12158 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12159 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12160 
   12161 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12162 		device_xname(sc->sc_dev), sc->sc_txcw));
   12163 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12164 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12165 	CSR_WRITE_FLUSH(sc);
   12166 	delay(1000);
   12167 
   12168 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12169 	signal = wm_tbi_havesignal(sc, ctrl);
   12170 
   12171 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12172 		signal));
   12173 
   12174 	if (signal) {
   12175 		/* Have signal; wait for the link to come up. */
   12176 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12177 			delay(10000);
   12178 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12179 				break;
   12180 		}
   12181 
   12182 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12183 			device_xname(sc->sc_dev), i));
   12184 
   12185 		status = CSR_READ(sc, WMREG_STATUS);
   12186 		DPRINTF(WM_DEBUG_LINK,
   12187 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12188 			device_xname(sc->sc_dev), status, STATUS_LU));
   12189 		if (status & STATUS_LU) {
   12190 			/* Link is up. */
   12191 			DPRINTF(WM_DEBUG_LINK,
   12192 			    ("%s: LINK: set media -> link up %s\n",
   12193 				device_xname(sc->sc_dev),
   12194 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12195 
   12196 			/*
   12197 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12198 			 * so we should update sc->sc_ctrl
   12199 			 */
   12200 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12201 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12202 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12203 			if (status & STATUS_FD)
   12204 				sc->sc_tctl |=
   12205 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12206 			else
   12207 				sc->sc_tctl |=
   12208 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12209 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12210 				sc->sc_fcrtl |= FCRTL_XONE;
   12211 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12212 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12213 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12214 			sc->sc_tbi_linkup = 1;
   12215 		} else {
   12216 			if (i == WM_LINKUP_TIMEOUT)
   12217 				wm_check_for_link(sc);
   12218 			/* Link is down. */
   12219 			DPRINTF(WM_DEBUG_LINK,
   12220 			    ("%s: LINK: set media -> link down\n",
   12221 				device_xname(sc->sc_dev)));
   12222 			sc->sc_tbi_linkup = 0;
   12223 		}
   12224 	} else {
   12225 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12226 			device_xname(sc->sc_dev)));
   12227 		sc->sc_tbi_linkup = 0;
   12228 	}
   12229 
   12230 	wm_tbi_serdes_set_linkled(sc);
   12231 
   12232 	return 0;
   12233 }
   12234 
   12235 /*
   12236  * wm_tbi_mediastatus:	[ifmedia interface function]
   12237  *
   12238  *	Get the current interface media status on a 1000BASE-X device.
   12239  */
   12240 static void
   12241 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12242 {
   12243 	struct wm_softc *sc = ifp->if_softc;
   12244 	uint32_t ctrl, status;
   12245 
   12246 	ifmr->ifm_status = IFM_AVALID;
   12247 	ifmr->ifm_active = IFM_ETHER;
   12248 
   12249 	status = CSR_READ(sc, WMREG_STATUS);
   12250 	if ((status & STATUS_LU) == 0) {
   12251 		ifmr->ifm_active |= IFM_NONE;
   12252 		return;
   12253 	}
   12254 
   12255 	ifmr->ifm_status |= IFM_ACTIVE;
   12256 	/* Only 82545 is LX */
   12257 	if (sc->sc_type == WM_T_82545)
   12258 		ifmr->ifm_active |= IFM_1000_LX;
   12259 	else
   12260 		ifmr->ifm_active |= IFM_1000_SX;
   12261 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12262 		ifmr->ifm_active |= IFM_FDX;
   12263 	else
   12264 		ifmr->ifm_active |= IFM_HDX;
   12265 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12266 	if (ctrl & CTRL_RFCE)
   12267 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12268 	if (ctrl & CTRL_TFCE)
   12269 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12270 }
   12271 
   12272 /* XXX TBI only */
   12273 static int
   12274 wm_check_for_link(struct wm_softc *sc)
   12275 {
   12276 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12277 	uint32_t rxcw;
   12278 	uint32_t ctrl;
   12279 	uint32_t status;
   12280 	bool signal;
   12281 
   12282 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12283 		device_xname(sc->sc_dev), __func__));
   12284 
   12285 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12286 		/* XXX need some work for >= 82571 */
   12287 		if (sc->sc_type >= WM_T_82571) {
   12288 			sc->sc_tbi_linkup = 1;
   12289 			return 0;
   12290 		}
   12291 	}
   12292 
   12293 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12294 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12295 	status = CSR_READ(sc, WMREG_STATUS);
   12296 	signal = wm_tbi_havesignal(sc, ctrl);
   12297 
   12298 	DPRINTF(WM_DEBUG_LINK,
   12299 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12300 		device_xname(sc->sc_dev), __func__, signal,
   12301 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12302 
   12303 	/*
   12304 	 * SWDPIN   LU RXCW
   12305 	 *	0    0	  0
   12306 	 *	0    0	  1	(should not happen)
   12307 	 *	0    1	  0	(should not happen)
   12308 	 *	0    1	  1	(should not happen)
   12309 	 *	1    0	  0	Disable autonego and force linkup
   12310 	 *	1    0	  1	got /C/ but not linkup yet
   12311 	 *	1    1	  0	(linkup)
   12312 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12313 	 *
   12314 	 */
   12315 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12316 		DPRINTF(WM_DEBUG_LINK,
   12317 		    ("%s: %s: force linkup and fullduplex\n",
   12318 			device_xname(sc->sc_dev), __func__));
   12319 		sc->sc_tbi_linkup = 0;
   12320 		/* Disable auto-negotiation in the TXCW register */
   12321 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12322 
   12323 		/*
   12324 		 * Force link-up and also force full-duplex.
   12325 		 *
   12326 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
   12327 		 * so we should update sc->sc_ctrl
   12328 		 */
   12329 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12330 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12331 	} else if (((status & STATUS_LU) != 0)
   12332 	    && ((rxcw & RXCW_C) != 0)
   12333 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12334 		sc->sc_tbi_linkup = 1;
   12335 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12336 			device_xname(sc->sc_dev),
   12337 			__func__));
   12338 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12339 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12340 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12341 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12342 			device_xname(sc->sc_dev), __func__));
   12343 	} else {
   12344 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12345 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12346 			status));
   12347 	}
   12348 
   12349 	return 0;
   12350 }
   12351 
   12352 /*
   12353  * wm_tbi_tick:
   12354  *
   12355  *	Check the link on TBI devices.
   12356  *	This function acts as mii_tick().
   12357  */
   12358 static void
   12359 wm_tbi_tick(struct wm_softc *sc)
   12360 {
   12361 	struct mii_data *mii = &sc->sc_mii;
   12362 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12363 	uint32_t status;
   12364 
   12365 	KASSERT(WM_CORE_LOCKED(sc));
   12366 
   12367 	status = CSR_READ(sc, WMREG_STATUS);
   12368 
   12369 	/* XXX is this needed? */
   12370 	(void)CSR_READ(sc, WMREG_RXCW);
   12371 	(void)CSR_READ(sc, WMREG_CTRL);
   12372 
   12373 	/* set link status */
   12374 	if ((status & STATUS_LU) == 0) {
   12375 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12376 			device_xname(sc->sc_dev)));
   12377 		sc->sc_tbi_linkup = 0;
   12378 	} else if (sc->sc_tbi_linkup == 0) {
   12379 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12380 			device_xname(sc->sc_dev),
   12381 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12382 		sc->sc_tbi_linkup = 1;
   12383 		sc->sc_tbi_serdes_ticks = 0;
   12384 	}
   12385 
   12386 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12387 		goto setled;
   12388 
   12389 	if ((status & STATUS_LU) == 0) {
   12390 		sc->sc_tbi_linkup = 0;
   12391 		/* If the timer expired, retry autonegotiation */
   12392 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12393 		    && (++sc->sc_tbi_serdes_ticks
   12394 			>= sc->sc_tbi_serdes_anegticks)) {
   12395 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12396 				device_xname(sc->sc_dev), __func__));
   12397 			sc->sc_tbi_serdes_ticks = 0;
   12398 			/*
   12399 			 * Reset the link, and let autonegotiation do
   12400 			 * its thing
   12401 			 */
   12402 			sc->sc_ctrl |= CTRL_LRST;
   12403 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12404 			CSR_WRITE_FLUSH(sc);
   12405 			delay(1000);
   12406 			sc->sc_ctrl &= ~CTRL_LRST;
   12407 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12408 			CSR_WRITE_FLUSH(sc);
   12409 			delay(1000);
   12410 			CSR_WRITE(sc, WMREG_TXCW,
   12411 			    sc->sc_txcw & ~TXCW_ANE);
   12412 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12413 		}
   12414 	}
   12415 
   12416 setled:
   12417 	wm_tbi_serdes_set_linkled(sc);
   12418 }
   12419 
   12420 /* SERDES related */
   12421 static void
   12422 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12423 {
   12424 	uint32_t reg;
   12425 
   12426 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12427 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12428 		return;
   12429 
   12430 	/* Enable PCS to turn on link */
   12431 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12432 	reg |= PCS_CFG_PCS_EN;
   12433 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12434 
   12435 	/* Power up the laser */
   12436 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12437 	reg &= ~CTRL_EXT_SWDPIN(3);
   12438 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12439 
   12440 	/* Flush the write to verify completion */
   12441 	CSR_WRITE_FLUSH(sc);
   12442 	delay(1000);
   12443 }
   12444 
   12445 static int
   12446 wm_serdes_mediachange(struct ifnet *ifp)
   12447 {
   12448 	struct wm_softc *sc = ifp->if_softc;
   12449 	bool pcs_autoneg = true; /* XXX */
   12450 	uint32_t ctrl_ext, pcs_lctl, reg;
   12451 
   12452 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12453 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12454 		return 0;
   12455 
   12456 	/* XXX Currently, this function is not called on 8257[12] */
   12457 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12458 	    || (sc->sc_type >= WM_T_82575))
   12459 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12460 
   12461 	/* Power on the sfp cage if present */
   12462 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12463 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12464 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12465 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12466 
   12467 	sc->sc_ctrl |= CTRL_SLU;
   12468 
   12469 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12470 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12471 
   12472 		reg = CSR_READ(sc, WMREG_CONNSW);
   12473 		reg |= CONNSW_ENRGSRC;
   12474 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12475 	}
   12476 
   12477 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12478 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12479 	case CTRL_EXT_LINK_MODE_SGMII:
   12480 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12481 		pcs_autoneg = true;
   12482 		/* Autoneg time out should be disabled for SGMII mode */
   12483 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12484 		break;
   12485 	case CTRL_EXT_LINK_MODE_1000KX:
   12486 		pcs_autoneg = false;
   12487 		/* FALLTHROUGH */
   12488 	default:
   12489 		if ((sc->sc_type == WM_T_82575)
   12490 		    || (sc->sc_type == WM_T_82576)) {
   12491 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12492 				pcs_autoneg = false;
   12493 		}
   12494 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12495 		    | CTRL_FRCFDX;
   12496 
   12497 		/* Set speed of 1000/Full if speed/duplex is forced */
   12498 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12499 	}
   12500 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12501 
   12502 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12503 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12504 
   12505 	if (pcs_autoneg) {
   12506 		/* Set PCS register for autoneg */
   12507 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12508 
   12509 		/* Disable force flow control for autoneg */
   12510 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12511 
   12512 		/* Configure flow control advertisement for autoneg */
   12513 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12514 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12515 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12516 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12517 	} else
   12518 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12519 
   12520 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12521 
   12522 	return 0;
   12523 }
   12524 
   12525 static void
   12526 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12527 {
   12528 	struct wm_softc *sc = ifp->if_softc;
   12529 	struct mii_data *mii = &sc->sc_mii;
   12530 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12531 	uint32_t pcs_adv, pcs_lpab, reg;
   12532 
   12533 	ifmr->ifm_status = IFM_AVALID;
   12534 	ifmr->ifm_active = IFM_ETHER;
   12535 
   12536 	/* Check PCS */
   12537 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12538 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12539 		ifmr->ifm_active |= IFM_NONE;
   12540 		sc->sc_tbi_linkup = 0;
   12541 		goto setled;
   12542 	}
   12543 
   12544 	sc->sc_tbi_linkup = 1;
   12545 	ifmr->ifm_status |= IFM_ACTIVE;
   12546 	if (sc->sc_type == WM_T_I354) {
   12547 		uint32_t status;
   12548 
   12549 		status = CSR_READ(sc, WMREG_STATUS);
   12550 		if (((status & STATUS_2P5_SKU) != 0)
   12551 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12552 			ifmr->ifm_active |= IFM_2500_KX;
   12553 		} else
   12554 			ifmr->ifm_active |= IFM_1000_KX;
   12555 	} else {
   12556 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12557 		case PCS_LSTS_SPEED_10:
   12558 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12559 			break;
   12560 		case PCS_LSTS_SPEED_100:
   12561 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12562 			break;
   12563 		case PCS_LSTS_SPEED_1000:
   12564 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12565 			break;
   12566 		default:
   12567 			device_printf(sc->sc_dev, "Unknown speed\n");
   12568 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12569 			break;
   12570 		}
   12571 	}
   12572 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12573 	if ((reg & PCS_LSTS_FDX) != 0)
   12574 		ifmr->ifm_active |= IFM_FDX;
   12575 	else
   12576 		ifmr->ifm_active |= IFM_HDX;
   12577 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12578 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12579 		/* Check flow */
   12580 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12581 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12582 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12583 			goto setled;
   12584 		}
   12585 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12586 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12587 		DPRINTF(WM_DEBUG_LINK,
   12588 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12589 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12590 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12591 			mii->mii_media_active |= IFM_FLOW
   12592 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12593 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12594 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12595 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12596 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12597 			mii->mii_media_active |= IFM_FLOW
   12598 			    | IFM_ETH_TXPAUSE;
   12599 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12600 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12601 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12602 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12603 			mii->mii_media_active |= IFM_FLOW
   12604 			    | IFM_ETH_RXPAUSE;
   12605 		}
   12606 	}
   12607 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12608 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12609 setled:
   12610 	wm_tbi_serdes_set_linkled(sc);
   12611 }
   12612 
   12613 /*
   12614  * wm_serdes_tick:
   12615  *
   12616  *	Check the link on serdes devices.
   12617  */
   12618 static void
   12619 wm_serdes_tick(struct wm_softc *sc)
   12620 {
   12621 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12622 	struct mii_data *mii = &sc->sc_mii;
   12623 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12624 	uint32_t reg;
   12625 
   12626 	KASSERT(WM_CORE_LOCKED(sc));
   12627 
   12628 	mii->mii_media_status = IFM_AVALID;
   12629 	mii->mii_media_active = IFM_ETHER;
   12630 
   12631 	/* Check PCS */
   12632 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12633 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12634 		mii->mii_media_status |= IFM_ACTIVE;
   12635 		sc->sc_tbi_linkup = 1;
   12636 		sc->sc_tbi_serdes_ticks = 0;
   12637 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12638 		if ((reg & PCS_LSTS_FDX) != 0)
   12639 			mii->mii_media_active |= IFM_FDX;
   12640 		else
   12641 			mii->mii_media_active |= IFM_HDX;
   12642 	} else {
   12643 		mii->mii_media_status |= IFM_NONE;
   12644 		sc->sc_tbi_linkup = 0;
   12645 		/* If the timer expired, retry autonegotiation */
   12646 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12647 		    && (++sc->sc_tbi_serdes_ticks
   12648 			>= sc->sc_tbi_serdes_anegticks)) {
   12649 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12650 				device_xname(sc->sc_dev), __func__));
   12651 			sc->sc_tbi_serdes_ticks = 0;
   12652 			/* XXX */
   12653 			wm_serdes_mediachange(ifp);
   12654 		}
   12655 	}
   12656 
   12657 	wm_tbi_serdes_set_linkled(sc);
   12658 }
   12659 
   12660 /* SFP related */
   12661 
   12662 static int
   12663 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12664 {
   12665 	uint32_t i2ccmd;
   12666 	int i;
   12667 
   12668 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12669 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12670 
   12671 	/* Poll the ready bit */
   12672 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12673 		delay(50);
   12674 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12675 		if (i2ccmd & I2CCMD_READY)
   12676 			break;
   12677 	}
   12678 	if ((i2ccmd & I2CCMD_READY) == 0)
   12679 		return -1;
   12680 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12681 		return -1;
   12682 
   12683 	*data = i2ccmd & 0x00ff;
   12684 
   12685 	return 0;
   12686 }
   12687 
   12688 static uint32_t
   12689 wm_sfp_get_media_type(struct wm_softc *sc)
   12690 {
   12691 	uint32_t ctrl_ext;
   12692 	uint8_t val = 0;
   12693 	int timeout = 3;
   12694 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12695 	int rv = -1;
   12696 
   12697 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12698 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12699 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12700 	CSR_WRITE_FLUSH(sc);
   12701 
   12702 	/* Read SFP module data */
   12703 	while (timeout) {
   12704 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12705 		if (rv == 0)
   12706 			break;
   12707 		delay(100*1000); /* XXX too big */
   12708 		timeout--;
   12709 	}
   12710 	if (rv != 0)
   12711 		goto out;
   12712 
   12713 	switch (val) {
   12714 	case SFF_SFP_ID_SFF:
   12715 		aprint_normal_dev(sc->sc_dev,
   12716 		    "Module/Connector soldered to board\n");
   12717 		break;
   12718 	case SFF_SFP_ID_SFP:
   12719 		sc->sc_flags |= WM_F_SFP;
   12720 		break;
   12721 	case SFF_SFP_ID_UNKNOWN:
   12722 		goto out;
   12723 	default:
   12724 		break;
   12725 	}
   12726 
   12727 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12728 	if (rv != 0)
   12729 		goto out;
   12730 
   12731 	sc->sc_sfptype = val;
   12732 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12733 		mediatype = WM_MEDIATYPE_SERDES;
   12734 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12735 		sc->sc_flags |= WM_F_SGMII;
   12736 		mediatype = WM_MEDIATYPE_COPPER;
   12737 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12738 		sc->sc_flags |= WM_F_SGMII;
   12739 		mediatype = WM_MEDIATYPE_SERDES;
   12740 	} else {
   12741 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12742 		    __func__, sc->sc_sfptype);
   12743 		sc->sc_sfptype = 0; /* XXX unknown */
   12744 	}
   12745 
   12746 out:
   12747 	/* Restore I2C interface setting */
   12748 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12749 
   12750 	return mediatype;
   12751 }
   12752 
   12753 /*
   12754  * NVM related.
   12755  * Microwire, SPI (w/wo EERD) and Flash.
   12756  */
   12757 
   12758 /* Both spi and uwire */
   12759 
   12760 /*
   12761  * wm_eeprom_sendbits:
   12762  *
   12763  *	Send a series of bits to the EEPROM.
   12764  */
   12765 static void
   12766 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12767 {
   12768 	uint32_t reg;
   12769 	int x;
   12770 
   12771 	reg = CSR_READ(sc, WMREG_EECD);
   12772 
   12773 	for (x = nbits; x > 0; x--) {
   12774 		if (bits & (1U << (x - 1)))
   12775 			reg |= EECD_DI;
   12776 		else
   12777 			reg &= ~EECD_DI;
   12778 		CSR_WRITE(sc, WMREG_EECD, reg);
   12779 		CSR_WRITE_FLUSH(sc);
   12780 		delay(2);
   12781 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12782 		CSR_WRITE_FLUSH(sc);
   12783 		delay(2);
   12784 		CSR_WRITE(sc, WMREG_EECD, reg);
   12785 		CSR_WRITE_FLUSH(sc);
   12786 		delay(2);
   12787 	}
   12788 }
   12789 
   12790 /*
   12791  * wm_eeprom_recvbits:
   12792  *
   12793  *	Receive a series of bits from the EEPROM.
   12794  */
   12795 static void
   12796 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12797 {
   12798 	uint32_t reg, val;
   12799 	int x;
   12800 
   12801 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12802 
   12803 	val = 0;
   12804 	for (x = nbits; x > 0; x--) {
   12805 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12806 		CSR_WRITE_FLUSH(sc);
   12807 		delay(2);
   12808 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12809 			val |= (1U << (x - 1));
   12810 		CSR_WRITE(sc, WMREG_EECD, reg);
   12811 		CSR_WRITE_FLUSH(sc);
   12812 		delay(2);
   12813 	}
   12814 	*valp = val;
   12815 }
   12816 
   12817 /* Microwire */
   12818 
   12819 /*
   12820  * wm_nvm_read_uwire:
   12821  *
   12822  *	Read a word from the EEPROM using the MicroWire protocol.
   12823  */
   12824 static int
   12825 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12826 {
   12827 	uint32_t reg, val;
   12828 	int i;
   12829 
   12830 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12831 		device_xname(sc->sc_dev), __func__));
   12832 
   12833 	if (sc->nvm.acquire(sc) != 0)
   12834 		return -1;
   12835 
   12836 	for (i = 0; i < wordcnt; i++) {
   12837 		/* Clear SK and DI. */
   12838 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12839 		CSR_WRITE(sc, WMREG_EECD, reg);
   12840 
   12841 		/*
   12842 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12843 		 * and Xen.
   12844 		 *
   12845 		 * We use this workaround only for 82540 because qemu's
   12846 		 * e1000 act as 82540.
   12847 		 */
   12848 		if (sc->sc_type == WM_T_82540) {
   12849 			reg |= EECD_SK;
   12850 			CSR_WRITE(sc, WMREG_EECD, reg);
   12851 			reg &= ~EECD_SK;
   12852 			CSR_WRITE(sc, WMREG_EECD, reg);
   12853 			CSR_WRITE_FLUSH(sc);
   12854 			delay(2);
   12855 		}
   12856 		/* XXX: end of workaround */
   12857 
   12858 		/* Set CHIP SELECT. */
   12859 		reg |= EECD_CS;
   12860 		CSR_WRITE(sc, WMREG_EECD, reg);
   12861 		CSR_WRITE_FLUSH(sc);
   12862 		delay(2);
   12863 
   12864 		/* Shift in the READ command. */
   12865 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12866 
   12867 		/* Shift in address. */
   12868 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12869 
   12870 		/* Shift out the data. */
   12871 		wm_eeprom_recvbits(sc, &val, 16);
   12872 		data[i] = val & 0xffff;
   12873 
   12874 		/* Clear CHIP SELECT. */
   12875 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12876 		CSR_WRITE(sc, WMREG_EECD, reg);
   12877 		CSR_WRITE_FLUSH(sc);
   12878 		delay(2);
   12879 	}
   12880 
   12881 	sc->nvm.release(sc);
   12882 	return 0;
   12883 }
   12884 
   12885 /* SPI */
   12886 
   12887 /*
   12888  * Set SPI and FLASH related information from the EECD register.
   12889  * For 82541 and 82547, the word size is taken from EEPROM.
   12890  */
   12891 static int
   12892 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12893 {
   12894 	int size;
   12895 	uint32_t reg;
   12896 	uint16_t data;
   12897 
   12898 	reg = CSR_READ(sc, WMREG_EECD);
   12899 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12900 
   12901 	/* Read the size of NVM from EECD by default */
   12902 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12903 	switch (sc->sc_type) {
   12904 	case WM_T_82541:
   12905 	case WM_T_82541_2:
   12906 	case WM_T_82547:
   12907 	case WM_T_82547_2:
   12908 		/* Set dummy value to access EEPROM */
   12909 		sc->sc_nvm_wordsize = 64;
   12910 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12911 			aprint_error_dev(sc->sc_dev,
   12912 			    "%s: failed to read EEPROM size\n", __func__);
   12913 		}
   12914 		reg = data;
   12915 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12916 		if (size == 0)
   12917 			size = 6; /* 64 word size */
   12918 		else
   12919 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12920 		break;
   12921 	case WM_T_80003:
   12922 	case WM_T_82571:
   12923 	case WM_T_82572:
   12924 	case WM_T_82573: /* SPI case */
   12925 	case WM_T_82574: /* SPI case */
   12926 	case WM_T_82583: /* SPI case */
   12927 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12928 		if (size > 14)
   12929 			size = 14;
   12930 		break;
   12931 	case WM_T_82575:
   12932 	case WM_T_82576:
   12933 	case WM_T_82580:
   12934 	case WM_T_I350:
   12935 	case WM_T_I354:
   12936 	case WM_T_I210:
   12937 	case WM_T_I211:
   12938 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12939 		if (size > 15)
   12940 			size = 15;
   12941 		break;
   12942 	default:
   12943 		aprint_error_dev(sc->sc_dev,
   12944 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12945 		return -1;
   12946 		break;
   12947 	}
   12948 
   12949 	sc->sc_nvm_wordsize = 1 << size;
   12950 
   12951 	return 0;
   12952 }
   12953 
   12954 /*
   12955  * wm_nvm_ready_spi:
   12956  *
   12957  *	Wait for a SPI EEPROM to be ready for commands.
   12958  */
   12959 static int
   12960 wm_nvm_ready_spi(struct wm_softc *sc)
   12961 {
   12962 	uint32_t val;
   12963 	int usec;
   12964 
   12965 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12966 		device_xname(sc->sc_dev), __func__));
   12967 
   12968 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12969 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12970 		wm_eeprom_recvbits(sc, &val, 8);
   12971 		if ((val & SPI_SR_RDY) == 0)
   12972 			break;
   12973 	}
   12974 	if (usec >= SPI_MAX_RETRIES) {
   12975 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12976 		return -1;
   12977 	}
   12978 	return 0;
   12979 }
   12980 
   12981 /*
   12982  * wm_nvm_read_spi:
   12983  *
   12984  *	Read a work from the EEPROM using the SPI protocol.
   12985  */
   12986 static int
   12987 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12988 {
   12989 	uint32_t reg, val;
   12990 	int i;
   12991 	uint8_t opc;
   12992 	int rv = 0;
   12993 
   12994 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12995 		device_xname(sc->sc_dev), __func__));
   12996 
   12997 	if (sc->nvm.acquire(sc) != 0)
   12998 		return -1;
   12999 
   13000 	/* Clear SK and CS. */
   13001 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13002 	CSR_WRITE(sc, WMREG_EECD, reg);
   13003 	CSR_WRITE_FLUSH(sc);
   13004 	delay(2);
   13005 
   13006 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13007 		goto out;
   13008 
   13009 	/* Toggle CS to flush commands. */
   13010 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13011 	CSR_WRITE_FLUSH(sc);
   13012 	delay(2);
   13013 	CSR_WRITE(sc, WMREG_EECD, reg);
   13014 	CSR_WRITE_FLUSH(sc);
   13015 	delay(2);
   13016 
   13017 	opc = SPI_OPC_READ;
   13018 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13019 		opc |= SPI_OPC_A8;
   13020 
   13021 	wm_eeprom_sendbits(sc, opc, 8);
   13022 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13023 
   13024 	for (i = 0; i < wordcnt; i++) {
   13025 		wm_eeprom_recvbits(sc, &val, 16);
   13026 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13027 	}
   13028 
   13029 	/* Raise CS and clear SK. */
   13030 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13031 	CSR_WRITE(sc, WMREG_EECD, reg);
   13032 	CSR_WRITE_FLUSH(sc);
   13033 	delay(2);
   13034 
   13035 out:
   13036 	sc->nvm.release(sc);
   13037 	return rv;
   13038 }
   13039 
   13040 /* Using with EERD */
   13041 
   13042 static int
   13043 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13044 {
   13045 	uint32_t attempts = 100000;
   13046 	uint32_t i, reg = 0;
   13047 	int32_t done = -1;
   13048 
   13049 	for (i = 0; i < attempts; i++) {
   13050 		reg = CSR_READ(sc, rw);
   13051 
   13052 		if (reg & EERD_DONE) {
   13053 			done = 0;
   13054 			break;
   13055 		}
   13056 		delay(5);
   13057 	}
   13058 
   13059 	return done;
   13060 }
   13061 
   13062 static int
   13063 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13064 {
   13065 	int i, eerd = 0;
   13066 	int rv = 0;
   13067 
   13068 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13069 		device_xname(sc->sc_dev), __func__));
   13070 
   13071 	if (sc->nvm.acquire(sc) != 0)
   13072 		return -1;
   13073 
   13074 	for (i = 0; i < wordcnt; i++) {
   13075 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13076 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13077 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13078 		if (rv != 0) {
   13079 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13080 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13081 			break;
   13082 		}
   13083 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13084 	}
   13085 
   13086 	sc->nvm.release(sc);
   13087 	return rv;
   13088 }
   13089 
   13090 /* Flash */
   13091 
   13092 static int
   13093 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13094 {
   13095 	uint32_t eecd;
   13096 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13097 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13098 	uint32_t nvm_dword = 0;
   13099 	uint8_t sig_byte = 0;
   13100 	int rv;
   13101 
   13102 	switch (sc->sc_type) {
   13103 	case WM_T_PCH_SPT:
   13104 	case WM_T_PCH_CNP:
   13105 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13106 		act_offset = ICH_NVM_SIG_WORD * 2;
   13107 
   13108 		/* Set bank to 0 in case flash read fails. */
   13109 		*bank = 0;
   13110 
   13111 		/* Check bank 0 */
   13112 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13113 		if (rv != 0)
   13114 			return rv;
   13115 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13116 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13117 			*bank = 0;
   13118 			return 0;
   13119 		}
   13120 
   13121 		/* Check bank 1 */
   13122 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   13123 		    &nvm_dword);
   13124 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13125 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13126 			*bank = 1;
   13127 			return 0;
   13128 		}
   13129 		aprint_error_dev(sc->sc_dev,
   13130 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13131 		return -1;
   13132 	case WM_T_ICH8:
   13133 	case WM_T_ICH9:
   13134 		eecd = CSR_READ(sc, WMREG_EECD);
   13135 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13136 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13137 			return 0;
   13138 		}
   13139 		/* FALLTHROUGH */
   13140 	default:
   13141 		/* Default to 0 */
   13142 		*bank = 0;
   13143 
   13144 		/* Check bank 0 */
   13145 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13146 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13147 			*bank = 0;
   13148 			return 0;
   13149 		}
   13150 
   13151 		/* Check bank 1 */
   13152 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13153 		    &sig_byte);
   13154 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13155 			*bank = 1;
   13156 			return 0;
   13157 		}
   13158 	}
   13159 
   13160 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13161 		device_xname(sc->sc_dev)));
   13162 	return -1;
   13163 }
   13164 
   13165 /******************************************************************************
   13166  * This function does initial flash setup so that a new read/write/erase cycle
   13167  * can be started.
   13168  *
   13169  * sc - The pointer to the hw structure
   13170  ****************************************************************************/
   13171 static int32_t
   13172 wm_ich8_cycle_init(struct wm_softc *sc)
   13173 {
   13174 	uint16_t hsfsts;
   13175 	int32_t error = 1;
   13176 	int32_t i     = 0;
   13177 
   13178 	if (sc->sc_type >= WM_T_PCH_SPT)
   13179 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13180 	else
   13181 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13182 
   13183 	/* May be check the Flash Des Valid bit in Hw status */
   13184 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13185 		return error;
   13186 
   13187 	/* Clear FCERR in Hw status by writing 1 */
   13188 	/* Clear DAEL in Hw status by writing a 1 */
   13189 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13190 
   13191 	if (sc->sc_type >= WM_T_PCH_SPT)
   13192 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13193 	else
   13194 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13195 
   13196 	/*
   13197 	 * Either we should have a hardware SPI cycle in progress bit to check
   13198 	 * against, in order to start a new cycle or FDONE bit should be
   13199 	 * changed in the hardware so that it is 1 after hardware reset, which
   13200 	 * can then be used as an indication whether a cycle is in progress or
   13201 	 * has been completed .. we should also have some software semaphore
   13202 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   13203 	 * threads access to those bits can be sequentiallized or a way so that
   13204 	 * 2 threads don't start the cycle at the same time
   13205 	 */
   13206 
   13207 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13208 		/*
   13209 		 * There is no cycle running at present, so we can start a
   13210 		 * cycle
   13211 		 */
   13212 
   13213 		/* Begin by setting Flash Cycle Done. */
   13214 		hsfsts |= HSFSTS_DONE;
   13215 		if (sc->sc_type >= WM_T_PCH_SPT)
   13216 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13217 			    hsfsts & 0xffffUL);
   13218 		else
   13219 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13220 		error = 0;
   13221 	} else {
   13222 		/*
   13223 		 * Otherwise poll for sometime so the current cycle has a
   13224 		 * chance to end before giving up.
   13225 		 */
   13226 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13227 			if (sc->sc_type >= WM_T_PCH_SPT)
   13228 				hsfsts = ICH8_FLASH_READ32(sc,
   13229 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13230 			else
   13231 				hsfsts = ICH8_FLASH_READ16(sc,
   13232 				    ICH_FLASH_HSFSTS);
   13233 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13234 				error = 0;
   13235 				break;
   13236 			}
   13237 			delay(1);
   13238 		}
   13239 		if (error == 0) {
   13240 			/*
   13241 			 * Successful in waiting for previous cycle to timeout,
   13242 			 * now set the Flash Cycle Done.
   13243 			 */
   13244 			hsfsts |= HSFSTS_DONE;
   13245 			if (sc->sc_type >= WM_T_PCH_SPT)
   13246 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13247 				    hsfsts & 0xffffUL);
   13248 			else
   13249 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13250 				    hsfsts);
   13251 		}
   13252 	}
   13253 	return error;
   13254 }
   13255 
   13256 /******************************************************************************
   13257  * This function starts a flash cycle and waits for its completion
   13258  *
   13259  * sc - The pointer to the hw structure
   13260  ****************************************************************************/
   13261 static int32_t
   13262 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13263 {
   13264 	uint16_t hsflctl;
   13265 	uint16_t hsfsts;
   13266 	int32_t error = 1;
   13267 	uint32_t i = 0;
   13268 
   13269 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13270 	if (sc->sc_type >= WM_T_PCH_SPT)
   13271 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13272 	else
   13273 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13274 	hsflctl |= HSFCTL_GO;
   13275 	if (sc->sc_type >= WM_T_PCH_SPT)
   13276 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13277 		    (uint32_t)hsflctl << 16);
   13278 	else
   13279 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13280 
   13281 	/* Wait till FDONE bit is set to 1 */
   13282 	do {
   13283 		if (sc->sc_type >= WM_T_PCH_SPT)
   13284 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13285 			    & 0xffffUL;
   13286 		else
   13287 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13288 		if (hsfsts & HSFSTS_DONE)
   13289 			break;
   13290 		delay(1);
   13291 		i++;
   13292 	} while (i < timeout);
   13293 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   13294 		error = 0;
   13295 
   13296 	return error;
   13297 }
   13298 
   13299 /******************************************************************************
   13300  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13301  *
   13302  * sc - The pointer to the hw structure
   13303  * index - The index of the byte or word to read.
   13304  * size - Size of data to read, 1=byte 2=word, 4=dword
   13305  * data - Pointer to the word to store the value read.
   13306  *****************************************************************************/
   13307 static int32_t
   13308 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13309     uint32_t size, uint32_t *data)
   13310 {
   13311 	uint16_t hsfsts;
   13312 	uint16_t hsflctl;
   13313 	uint32_t flash_linear_address;
   13314 	uint32_t flash_data = 0;
   13315 	int32_t error = 1;
   13316 	int32_t count = 0;
   13317 
   13318 	if (size < 1  || size > 4 || data == 0x0 ||
   13319 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13320 		return error;
   13321 
   13322 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13323 	    sc->sc_ich8_flash_base;
   13324 
   13325 	do {
   13326 		delay(1);
   13327 		/* Steps */
   13328 		error = wm_ich8_cycle_init(sc);
   13329 		if (error)
   13330 			break;
   13331 
   13332 		if (sc->sc_type >= WM_T_PCH_SPT)
   13333 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13334 			    >> 16;
   13335 		else
   13336 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13337 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   13338 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13339 		    & HSFCTL_BCOUNT_MASK;
   13340 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13341 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13342 			/*
   13343 			 * In SPT, This register is in Lan memory space, not
   13344 			 * flash. Therefore, only 32 bit access is supported.
   13345 			 */
   13346 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13347 			    (uint32_t)hsflctl << 16);
   13348 		} else
   13349 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13350 
   13351 		/*
   13352 		 * Write the last 24 bits of index into Flash Linear address
   13353 		 * field in Flash Address
   13354 		 */
   13355 		/* TODO: TBD maybe check the index against the size of flash */
   13356 
   13357 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13358 
   13359 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13360 
   13361 		/*
   13362 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   13363 		 * the whole sequence a few more times, else read in (shift in)
   13364 		 * the Flash Data0, the order is least significant byte first
   13365 		 * msb to lsb
   13366 		 */
   13367 		if (error == 0) {
   13368 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13369 			if (size == 1)
   13370 				*data = (uint8_t)(flash_data & 0x000000FF);
   13371 			else if (size == 2)
   13372 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13373 			else if (size == 4)
   13374 				*data = (uint32_t)flash_data;
   13375 			break;
   13376 		} else {
   13377 			/*
   13378 			 * If we've gotten here, then things are probably
   13379 			 * completely hosed, but if the error condition is
   13380 			 * detected, it won't hurt to give it another try...
   13381 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13382 			 */
   13383 			if (sc->sc_type >= WM_T_PCH_SPT)
   13384 				hsfsts = ICH8_FLASH_READ32(sc,
   13385 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13386 			else
   13387 				hsfsts = ICH8_FLASH_READ16(sc,
   13388 				    ICH_FLASH_HSFSTS);
   13389 
   13390 			if (hsfsts & HSFSTS_ERR) {
   13391 				/* Repeat for some time before giving up. */
   13392 				continue;
   13393 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13394 				break;
   13395 		}
   13396 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13397 
   13398 	return error;
   13399 }
   13400 
   13401 /******************************************************************************
   13402  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13403  *
   13404  * sc - pointer to wm_hw structure
   13405  * index - The index of the byte to read.
   13406  * data - Pointer to a byte to store the value read.
   13407  *****************************************************************************/
   13408 static int32_t
   13409 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13410 {
   13411 	int32_t status;
   13412 	uint32_t word = 0;
   13413 
   13414 	status = wm_read_ich8_data(sc, index, 1, &word);
   13415 	if (status == 0)
   13416 		*data = (uint8_t)word;
   13417 	else
   13418 		*data = 0;
   13419 
   13420 	return status;
   13421 }
   13422 
   13423 /******************************************************************************
   13424  * Reads a word from the NVM using the ICH8 flash access registers.
   13425  *
   13426  * sc - pointer to wm_hw structure
   13427  * index - The starting byte index of the word to read.
   13428  * data - Pointer to a word to store the value read.
   13429  *****************************************************************************/
   13430 static int32_t
   13431 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13432 {
   13433 	int32_t status;
   13434 	uint32_t word = 0;
   13435 
   13436 	status = wm_read_ich8_data(sc, index, 2, &word);
   13437 	if (status == 0)
   13438 		*data = (uint16_t)word;
   13439 	else
   13440 		*data = 0;
   13441 
   13442 	return status;
   13443 }
   13444 
   13445 /******************************************************************************
   13446  * Reads a dword from the NVM using the ICH8 flash access registers.
   13447  *
   13448  * sc - pointer to wm_hw structure
   13449  * index - The starting byte index of the word to read.
   13450  * data - Pointer to a word to store the value read.
   13451  *****************************************************************************/
   13452 static int32_t
   13453 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13454 {
   13455 	int32_t status;
   13456 
   13457 	status = wm_read_ich8_data(sc, index, 4, data);
   13458 	return status;
   13459 }
   13460 
   13461 /******************************************************************************
   13462  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13463  * register.
   13464  *
   13465  * sc - Struct containing variables accessed by shared code
   13466  * offset - offset of word in the EEPROM to read
   13467  * data - word read from the EEPROM
   13468  * words - number of words to read
   13469  *****************************************************************************/
   13470 static int
   13471 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13472 {
   13473 	int32_t	 rv = 0;
   13474 	uint32_t flash_bank = 0;
   13475 	uint32_t act_offset = 0;
   13476 	uint32_t bank_offset = 0;
   13477 	uint16_t word = 0;
   13478 	uint16_t i = 0;
   13479 
   13480 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13481 		device_xname(sc->sc_dev), __func__));
   13482 
   13483 	if (sc->nvm.acquire(sc) != 0)
   13484 		return -1;
   13485 
   13486 	/*
   13487 	 * We need to know which is the valid flash bank.  In the event
   13488 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13489 	 * managing flash_bank. So it cannot be trusted and needs
   13490 	 * to be updated with each read.
   13491 	 */
   13492 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13493 	if (rv) {
   13494 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13495 			device_xname(sc->sc_dev)));
   13496 		flash_bank = 0;
   13497 	}
   13498 
   13499 	/*
   13500 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13501 	 * size
   13502 	 */
   13503 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13504 
   13505 	for (i = 0; i < words; i++) {
   13506 		/* The NVM part needs a byte offset, hence * 2 */
   13507 		act_offset = bank_offset + ((offset + i) * 2);
   13508 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13509 		if (rv) {
   13510 			aprint_error_dev(sc->sc_dev,
   13511 			    "%s: failed to read NVM\n", __func__);
   13512 			break;
   13513 		}
   13514 		data[i] = word;
   13515 	}
   13516 
   13517 	sc->nvm.release(sc);
   13518 	return rv;
   13519 }
   13520 
   13521 /******************************************************************************
   13522  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13523  * register.
   13524  *
   13525  * sc - Struct containing variables accessed by shared code
   13526  * offset - offset of word in the EEPROM to read
   13527  * data - word read from the EEPROM
   13528  * words - number of words to read
   13529  *****************************************************************************/
   13530 static int
   13531 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13532 {
   13533 	int32_t	 rv = 0;
   13534 	uint32_t flash_bank = 0;
   13535 	uint32_t act_offset = 0;
   13536 	uint32_t bank_offset = 0;
   13537 	uint32_t dword = 0;
   13538 	uint16_t i = 0;
   13539 
   13540 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13541 		device_xname(sc->sc_dev), __func__));
   13542 
   13543 	if (sc->nvm.acquire(sc) != 0)
   13544 		return -1;
   13545 
   13546 	/*
   13547 	 * We need to know which is the valid flash bank.  In the event
   13548 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13549 	 * managing flash_bank. So it cannot be trusted and needs
   13550 	 * to be updated with each read.
   13551 	 */
   13552 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13553 	if (rv) {
   13554 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13555 			device_xname(sc->sc_dev)));
   13556 		flash_bank = 0;
   13557 	}
   13558 
   13559 	/*
   13560 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13561 	 * size
   13562 	 */
   13563 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13564 
   13565 	for (i = 0; i < words; i++) {
   13566 		/* The NVM part needs a byte offset, hence * 2 */
   13567 		act_offset = bank_offset + ((offset + i) * 2);
   13568 		/* but we must read dword aligned, so mask ... */
   13569 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13570 		if (rv) {
   13571 			aprint_error_dev(sc->sc_dev,
   13572 			    "%s: failed to read NVM\n", __func__);
   13573 			break;
   13574 		}
   13575 		/* ... and pick out low or high word */
   13576 		if ((act_offset & 0x2) == 0)
   13577 			data[i] = (uint16_t)(dword & 0xFFFF);
   13578 		else
   13579 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13580 	}
   13581 
   13582 	sc->nvm.release(sc);
   13583 	return rv;
   13584 }
   13585 
   13586 /* iNVM */
   13587 
   13588 static int
   13589 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13590 {
   13591 	int32_t	 rv = 0;
   13592 	uint32_t invm_dword;
   13593 	uint16_t i;
   13594 	uint8_t record_type, word_address;
   13595 
   13596 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13597 		device_xname(sc->sc_dev), __func__));
   13598 
   13599 	for (i = 0; i < INVM_SIZE; i++) {
   13600 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13601 		/* Get record type */
   13602 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13603 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13604 			break;
   13605 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13606 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13607 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13608 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13609 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13610 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13611 			if (word_address == address) {
   13612 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13613 				rv = 0;
   13614 				break;
   13615 			}
   13616 		}
   13617 	}
   13618 
   13619 	return rv;
   13620 }
   13621 
   13622 static int
   13623 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13624 {
   13625 	int rv = 0;
   13626 	int i;
   13627 
   13628 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13629 		device_xname(sc->sc_dev), __func__));
   13630 
   13631 	if (sc->nvm.acquire(sc) != 0)
   13632 		return -1;
   13633 
   13634 	for (i = 0; i < words; i++) {
   13635 		switch (offset + i) {
   13636 		case NVM_OFF_MACADDR:
   13637 		case NVM_OFF_MACADDR1:
   13638 		case NVM_OFF_MACADDR2:
   13639 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13640 			if (rv != 0) {
   13641 				data[i] = 0xffff;
   13642 				rv = -1;
   13643 			}
   13644 			break;
   13645 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13646 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13647 			if (rv != 0) {
   13648 				*data = INVM_DEFAULT_AL;
   13649 				rv = 0;
   13650 			}
   13651 			break;
   13652 		case NVM_OFF_CFG2:
   13653 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13654 			if (rv != 0) {
   13655 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13656 				rv = 0;
   13657 			}
   13658 			break;
   13659 		case NVM_OFF_CFG4:
   13660 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13661 			if (rv != 0) {
   13662 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13663 				rv = 0;
   13664 			}
   13665 			break;
   13666 		case NVM_OFF_LED_1_CFG:
   13667 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13668 			if (rv != 0) {
   13669 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13670 				rv = 0;
   13671 			}
   13672 			break;
   13673 		case NVM_OFF_LED_0_2_CFG:
   13674 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13675 			if (rv != 0) {
   13676 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13677 				rv = 0;
   13678 			}
   13679 			break;
   13680 		case NVM_OFF_ID_LED_SETTINGS:
   13681 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13682 			if (rv != 0) {
   13683 				*data = ID_LED_RESERVED_FFFF;
   13684 				rv = 0;
   13685 			}
   13686 			break;
   13687 		default:
   13688 			DPRINTF(WM_DEBUG_NVM,
   13689 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13690 			*data = NVM_RESERVED_WORD;
   13691 			break;
   13692 		}
   13693 	}
   13694 
   13695 	sc->nvm.release(sc);
   13696 	return rv;
   13697 }
   13698 
   13699 /* Lock, detecting NVM type, validate checksum, version and read */
   13700 
   13701 static int
   13702 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13703 {
   13704 	uint32_t eecd = 0;
   13705 
   13706 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13707 	    || sc->sc_type == WM_T_82583) {
   13708 		eecd = CSR_READ(sc, WMREG_EECD);
   13709 
   13710 		/* Isolate bits 15 & 16 */
   13711 		eecd = ((eecd >> 15) & 0x03);
   13712 
   13713 		/* If both bits are set, device is Flash type */
   13714 		if (eecd == 0x03)
   13715 			return 0;
   13716 	}
   13717 	return 1;
   13718 }
   13719 
   13720 static int
   13721 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13722 {
   13723 	uint32_t eec;
   13724 
   13725 	eec = CSR_READ(sc, WMREG_EEC);
   13726 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13727 		return 1;
   13728 
   13729 	return 0;
   13730 }
   13731 
   13732 /*
   13733  * wm_nvm_validate_checksum
   13734  *
   13735  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13736  */
   13737 static int
   13738 wm_nvm_validate_checksum(struct wm_softc *sc)
   13739 {
   13740 	uint16_t checksum;
   13741 	uint16_t eeprom_data;
   13742 #ifdef WM_DEBUG
   13743 	uint16_t csum_wordaddr, valid_checksum;
   13744 #endif
   13745 	int i;
   13746 
   13747 	checksum = 0;
   13748 
   13749 	/* Don't check for I211 */
   13750 	if (sc->sc_type == WM_T_I211)
   13751 		return 0;
   13752 
   13753 #ifdef WM_DEBUG
   13754 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13755 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13756 		csum_wordaddr = NVM_OFF_COMPAT;
   13757 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13758 	} else {
   13759 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13760 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13761 	}
   13762 
   13763 	/* Dump EEPROM image for debug */
   13764 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13765 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13766 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13767 		/* XXX PCH_SPT? */
   13768 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13769 		if ((eeprom_data & valid_checksum) == 0)
   13770 			DPRINTF(WM_DEBUG_NVM,
   13771 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13772 				device_xname(sc->sc_dev), eeprom_data,
   13773 				    valid_checksum));
   13774 	}
   13775 
   13776 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13777 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13778 		for (i = 0; i < NVM_SIZE; i++) {
   13779 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13780 				printf("XXXX ");
   13781 			else
   13782 				printf("%04hx ", eeprom_data);
   13783 			if (i % 8 == 7)
   13784 				printf("\n");
   13785 		}
   13786 	}
   13787 
   13788 #endif /* WM_DEBUG */
   13789 
   13790 	for (i = 0; i < NVM_SIZE; i++) {
   13791 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13792 			return 1;
   13793 		checksum += eeprom_data;
   13794 	}
   13795 
   13796 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13797 #ifdef WM_DEBUG
   13798 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13799 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13800 #endif
   13801 	}
   13802 
   13803 	return 0;
   13804 }
   13805 
   13806 static void
   13807 wm_nvm_version_invm(struct wm_softc *sc)
   13808 {
   13809 	uint32_t dword;
   13810 
   13811 	/*
   13812 	 * Linux's code to decode version is very strange, so we don't
   13813 	 * obey that algorithm and just use word 61 as the document.
   13814 	 * Perhaps it's not perfect though...
   13815 	 *
   13816 	 * Example:
   13817 	 *
   13818 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13819 	 */
   13820 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13821 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13822 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13823 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13824 }
   13825 
   13826 static void
   13827 wm_nvm_version(struct wm_softc *sc)
   13828 {
   13829 	uint16_t major, minor, build, patch;
   13830 	uint16_t uid0, uid1;
   13831 	uint16_t nvm_data;
   13832 	uint16_t off;
   13833 	bool check_version = false;
   13834 	bool check_optionrom = false;
   13835 	bool have_build = false;
   13836 	bool have_uid = true;
   13837 
   13838 	/*
   13839 	 * Version format:
   13840 	 *
   13841 	 * XYYZ
   13842 	 * X0YZ
   13843 	 * X0YY
   13844 	 *
   13845 	 * Example:
   13846 	 *
   13847 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13848 	 *	82571	0x50a6	5.10.6?
   13849 	 *	82572	0x506a	5.6.10?
   13850 	 *	82572EI	0x5069	5.6.9?
   13851 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13852 	 *		0x2013	2.1.3?
   13853 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13854 	 * ICH8+82567	0x0040	0.4.0?
   13855 	 * ICH9+82566	0x1040	1.4.0?
   13856 	 *ICH10+82567	0x0043	0.4.3?
   13857 	 *  PCH+82577	0x00c1	0.12.1?
   13858 	 * PCH2+82579	0x00d3	0.13.3?
   13859 	 *		0x00d4	0.13.4?
   13860 	 *  LPT+I218	0x0023	0.2.3?
   13861 	 *  SPT+I219	0x0084	0.8.4?
   13862 	 *  CNP+I219	0x0054	0.5.4?
   13863 	 */
   13864 
   13865 	/*
   13866 	 * XXX
   13867 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   13868 	 * I've never seen on real 82574 hardware with such small SPI ROM.
   13869 	 */
   13870 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13871 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13872 		have_uid = false;
   13873 
   13874 	switch (sc->sc_type) {
   13875 	case WM_T_82571:
   13876 	case WM_T_82572:
   13877 	case WM_T_82574:
   13878 	case WM_T_82583:
   13879 		check_version = true;
   13880 		check_optionrom = true;
   13881 		have_build = true;
   13882 		break;
   13883 	case WM_T_ICH8:
   13884 	case WM_T_ICH9:
   13885 	case WM_T_ICH10:
   13886 	case WM_T_PCH:
   13887 	case WM_T_PCH2:
   13888 	case WM_T_PCH_LPT:
   13889 	case WM_T_PCH_SPT:
   13890 	case WM_T_PCH_CNP:
   13891 		check_version = true;
   13892 		have_build = true;
   13893 		have_uid = false;
   13894 		break;
   13895 	case WM_T_82575:
   13896 	case WM_T_82576:
   13897 	case WM_T_82580:
   13898 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13899 			check_version = true;
   13900 		break;
   13901 	case WM_T_I211:
   13902 		wm_nvm_version_invm(sc);
   13903 		have_uid = false;
   13904 		goto printver;
   13905 	case WM_T_I210:
   13906 		if (!wm_nvm_flash_presence_i210(sc)) {
   13907 			wm_nvm_version_invm(sc);
   13908 			have_uid = false;
   13909 			goto printver;
   13910 		}
   13911 		/* FALLTHROUGH */
   13912 	case WM_T_I350:
   13913 	case WM_T_I354:
   13914 		check_version = true;
   13915 		check_optionrom = true;
   13916 		break;
   13917 	default:
   13918 		return;
   13919 	}
   13920 	if (check_version
   13921 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13922 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13923 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13924 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13925 			build = nvm_data & NVM_BUILD_MASK;
   13926 			have_build = true;
   13927 		} else
   13928 			minor = nvm_data & 0x00ff;
   13929 
   13930 		/* Decimal */
   13931 		minor = (minor / 16) * 10 + (minor % 16);
   13932 		sc->sc_nvm_ver_major = major;
   13933 		sc->sc_nvm_ver_minor = minor;
   13934 
   13935 printver:
   13936 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13937 		    sc->sc_nvm_ver_minor);
   13938 		if (have_build) {
   13939 			sc->sc_nvm_ver_build = build;
   13940 			aprint_verbose(".%d", build);
   13941 		}
   13942 	}
   13943 
   13944 	/* Assume the Option ROM area is at avove NVM_SIZE */
   13945 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13946 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13947 		/* Option ROM Version */
   13948 		if ((off != 0x0000) && (off != 0xffff)) {
   13949 			int rv;
   13950 
   13951 			off += NVM_COMBO_VER_OFF;
   13952 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13953 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13954 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13955 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13956 				/* 16bits */
   13957 				major = uid0 >> 8;
   13958 				build = (uid0 << 8) | (uid1 >> 8);
   13959 				patch = uid1 & 0x00ff;
   13960 				aprint_verbose(", option ROM Version %d.%d.%d",
   13961 				    major, build, patch);
   13962 			}
   13963 		}
   13964 	}
   13965 
   13966 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13967 		aprint_verbose(", Image Unique ID %08x",
   13968 		    ((uint32_t)uid1 << 16) | uid0);
   13969 }
   13970 
   13971 /*
   13972  * wm_nvm_read:
   13973  *
   13974  *	Read data from the serial EEPROM.
   13975  */
   13976 static int
   13977 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13978 {
   13979 	int rv;
   13980 
   13981 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13982 		device_xname(sc->sc_dev), __func__));
   13983 
   13984 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13985 		return -1;
   13986 
   13987 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13988 
   13989 	return rv;
   13990 }
   13991 
   13992 /*
   13993  * Hardware semaphores.
   13994  * Very complexed...
   13995  */
   13996 
   13997 static int
   13998 wm_get_null(struct wm_softc *sc)
   13999 {
   14000 
   14001 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14002 		device_xname(sc->sc_dev), __func__));
   14003 	return 0;
   14004 }
   14005 
   14006 static void
   14007 wm_put_null(struct wm_softc *sc)
   14008 {
   14009 
   14010 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14011 		device_xname(sc->sc_dev), __func__));
   14012 	return;
   14013 }
   14014 
   14015 static int
   14016 wm_get_eecd(struct wm_softc *sc)
   14017 {
   14018 	uint32_t reg;
   14019 	int x;
   14020 
   14021 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14022 		device_xname(sc->sc_dev), __func__));
   14023 
   14024 	reg = CSR_READ(sc, WMREG_EECD);
   14025 
   14026 	/* Request EEPROM access. */
   14027 	reg |= EECD_EE_REQ;
   14028 	CSR_WRITE(sc, WMREG_EECD, reg);
   14029 
   14030 	/* ..and wait for it to be granted. */
   14031 	for (x = 0; x < 1000; x++) {
   14032 		reg = CSR_READ(sc, WMREG_EECD);
   14033 		if (reg & EECD_EE_GNT)
   14034 			break;
   14035 		delay(5);
   14036 	}
   14037 	if ((reg & EECD_EE_GNT) == 0) {
   14038 		aprint_error_dev(sc->sc_dev,
   14039 		    "could not acquire EEPROM GNT\n");
   14040 		reg &= ~EECD_EE_REQ;
   14041 		CSR_WRITE(sc, WMREG_EECD, reg);
   14042 		return -1;
   14043 	}
   14044 
   14045 	return 0;
   14046 }
   14047 
   14048 static void
   14049 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14050 {
   14051 
   14052 	*eecd |= EECD_SK;
   14053 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14054 	CSR_WRITE_FLUSH(sc);
   14055 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14056 		delay(1);
   14057 	else
   14058 		delay(50);
   14059 }
   14060 
   14061 static void
   14062 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14063 {
   14064 
   14065 	*eecd &= ~EECD_SK;
   14066 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14067 	CSR_WRITE_FLUSH(sc);
   14068 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14069 		delay(1);
   14070 	else
   14071 		delay(50);
   14072 }
   14073 
   14074 static void
   14075 wm_put_eecd(struct wm_softc *sc)
   14076 {
   14077 	uint32_t reg;
   14078 
   14079 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14080 		device_xname(sc->sc_dev), __func__));
   14081 
   14082 	/* Stop nvm */
   14083 	reg = CSR_READ(sc, WMREG_EECD);
   14084 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14085 		/* Pull CS high */
   14086 		reg |= EECD_CS;
   14087 		wm_nvm_eec_clock_lower(sc, &reg);
   14088 	} else {
   14089 		/* CS on Microwire is active-high */
   14090 		reg &= ~(EECD_CS | EECD_DI);
   14091 		CSR_WRITE(sc, WMREG_EECD, reg);
   14092 		wm_nvm_eec_clock_raise(sc, &reg);
   14093 		wm_nvm_eec_clock_lower(sc, &reg);
   14094 	}
   14095 
   14096 	reg = CSR_READ(sc, WMREG_EECD);
   14097 	reg &= ~EECD_EE_REQ;
   14098 	CSR_WRITE(sc, WMREG_EECD, reg);
   14099 
   14100 	return;
   14101 }
   14102 
   14103 /*
   14104  * Get hardware semaphore.
   14105  * Same as e1000_get_hw_semaphore_generic()
   14106  */
   14107 static int
   14108 wm_get_swsm_semaphore(struct wm_softc *sc)
   14109 {
   14110 	int32_t timeout;
   14111 	uint32_t swsm;
   14112 
   14113 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14114 		device_xname(sc->sc_dev), __func__));
   14115 	KASSERT(sc->sc_nvm_wordsize > 0);
   14116 
   14117 retry:
   14118 	/* Get the SW semaphore. */
   14119 	timeout = sc->sc_nvm_wordsize + 1;
   14120 	while (timeout) {
   14121 		swsm = CSR_READ(sc, WMREG_SWSM);
   14122 
   14123 		if ((swsm & SWSM_SMBI) == 0)
   14124 			break;
   14125 
   14126 		delay(50);
   14127 		timeout--;
   14128 	}
   14129 
   14130 	if (timeout == 0) {
   14131 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14132 			/*
   14133 			 * In rare circumstances, the SW semaphore may already
   14134 			 * be held unintentionally. Clear the semaphore once
   14135 			 * before giving up.
   14136 			 */
   14137 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14138 			wm_put_swsm_semaphore(sc);
   14139 			goto retry;
   14140 		}
   14141 		aprint_error_dev(sc->sc_dev,
   14142 		    "could not acquire SWSM SMBI\n");
   14143 		return 1;
   14144 	}
   14145 
   14146 	/* Get the FW semaphore. */
   14147 	timeout = sc->sc_nvm_wordsize + 1;
   14148 	while (timeout) {
   14149 		swsm = CSR_READ(sc, WMREG_SWSM);
   14150 		swsm |= SWSM_SWESMBI;
   14151 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14152 		/* If we managed to set the bit we got the semaphore. */
   14153 		swsm = CSR_READ(sc, WMREG_SWSM);
   14154 		if (swsm & SWSM_SWESMBI)
   14155 			break;
   14156 
   14157 		delay(50);
   14158 		timeout--;
   14159 	}
   14160 
   14161 	if (timeout == 0) {
   14162 		aprint_error_dev(sc->sc_dev,
   14163 		    "could not acquire SWSM SWESMBI\n");
   14164 		/* Release semaphores */
   14165 		wm_put_swsm_semaphore(sc);
   14166 		return 1;
   14167 	}
   14168 	return 0;
   14169 }
   14170 
   14171 /*
   14172  * Put hardware semaphore.
   14173  * Same as e1000_put_hw_semaphore_generic()
   14174  */
   14175 static void
   14176 wm_put_swsm_semaphore(struct wm_softc *sc)
   14177 {
   14178 	uint32_t swsm;
   14179 
   14180 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14181 		device_xname(sc->sc_dev), __func__));
   14182 
   14183 	swsm = CSR_READ(sc, WMREG_SWSM);
   14184 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14185 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14186 }
   14187 
   14188 /*
   14189  * Get SW/FW semaphore.
   14190  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14191  */
   14192 static int
   14193 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14194 {
   14195 	uint32_t swfw_sync;
   14196 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14197 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14198 	int timeout;
   14199 
   14200 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14201 		device_xname(sc->sc_dev), __func__));
   14202 
   14203 	if (sc->sc_type == WM_T_80003)
   14204 		timeout = 50;
   14205 	else
   14206 		timeout = 200;
   14207 
   14208 	while (timeout) {
   14209 		if (wm_get_swsm_semaphore(sc)) {
   14210 			aprint_error_dev(sc->sc_dev,
   14211 			    "%s: failed to get semaphore\n",
   14212 			    __func__);
   14213 			return 1;
   14214 		}
   14215 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14216 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14217 			swfw_sync |= swmask;
   14218 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14219 			wm_put_swsm_semaphore(sc);
   14220 			return 0;
   14221 		}
   14222 		wm_put_swsm_semaphore(sc);
   14223 		delay(5000);
   14224 		timeout--;
   14225 	}
   14226 	device_printf(sc->sc_dev,
   14227 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14228 	    mask, swfw_sync);
   14229 	return 1;
   14230 }
   14231 
   14232 static void
   14233 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14234 {
   14235 	uint32_t swfw_sync;
   14236 
   14237 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14238 		device_xname(sc->sc_dev), __func__));
   14239 
   14240 	while (wm_get_swsm_semaphore(sc) != 0)
   14241 		continue;
   14242 
   14243 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14244 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14245 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14246 
   14247 	wm_put_swsm_semaphore(sc);
   14248 }
   14249 
   14250 static int
   14251 wm_get_nvm_80003(struct wm_softc *sc)
   14252 {
   14253 	int rv;
   14254 
   14255 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14256 		device_xname(sc->sc_dev), __func__));
   14257 
   14258 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14259 		aprint_error_dev(sc->sc_dev,
   14260 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14261 		return rv;
   14262 	}
   14263 
   14264 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14265 	    && (rv = wm_get_eecd(sc)) != 0) {
   14266 		aprint_error_dev(sc->sc_dev,
   14267 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14268 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14269 		return rv;
   14270 	}
   14271 
   14272 	return 0;
   14273 }
   14274 
   14275 static void
   14276 wm_put_nvm_80003(struct wm_softc *sc)
   14277 {
   14278 
   14279 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14280 		device_xname(sc->sc_dev), __func__));
   14281 
   14282 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14283 		wm_put_eecd(sc);
   14284 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14285 }
   14286 
   14287 static int
   14288 wm_get_nvm_82571(struct wm_softc *sc)
   14289 {
   14290 	int rv;
   14291 
   14292 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14293 		device_xname(sc->sc_dev), __func__));
   14294 
   14295 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14296 		return rv;
   14297 
   14298 	switch (sc->sc_type) {
   14299 	case WM_T_82573:
   14300 		break;
   14301 	default:
   14302 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14303 			rv = wm_get_eecd(sc);
   14304 		break;
   14305 	}
   14306 
   14307 	if (rv != 0) {
   14308 		aprint_error_dev(sc->sc_dev,
   14309 		    "%s: failed to get semaphore\n",
   14310 		    __func__);
   14311 		wm_put_swsm_semaphore(sc);
   14312 	}
   14313 
   14314 	return rv;
   14315 }
   14316 
   14317 static void
   14318 wm_put_nvm_82571(struct wm_softc *sc)
   14319 {
   14320 
   14321 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14322 		device_xname(sc->sc_dev), __func__));
   14323 
   14324 	switch (sc->sc_type) {
   14325 	case WM_T_82573:
   14326 		break;
   14327 	default:
   14328 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14329 			wm_put_eecd(sc);
   14330 		break;
   14331 	}
   14332 
   14333 	wm_put_swsm_semaphore(sc);
   14334 }
   14335 
   14336 static int
   14337 wm_get_phy_82575(struct wm_softc *sc)
   14338 {
   14339 
   14340 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14341 		device_xname(sc->sc_dev), __func__));
   14342 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14343 }
   14344 
   14345 static void
   14346 wm_put_phy_82575(struct wm_softc *sc)
   14347 {
   14348 
   14349 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14350 		device_xname(sc->sc_dev), __func__));
   14351 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14352 }
   14353 
   14354 static int
   14355 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14356 {
   14357 	uint32_t ext_ctrl;
   14358 	int timeout = 200;
   14359 
   14360 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14361 		device_xname(sc->sc_dev), __func__));
   14362 
   14363 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14364 	for (timeout = 0; timeout < 200; timeout++) {
   14365 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14366 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14367 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14368 
   14369 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14370 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14371 			return 0;
   14372 		delay(5000);
   14373 	}
   14374 	device_printf(sc->sc_dev,
   14375 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14376 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14377 	return 1;
   14378 }
   14379 
   14380 static void
   14381 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14382 {
   14383 	uint32_t ext_ctrl;
   14384 
   14385 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14386 		device_xname(sc->sc_dev), __func__));
   14387 
   14388 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14389 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14390 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14391 
   14392 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14393 }
   14394 
   14395 static int
   14396 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14397 {
   14398 	uint32_t ext_ctrl;
   14399 	int timeout;
   14400 
   14401 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14402 		device_xname(sc->sc_dev), __func__));
   14403 	mutex_enter(sc->sc_ich_phymtx);
   14404 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14405 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14406 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14407 			break;
   14408 		delay(1000);
   14409 	}
   14410 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14411 		device_printf(sc->sc_dev,
   14412 		    "SW has already locked the resource\n");
   14413 		goto out;
   14414 	}
   14415 
   14416 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14417 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14418 	for (timeout = 0; timeout < 1000; timeout++) {
   14419 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14420 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14421 			break;
   14422 		delay(1000);
   14423 	}
   14424 	if (timeout >= 1000) {
   14425 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14426 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14427 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14428 		goto out;
   14429 	}
   14430 	return 0;
   14431 
   14432 out:
   14433 	mutex_exit(sc->sc_ich_phymtx);
   14434 	return 1;
   14435 }
   14436 
   14437 static void
   14438 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14439 {
   14440 	uint32_t ext_ctrl;
   14441 
   14442 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14443 		device_xname(sc->sc_dev), __func__));
   14444 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14445 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14446 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14447 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14448 	} else {
   14449 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14450 	}
   14451 
   14452 	mutex_exit(sc->sc_ich_phymtx);
   14453 }
   14454 
   14455 static int
   14456 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14457 {
   14458 
   14459 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14460 		device_xname(sc->sc_dev), __func__));
   14461 	mutex_enter(sc->sc_ich_nvmmtx);
   14462 
   14463 	return 0;
   14464 }
   14465 
   14466 static void
   14467 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14468 {
   14469 
   14470 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14471 		device_xname(sc->sc_dev), __func__));
   14472 	mutex_exit(sc->sc_ich_nvmmtx);
   14473 }
   14474 
   14475 static int
   14476 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14477 {
   14478 	int i = 0;
   14479 	uint32_t reg;
   14480 
   14481 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14482 		device_xname(sc->sc_dev), __func__));
   14483 
   14484 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14485 	do {
   14486 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14487 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14488 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14489 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14490 			break;
   14491 		delay(2*1000);
   14492 		i++;
   14493 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14494 
   14495 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14496 		wm_put_hw_semaphore_82573(sc);
   14497 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14498 		    device_xname(sc->sc_dev));
   14499 		return -1;
   14500 	}
   14501 
   14502 	return 0;
   14503 }
   14504 
   14505 static void
   14506 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14507 {
   14508 	uint32_t reg;
   14509 
   14510 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14511 		device_xname(sc->sc_dev), __func__));
   14512 
   14513 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14514 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14515 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14516 }
   14517 
   14518 /*
   14519  * Management mode and power management related subroutines.
   14520  * BMC, AMT, suspend/resume and EEE.
   14521  */
   14522 
   14523 #ifdef WM_WOL
   14524 static int
   14525 wm_check_mng_mode(struct wm_softc *sc)
   14526 {
   14527 	int rv;
   14528 
   14529 	switch (sc->sc_type) {
   14530 	case WM_T_ICH8:
   14531 	case WM_T_ICH9:
   14532 	case WM_T_ICH10:
   14533 	case WM_T_PCH:
   14534 	case WM_T_PCH2:
   14535 	case WM_T_PCH_LPT:
   14536 	case WM_T_PCH_SPT:
   14537 	case WM_T_PCH_CNP:
   14538 		rv = wm_check_mng_mode_ich8lan(sc);
   14539 		break;
   14540 	case WM_T_82574:
   14541 	case WM_T_82583:
   14542 		rv = wm_check_mng_mode_82574(sc);
   14543 		break;
   14544 	case WM_T_82571:
   14545 	case WM_T_82572:
   14546 	case WM_T_82573:
   14547 	case WM_T_80003:
   14548 		rv = wm_check_mng_mode_generic(sc);
   14549 		break;
   14550 	default:
   14551 		/* Noting to do */
   14552 		rv = 0;
   14553 		break;
   14554 	}
   14555 
   14556 	return rv;
   14557 }
   14558 
   14559 static int
   14560 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14561 {
   14562 	uint32_t fwsm;
   14563 
   14564 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14565 
   14566 	if (((fwsm & FWSM_FW_VALID) != 0)
   14567 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14568 		return 1;
   14569 
   14570 	return 0;
   14571 }
   14572 
   14573 static int
   14574 wm_check_mng_mode_82574(struct wm_softc *sc)
   14575 {
   14576 	uint16_t data;
   14577 
   14578 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14579 
   14580 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14581 		return 1;
   14582 
   14583 	return 0;
   14584 }
   14585 
   14586 static int
   14587 wm_check_mng_mode_generic(struct wm_softc *sc)
   14588 {
   14589 	uint32_t fwsm;
   14590 
   14591 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14592 
   14593 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14594 		return 1;
   14595 
   14596 	return 0;
   14597 }
   14598 #endif /* WM_WOL */
   14599 
   14600 static int
   14601 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14602 {
   14603 	uint32_t manc, fwsm, factps;
   14604 
   14605 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14606 		return 0;
   14607 
   14608 	manc = CSR_READ(sc, WMREG_MANC);
   14609 
   14610 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14611 		device_xname(sc->sc_dev), manc));
   14612 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14613 		return 0;
   14614 
   14615 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14616 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14617 		factps = CSR_READ(sc, WMREG_FACTPS);
   14618 		if (((factps & FACTPS_MNGCG) == 0)
   14619 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14620 			return 1;
   14621 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14622 		uint16_t data;
   14623 
   14624 		factps = CSR_READ(sc, WMREG_FACTPS);
   14625 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14626 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14627 			device_xname(sc->sc_dev), factps, data));
   14628 		if (((factps & FACTPS_MNGCG) == 0)
   14629 		    && ((data & NVM_CFG2_MNGM_MASK)
   14630 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14631 			return 1;
   14632 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14633 	    && ((manc & MANC_ASF_EN) == 0))
   14634 		return 1;
   14635 
   14636 	return 0;
   14637 }
   14638 
   14639 static bool
   14640 wm_phy_resetisblocked(struct wm_softc *sc)
   14641 {
   14642 	bool blocked = false;
   14643 	uint32_t reg;
   14644 	int i = 0;
   14645 
   14646 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14647 		device_xname(sc->sc_dev), __func__));
   14648 
   14649 	switch (sc->sc_type) {
   14650 	case WM_T_ICH8:
   14651 	case WM_T_ICH9:
   14652 	case WM_T_ICH10:
   14653 	case WM_T_PCH:
   14654 	case WM_T_PCH2:
   14655 	case WM_T_PCH_LPT:
   14656 	case WM_T_PCH_SPT:
   14657 	case WM_T_PCH_CNP:
   14658 		do {
   14659 			reg = CSR_READ(sc, WMREG_FWSM);
   14660 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14661 				blocked = true;
   14662 				delay(10*1000);
   14663 				continue;
   14664 			}
   14665 			blocked = false;
   14666 		} while (blocked && (i++ < 30));
   14667 		return blocked;
   14668 		break;
   14669 	case WM_T_82571:
   14670 	case WM_T_82572:
   14671 	case WM_T_82573:
   14672 	case WM_T_82574:
   14673 	case WM_T_82583:
   14674 	case WM_T_80003:
   14675 		reg = CSR_READ(sc, WMREG_MANC);
   14676 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14677 			return true;
   14678 		else
   14679 			return false;
   14680 		break;
   14681 	default:
   14682 		/* No problem */
   14683 		break;
   14684 	}
   14685 
   14686 	return false;
   14687 }
   14688 
   14689 static void
   14690 wm_get_hw_control(struct wm_softc *sc)
   14691 {
   14692 	uint32_t reg;
   14693 
   14694 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14695 		device_xname(sc->sc_dev), __func__));
   14696 
   14697 	if (sc->sc_type == WM_T_82573) {
   14698 		reg = CSR_READ(sc, WMREG_SWSM);
   14699 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14700 	} else if (sc->sc_type >= WM_T_82571) {
   14701 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14702 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14703 	}
   14704 }
   14705 
   14706 static void
   14707 wm_release_hw_control(struct wm_softc *sc)
   14708 {
   14709 	uint32_t reg;
   14710 
   14711 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14712 		device_xname(sc->sc_dev), __func__));
   14713 
   14714 	if (sc->sc_type == WM_T_82573) {
   14715 		reg = CSR_READ(sc, WMREG_SWSM);
   14716 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14717 	} else if (sc->sc_type >= WM_T_82571) {
   14718 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14719 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14720 	}
   14721 }
   14722 
   14723 static void
   14724 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14725 {
   14726 	uint32_t reg;
   14727 
   14728 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14729 		device_xname(sc->sc_dev), __func__));
   14730 
   14731 	if (sc->sc_type < WM_T_PCH2)
   14732 		return;
   14733 
   14734 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14735 
   14736 	if (gate)
   14737 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14738 	else
   14739 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14740 
   14741 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14742 }
   14743 
   14744 static int
   14745 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14746 {
   14747 	uint32_t fwsm, reg;
   14748 	int rv = 0;
   14749 
   14750 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14751 		device_xname(sc->sc_dev), __func__));
   14752 
   14753 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14754 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14755 
   14756 	/* Disable ULP */
   14757 	wm_ulp_disable(sc);
   14758 
   14759 	/* Acquire PHY semaphore */
   14760 	rv = sc->phy.acquire(sc);
   14761 	if (rv != 0) {
   14762 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14763 		device_xname(sc->sc_dev), __func__));
   14764 		return -1;
   14765 	}
   14766 
   14767 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14768 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14769 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14770 	 */
   14771 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14772 	switch (sc->sc_type) {
   14773 	case WM_T_PCH_LPT:
   14774 	case WM_T_PCH_SPT:
   14775 	case WM_T_PCH_CNP:
   14776 		if (wm_phy_is_accessible_pchlan(sc))
   14777 			break;
   14778 
   14779 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14780 		 * forcing MAC to SMBus mode first.
   14781 		 */
   14782 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14783 		reg |= CTRL_EXT_FORCE_SMBUS;
   14784 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14785 #if 0
   14786 		/* XXX Isn't this required??? */
   14787 		CSR_WRITE_FLUSH(sc);
   14788 #endif
   14789 		/* Wait 50 milliseconds for MAC to finish any retries
   14790 		 * that it might be trying to perform from previous
   14791 		 * attempts to acknowledge any phy read requests.
   14792 		 */
   14793 		delay(50 * 1000);
   14794 		/* FALLTHROUGH */
   14795 	case WM_T_PCH2:
   14796 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14797 			break;
   14798 		/* FALLTHROUGH */
   14799 	case WM_T_PCH:
   14800 		if (sc->sc_type == WM_T_PCH)
   14801 			if ((fwsm & FWSM_FW_VALID) != 0)
   14802 				break;
   14803 
   14804 		if (wm_phy_resetisblocked(sc) == true) {
   14805 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14806 			break;
   14807 		}
   14808 
   14809 		/* Toggle LANPHYPC Value bit */
   14810 		wm_toggle_lanphypc_pch_lpt(sc);
   14811 
   14812 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14813 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14814 				break;
   14815 
   14816 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14817 			 * so ensure that the MAC is also out of SMBus mode
   14818 			 */
   14819 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14820 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14821 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14822 
   14823 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14824 				break;
   14825 			rv = -1;
   14826 		}
   14827 		break;
   14828 	default:
   14829 		break;
   14830 	}
   14831 
   14832 	/* Release semaphore */
   14833 	sc->phy.release(sc);
   14834 
   14835 	if (rv == 0) {
   14836 		/* Check to see if able to reset PHY.  Print error if not */
   14837 		if (wm_phy_resetisblocked(sc)) {
   14838 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14839 			goto out;
   14840 		}
   14841 
   14842 		/* Reset the PHY before any access to it.  Doing so, ensures
   14843 		 * that the PHY is in a known good state before we read/write
   14844 		 * PHY registers.  The generic reset is sufficient here,
   14845 		 * because we haven't determined the PHY type yet.
   14846 		 */
   14847 		if (wm_reset_phy(sc) != 0)
   14848 			goto out;
   14849 
   14850 		/* On a successful reset, possibly need to wait for the PHY
   14851 		 * to quiesce to an accessible state before returning control
   14852 		 * to the calling function.  If the PHY does not quiesce, then
   14853 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
   14854 		 *  the PHY is in.
   14855 		 */
   14856 		if (wm_phy_resetisblocked(sc))
   14857 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14858 	}
   14859 
   14860 out:
   14861 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14862 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14863 		delay(10*1000);
   14864 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14865 	}
   14866 
   14867 	return 0;
   14868 }
   14869 
   14870 static void
   14871 wm_init_manageability(struct wm_softc *sc)
   14872 {
   14873 
   14874 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14875 		device_xname(sc->sc_dev), __func__));
   14876 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14877 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14878 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14879 
   14880 		/* Disable hardware interception of ARP */
   14881 		manc &= ~MANC_ARP_EN;
   14882 
   14883 		/* Enable receiving management packets to the host */
   14884 		if (sc->sc_type >= WM_T_82571) {
   14885 			manc |= MANC_EN_MNG2HOST;
   14886 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14887 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14888 		}
   14889 
   14890 		CSR_WRITE(sc, WMREG_MANC, manc);
   14891 	}
   14892 }
   14893 
   14894 static void
   14895 wm_release_manageability(struct wm_softc *sc)
   14896 {
   14897 
   14898 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14899 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14900 
   14901 		manc |= MANC_ARP_EN;
   14902 		if (sc->sc_type >= WM_T_82571)
   14903 			manc &= ~MANC_EN_MNG2HOST;
   14904 
   14905 		CSR_WRITE(sc, WMREG_MANC, manc);
   14906 	}
   14907 }
   14908 
   14909 static void
   14910 wm_get_wakeup(struct wm_softc *sc)
   14911 {
   14912 
   14913 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14914 	switch (sc->sc_type) {
   14915 	case WM_T_82573:
   14916 	case WM_T_82583:
   14917 		sc->sc_flags |= WM_F_HAS_AMT;
   14918 		/* FALLTHROUGH */
   14919 	case WM_T_80003:
   14920 	case WM_T_82575:
   14921 	case WM_T_82576:
   14922 	case WM_T_82580:
   14923 	case WM_T_I350:
   14924 	case WM_T_I354:
   14925 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14926 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14927 		/* FALLTHROUGH */
   14928 	case WM_T_82541:
   14929 	case WM_T_82541_2:
   14930 	case WM_T_82547:
   14931 	case WM_T_82547_2:
   14932 	case WM_T_82571:
   14933 	case WM_T_82572:
   14934 	case WM_T_82574:
   14935 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14936 		break;
   14937 	case WM_T_ICH8:
   14938 	case WM_T_ICH9:
   14939 	case WM_T_ICH10:
   14940 	case WM_T_PCH:
   14941 	case WM_T_PCH2:
   14942 	case WM_T_PCH_LPT:
   14943 	case WM_T_PCH_SPT:
   14944 	case WM_T_PCH_CNP:
   14945 		sc->sc_flags |= WM_F_HAS_AMT;
   14946 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14947 		break;
   14948 	default:
   14949 		break;
   14950 	}
   14951 
   14952 	/* 1: HAS_MANAGE */
   14953 	if (wm_enable_mng_pass_thru(sc) != 0)
   14954 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14955 
   14956 	/*
   14957 	 * Note that the WOL flags is set after the resetting of the eeprom
   14958 	 * stuff
   14959 	 */
   14960 }
   14961 
   14962 /*
   14963  * Unconfigure Ultra Low Power mode.
   14964  * Only for I217 and newer (see below).
   14965  */
   14966 static int
   14967 wm_ulp_disable(struct wm_softc *sc)
   14968 {
   14969 	uint32_t reg;
   14970 	uint16_t phyreg;
   14971 	int i = 0, rv = 0;
   14972 
   14973 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14974 		device_xname(sc->sc_dev), __func__));
   14975 	/* Exclude old devices */
   14976 	if ((sc->sc_type < WM_T_PCH_LPT)
   14977 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14978 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14979 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14980 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14981 		return 0;
   14982 
   14983 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14984 		/* Request ME un-configure ULP mode in the PHY */
   14985 		reg = CSR_READ(sc, WMREG_H2ME);
   14986 		reg &= ~H2ME_ULP;
   14987 		reg |= H2ME_ENFORCE_SETTINGS;
   14988 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14989 
   14990 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14991 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14992 			if (i++ == 30) {
   14993 				device_printf(sc->sc_dev, "%s timed out\n",
   14994 				    __func__);
   14995 				return -1;
   14996 			}
   14997 			delay(10 * 1000);
   14998 		}
   14999 		reg = CSR_READ(sc, WMREG_H2ME);
   15000 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15001 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15002 
   15003 		return 0;
   15004 	}
   15005 
   15006 	/* Acquire semaphore */
   15007 	rv = sc->phy.acquire(sc);
   15008 	if (rv != 0) {
   15009 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   15010 		device_xname(sc->sc_dev), __func__));
   15011 		return -1;
   15012 	}
   15013 
   15014 	/* Toggle LANPHYPC */
   15015 	wm_toggle_lanphypc_pch_lpt(sc);
   15016 
   15017 	/* Unforce SMBus mode in PHY */
   15018 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15019 	if (rv != 0) {
   15020 		uint32_t reg2;
   15021 
   15022 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15023 			__func__);
   15024 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15025 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15026 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15027 		delay(50 * 1000);
   15028 
   15029 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15030 		    &phyreg);
   15031 		if (rv != 0)
   15032 			goto release;
   15033 	}
   15034 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15035 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15036 
   15037 	/* Unforce SMBus mode in MAC */
   15038 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15039 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15040 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15041 
   15042 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15043 	if (rv != 0)
   15044 		goto release;
   15045 	phyreg |= HV_PM_CTRL_K1_ENA;
   15046 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15047 
   15048 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15049 		&phyreg);
   15050 	if (rv != 0)
   15051 		goto release;
   15052 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15053 	    | I218_ULP_CONFIG1_STICKY_ULP
   15054 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15055 	    | I218_ULP_CONFIG1_WOL_HOST
   15056 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15057 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15058 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15059 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15060 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15061 	phyreg |= I218_ULP_CONFIG1_START;
   15062 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15063 
   15064 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15065 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15066 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15067 
   15068 release:
   15069 	/* Release semaphore */
   15070 	sc->phy.release(sc);
   15071 	wm_gmii_reset(sc);
   15072 	delay(50 * 1000);
   15073 
   15074 	return rv;
   15075 }
   15076 
   15077 /* WOL in the newer chipset interfaces (pchlan) */
   15078 static int
   15079 wm_enable_phy_wakeup(struct wm_softc *sc)
   15080 {
   15081 	device_t dev = sc->sc_dev;
   15082 	uint32_t mreg, moff;
   15083 	uint16_t wuce, wuc, wufc, preg;
   15084 	int i, rv;
   15085 
   15086 	KASSERT(sc->sc_type >= WM_T_PCH);
   15087 
   15088 	/* Copy MAC RARs to PHY RARs */
   15089 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15090 
   15091 	/* Activate PHY wakeup */
   15092 	rv = sc->phy.acquire(sc);
   15093 	if (rv != 0) {
   15094 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15095 		    __func__);
   15096 		return rv;
   15097 	}
   15098 
   15099 	/*
   15100 	 * Enable access to PHY wakeup registers.
   15101 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15102 	 */
   15103 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15104 	if (rv != 0) {
   15105 		device_printf(dev,
   15106 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15107 		goto release;
   15108 	}
   15109 
   15110 	/* Copy MAC MTA to PHY MTA */
   15111 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15112 		uint16_t lo, hi;
   15113 
   15114 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15115 		lo = (uint16_t)(mreg & 0xffff);
   15116 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15117 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15118 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15119 	}
   15120 
   15121 	/* Configure PHY Rx Control register */
   15122 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15123 	mreg = CSR_READ(sc, WMREG_RCTL);
   15124 	if (mreg & RCTL_UPE)
   15125 		preg |= BM_RCTL_UPE;
   15126 	if (mreg & RCTL_MPE)
   15127 		preg |= BM_RCTL_MPE;
   15128 	preg &= ~(BM_RCTL_MO_MASK);
   15129 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15130 	if (moff != 0)
   15131 		preg |= moff << BM_RCTL_MO_SHIFT;
   15132 	if (mreg & RCTL_BAM)
   15133 		preg |= BM_RCTL_BAM;
   15134 	if (mreg & RCTL_PMCF)
   15135 		preg |= BM_RCTL_PMCF;
   15136 	mreg = CSR_READ(sc, WMREG_CTRL);
   15137 	if (mreg & CTRL_RFCE)
   15138 		preg |= BM_RCTL_RFCE;
   15139 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15140 
   15141 	wuc = WUC_APME | WUC_PME_EN;
   15142 	wufc = WUFC_MAG;
   15143 	/* Enable PHY wakeup in MAC register */
   15144 	CSR_WRITE(sc, WMREG_WUC,
   15145 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15146 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15147 
   15148 	/* Configure and enable PHY wakeup in PHY registers */
   15149 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15150 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15151 
   15152 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15153 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15154 
   15155 release:
   15156 	sc->phy.release(sc);
   15157 
   15158 	return 0;
   15159 }
   15160 
   15161 /* Power down workaround on D3 */
   15162 static void
   15163 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15164 {
   15165 	uint32_t reg;
   15166 	uint16_t phyreg;
   15167 	int i;
   15168 
   15169 	for (i = 0; i < 2; i++) {
   15170 		/* Disable link */
   15171 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15172 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15173 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15174 
   15175 		/*
   15176 		 * Call gig speed drop workaround on Gig disable before
   15177 		 * accessing any PHY registers
   15178 		 */
   15179 		if (sc->sc_type == WM_T_ICH8)
   15180 			wm_gig_downshift_workaround_ich8lan(sc);
   15181 
   15182 		/* Write VR power-down enable */
   15183 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15184 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15185 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15186 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15187 
   15188 		/* Read it back and test */
   15189 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15190 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15191 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15192 			break;
   15193 
   15194 		/* Issue PHY reset and repeat at most one more time */
   15195 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15196 	}
   15197 }
   15198 
   15199 /*
   15200  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15201  *  @sc: pointer to the HW structure
   15202  *
   15203  *  During S0 to Sx transition, it is possible the link remains at gig
   15204  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15205  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15206  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15207  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15208  *  needs to be written.
   15209  *  Parts that support (and are linked to a partner which support) EEE in
   15210  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15211  *  than 10Mbps w/o EEE.
   15212  */
   15213 static void
   15214 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15215 {
   15216 	device_t dev = sc->sc_dev;
   15217 	struct ethercom *ec = &sc->sc_ethercom;
   15218 	uint32_t phy_ctrl;
   15219 	int rv;
   15220 
   15221 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15222 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15223 
   15224 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15225 
   15226 	if (sc->sc_phytype == WMPHY_I217) {
   15227 		uint16_t devid = sc->sc_pcidevid;
   15228 
   15229 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15230 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15231 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15232 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15233 		    (sc->sc_type >= WM_T_PCH_SPT))
   15234 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15235 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15236 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15237 
   15238 		if (sc->phy.acquire(sc) != 0)
   15239 			goto out;
   15240 
   15241 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15242 			uint16_t eee_advert;
   15243 
   15244 			rv = wm_read_emi_reg_locked(dev,
   15245 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15246 			if (rv)
   15247 				goto release;
   15248 
   15249 			/*
   15250 			 * Disable LPLU if both link partners support 100BaseT
   15251 			 * EEE and 100Full is advertised on both ends of the
   15252 			 * link, and enable Auto Enable LPI since there will
   15253 			 * be no driver to enable LPI while in Sx.
   15254 			 */
   15255 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15256 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15257 				uint16_t anar, phy_reg;
   15258 
   15259 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15260 				    &anar);
   15261 				if (anar & ANAR_TX_FD) {
   15262 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15263 					    PHY_CTRL_NOND0A_LPLU);
   15264 
   15265 					/* Set Auto Enable LPI after link up */
   15266 					sc->phy.readreg_locked(dev, 2,
   15267 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15268 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15269 					sc->phy.writereg_locked(dev, 2,
   15270 					    I217_LPI_GPIO_CTRL, phy_reg);
   15271 				}
   15272 			}
   15273 		}
   15274 
   15275 		/*
   15276 		 * For i217 Intel Rapid Start Technology support,
   15277 		 * when the system is going into Sx and no manageability engine
   15278 		 * is present, the driver must configure proxy to reset only on
   15279 		 * power good.	LPI (Low Power Idle) state must also reset only
   15280 		 * on power good, as well as the MTA (Multicast table array).
   15281 		 * The SMBus release must also be disabled on LCD reset.
   15282 		 */
   15283 
   15284 		/*
   15285 		 * Enable MTA to reset for Intel Rapid Start Technology
   15286 		 * Support
   15287 		 */
   15288 
   15289 release:
   15290 		sc->phy.release(sc);
   15291 	}
   15292 out:
   15293 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15294 
   15295 	if (sc->sc_type == WM_T_ICH8)
   15296 		wm_gig_downshift_workaround_ich8lan(sc);
   15297 
   15298 	if (sc->sc_type >= WM_T_PCH) {
   15299 		wm_oem_bits_config_ich8lan(sc, false);
   15300 
   15301 		/* Reset PHY to activate OEM bits on 82577/8 */
   15302 		if (sc->sc_type == WM_T_PCH)
   15303 			wm_reset_phy(sc);
   15304 
   15305 		if (sc->phy.acquire(sc) != 0)
   15306 			return;
   15307 		wm_write_smbus_addr(sc);
   15308 		sc->phy.release(sc);
   15309 	}
   15310 }
   15311 
   15312 /*
   15313  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15314  *  @sc: pointer to the HW structure
   15315  *
   15316  *  During Sx to S0 transitions on non-managed devices or managed devices
   15317  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15318  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15319  *  the PHY.
   15320  *  On i217, setup Intel Rapid Start Technology.
   15321  */
   15322 static int
   15323 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15324 {
   15325 	device_t dev = sc->sc_dev;
   15326 	int rv;
   15327 
   15328 	if (sc->sc_type < WM_T_PCH2)
   15329 		return 0;
   15330 
   15331 	rv = wm_init_phy_workarounds_pchlan(sc);
   15332 	if (rv != 0)
   15333 		return -1;
   15334 
   15335 	/* For i217 Intel Rapid Start Technology support when the system
   15336 	 * is transitioning from Sx and no manageability engine is present
   15337 	 * configure SMBus to restore on reset, disable proxy, and enable
   15338 	 * the reset on MTA (Multicast table array).
   15339 	 */
   15340 	if (sc->sc_phytype == WMPHY_I217) {
   15341 		uint16_t phy_reg;
   15342 
   15343 		if (sc->phy.acquire(sc) != 0)
   15344 			return -1;
   15345 
   15346 		/* Clear Auto Enable LPI after link up */
   15347 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15348 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15349 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15350 
   15351 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15352 			/* Restore clear on SMB if no manageability engine
   15353 			 * is present
   15354 			 */
   15355 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15356 			    &phy_reg);
   15357 			if (rv != 0)
   15358 				goto release;
   15359 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15360 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15361 
   15362 			/* Disable Proxy */
   15363 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15364 		}
   15365 		/* Enable reset on MTA */
   15366 		sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15367 		if (rv != 0)
   15368 			goto release;
   15369 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15370 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15371 
   15372 release:
   15373 		sc->phy.release(sc);
   15374 		return rv;
   15375 	}
   15376 
   15377 	return 0;
   15378 }
   15379 
   15380 static void
   15381 wm_enable_wakeup(struct wm_softc *sc)
   15382 {
   15383 	uint32_t reg, pmreg;
   15384 	pcireg_t pmode;
   15385 	int rv = 0;
   15386 
   15387 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15388 		device_xname(sc->sc_dev), __func__));
   15389 
   15390 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15391 	    &pmreg, NULL) == 0)
   15392 		return;
   15393 
   15394 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15395 		goto pme;
   15396 
   15397 	/* Advertise the wakeup capability */
   15398 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15399 	    | CTRL_SWDPIN(3));
   15400 
   15401 	/* Keep the laser running on fiber adapters */
   15402 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15403 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15404 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15405 		reg |= CTRL_EXT_SWDPIN(3);
   15406 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15407 	}
   15408 
   15409 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15410 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15411 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15412 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15413 		wm_suspend_workarounds_ich8lan(sc);
   15414 
   15415 #if 0	/* For the multicast packet */
   15416 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15417 	reg |= WUFC_MC;
   15418 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15419 #endif
   15420 
   15421 	if (sc->sc_type >= WM_T_PCH) {
   15422 		rv = wm_enable_phy_wakeup(sc);
   15423 		if (rv != 0)
   15424 			goto pme;
   15425 	} else {
   15426 		/* Enable wakeup by the MAC */
   15427 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15428 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15429 	}
   15430 
   15431 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15432 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15433 		|| (sc->sc_type == WM_T_PCH2))
   15434 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15435 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15436 
   15437 pme:
   15438 	/* Request PME */
   15439 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15440 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15441 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15442 		/* For WOL */
   15443 		pmode |= PCI_PMCSR_PME_EN;
   15444 	} else {
   15445 		/* Disable WOL */
   15446 		pmode &= ~PCI_PMCSR_PME_EN;
   15447 	}
   15448 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15449 }
   15450 
   15451 /* Disable ASPM L0s and/or L1 for workaround */
   15452 static void
   15453 wm_disable_aspm(struct wm_softc *sc)
   15454 {
   15455 	pcireg_t reg, mask = 0;
   15456 	unsigned const char *str = "";
   15457 
   15458 	/*
   15459 	 *  Only for PCIe device which has PCIe capability in the PCI config
   15460 	 * space.
   15461 	 */
   15462 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15463 		return;
   15464 
   15465 	switch (sc->sc_type) {
   15466 	case WM_T_82571:
   15467 	case WM_T_82572:
   15468 		/*
   15469 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15470 		 * State Power management L1 State (ASPM L1).
   15471 		 */
   15472 		mask = PCIE_LCSR_ASPM_L1;
   15473 		str = "L1 is";
   15474 		break;
   15475 	case WM_T_82573:
   15476 	case WM_T_82574:
   15477 	case WM_T_82583:
   15478 		/*
   15479 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15480 		 *
   15481 		 * The 82574 and 82583 does not support PCIe ASPM L0s with
   15482 		 * some chipset.  The document of 82574 and 82583 says that
   15483 		 * disabling L0s with some specific chipset is sufficient,
   15484 		 * but we follow as of the Intel em driver does.
   15485 		 *
   15486 		 * References:
   15487 		 * Errata 8 of the Specification Update of i82573.
   15488 		 * Errata 20 of the Specification Update of i82574.
   15489 		 * Errata 9 of the Specification Update of i82583.
   15490 		 */
   15491 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15492 		str = "L0s and L1 are";
   15493 		break;
   15494 	default:
   15495 		return;
   15496 	}
   15497 
   15498 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15499 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15500 	reg &= ~mask;
   15501 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15502 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15503 
   15504 	/* Print only in wm_attach() */
   15505 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15506 		aprint_verbose_dev(sc->sc_dev,
   15507 		    "ASPM %s disabled to workaround the errata.\n", str);
   15508 }
   15509 
   15510 /* LPLU */
   15511 
   15512 static void
   15513 wm_lplu_d0_disable(struct wm_softc *sc)
   15514 {
   15515 	struct mii_data *mii = &sc->sc_mii;
   15516 	uint32_t reg;
   15517 	uint16_t phyval;
   15518 
   15519 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15520 		device_xname(sc->sc_dev), __func__));
   15521 
   15522 	if (sc->sc_phytype == WMPHY_IFE)
   15523 		return;
   15524 
   15525 	switch (sc->sc_type) {
   15526 	case WM_T_82571:
   15527 	case WM_T_82572:
   15528 	case WM_T_82573:
   15529 	case WM_T_82575:
   15530 	case WM_T_82576:
   15531 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15532 		phyval &= ~PMR_D0_LPLU;
   15533 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15534 		break;
   15535 	case WM_T_82580:
   15536 	case WM_T_I350:
   15537 	case WM_T_I210:
   15538 	case WM_T_I211:
   15539 		reg = CSR_READ(sc, WMREG_PHPM);
   15540 		reg &= ~PHPM_D0A_LPLU;
   15541 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15542 		break;
   15543 	case WM_T_82574:
   15544 	case WM_T_82583:
   15545 	case WM_T_ICH8:
   15546 	case WM_T_ICH9:
   15547 	case WM_T_ICH10:
   15548 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15549 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15550 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15551 		CSR_WRITE_FLUSH(sc);
   15552 		break;
   15553 	case WM_T_PCH:
   15554 	case WM_T_PCH2:
   15555 	case WM_T_PCH_LPT:
   15556 	case WM_T_PCH_SPT:
   15557 	case WM_T_PCH_CNP:
   15558 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15559 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15560 		if (wm_phy_resetisblocked(sc) == false)
   15561 			phyval |= HV_OEM_BITS_ANEGNOW;
   15562 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15563 		break;
   15564 	default:
   15565 		break;
   15566 	}
   15567 }
   15568 
   15569 /* EEE */
   15570 
   15571 static int
   15572 wm_set_eee_i350(struct wm_softc *sc)
   15573 {
   15574 	struct ethercom *ec = &sc->sc_ethercom;
   15575 	uint32_t ipcnfg, eeer;
   15576 	uint32_t ipcnfg_mask
   15577 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15578 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15579 
   15580 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15581 
   15582 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15583 	eeer = CSR_READ(sc, WMREG_EEER);
   15584 
   15585 	/* Enable or disable per user setting */
   15586 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15587 		ipcnfg |= ipcnfg_mask;
   15588 		eeer |= eeer_mask;
   15589 	} else {
   15590 		ipcnfg &= ~ipcnfg_mask;
   15591 		eeer &= ~eeer_mask;
   15592 	}
   15593 
   15594 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15595 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15596 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15597 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15598 
   15599 	return 0;
   15600 }
   15601 
   15602 static int
   15603 wm_set_eee_pchlan(struct wm_softc *sc)
   15604 {
   15605 	device_t dev = sc->sc_dev;
   15606 	struct ethercom *ec = &sc->sc_ethercom;
   15607 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15608 	int rv = 0;
   15609 
   15610 	switch (sc->sc_phytype) {
   15611 	case WMPHY_82579:
   15612 		lpa = I82579_EEE_LP_ABILITY;
   15613 		pcs_status = I82579_EEE_PCS_STATUS;
   15614 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15615 		break;
   15616 	case WMPHY_I217:
   15617 		lpa = I217_EEE_LP_ABILITY;
   15618 		pcs_status = I217_EEE_PCS_STATUS;
   15619 		adv_addr = I217_EEE_ADVERTISEMENT;
   15620 		break;
   15621 	default:
   15622 		return 0;
   15623 	}
   15624 
   15625 	if (sc->phy.acquire(sc)) {
   15626 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15627 		return 0;
   15628 	}
   15629 
   15630 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15631 	if (rv != 0)
   15632 		goto release;
   15633 
   15634 	/* Clear bits that enable EEE in various speeds */
   15635 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15636 
   15637 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15638 		/* Save off link partner's EEE ability */
   15639 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15640 		if (rv != 0)
   15641 			goto release;
   15642 
   15643 		/* Read EEE advertisement */
   15644 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15645 			goto release;
   15646 
   15647 		/*
   15648 		 * Enable EEE only for speeds in which the link partner is
   15649 		 * EEE capable and for which we advertise EEE.
   15650 		 */
   15651 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15652 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15653 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15654 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15655 			if ((data & ANLPAR_TX_FD) != 0)
   15656 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15657 			else {
   15658 				/*
   15659 				 * EEE is not supported in 100Half, so ignore
   15660 				 * partner's EEE in 100 ability if full-duplex
   15661 				 * is not advertised.
   15662 				 */
   15663 				sc->eee_lp_ability
   15664 				    &= ~AN_EEEADVERT_100_TX;
   15665 			}
   15666 		}
   15667 	}
   15668 
   15669 	if (sc->sc_phytype == WMPHY_82579) {
   15670 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15671 		if (rv != 0)
   15672 			goto release;
   15673 
   15674 		data &= ~I82579_LPI_PLL_SHUT_100;
   15675 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15676 	}
   15677 
   15678 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15679 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15680 		goto release;
   15681 
   15682 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15683 release:
   15684 	sc->phy.release(sc);
   15685 
   15686 	return rv;
   15687 }
   15688 
   15689 static int
   15690 wm_set_eee(struct wm_softc *sc)
   15691 {
   15692 	struct ethercom *ec = &sc->sc_ethercom;
   15693 
   15694 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15695 		return 0;
   15696 
   15697 	if (sc->sc_type == WM_T_I354) {
   15698 		/* I354 uses an external PHY */
   15699 		return 0; /* not yet */
   15700 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15701 		return wm_set_eee_i350(sc);
   15702 	else if (sc->sc_type >= WM_T_PCH2)
   15703 		return wm_set_eee_pchlan(sc);
   15704 
   15705 	return 0;
   15706 }
   15707 
   15708 /*
   15709  * Workarounds (mainly PHY related).
   15710  * Basically, PHY's workarounds are in the PHY drivers.
   15711  */
   15712 
   15713 /* Work-around for 82566 Kumeran PCS lock loss */
   15714 static int
   15715 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15716 {
   15717 	struct mii_data *mii = &sc->sc_mii;
   15718 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15719 	int i, reg, rv;
   15720 	uint16_t phyreg;
   15721 
   15722 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15723 		device_xname(sc->sc_dev), __func__));
   15724 
   15725 	/* If the link is not up, do nothing */
   15726 	if ((status & STATUS_LU) == 0)
   15727 		return 0;
   15728 
   15729 	/* Nothing to do if the link is other than 1Gbps */
   15730 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15731 		return 0;
   15732 
   15733 	for (i = 0; i < 10; i++) {
   15734 		/* read twice */
   15735 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15736 		if (rv != 0)
   15737 			return rv;
   15738 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15739 		if (rv != 0)
   15740 			return rv;
   15741 
   15742 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15743 			goto out;	/* GOOD! */
   15744 
   15745 		/* Reset the PHY */
   15746 		wm_reset_phy(sc);
   15747 		delay(5*1000);
   15748 	}
   15749 
   15750 	/* Disable GigE link negotiation */
   15751 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15752 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15753 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15754 
   15755 	/*
   15756 	 * Call gig speed drop workaround on Gig disable before accessing
   15757 	 * any PHY registers.
   15758 	 */
   15759 	wm_gig_downshift_workaround_ich8lan(sc);
   15760 
   15761 out:
   15762 	return 0;
   15763 }
   15764 
   15765 /*
   15766  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15767  *  @sc: pointer to the HW structure
   15768  *
   15769  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15770  *  LPLU, Gig disable, MDIC PHY reset):
   15771  *    1) Set Kumeran Near-end loopback
   15772  *    2) Clear Kumeran Near-end loopback
   15773  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15774  */
   15775 static void
   15776 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15777 {
   15778 	uint16_t kmreg;
   15779 
   15780 	/* Only for igp3 */
   15781 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15782 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15783 			return;
   15784 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15785 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15786 			return;
   15787 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15788 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15789 	}
   15790 }
   15791 
   15792 /*
   15793  * Workaround for pch's PHYs
   15794  * XXX should be moved to new PHY driver?
   15795  */
   15796 static int
   15797 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15798 {
   15799 	device_t dev = sc->sc_dev;
   15800 	struct mii_data *mii = &sc->sc_mii;
   15801 	struct mii_softc *child;
   15802 	uint16_t phy_data, phyrev = 0;
   15803 	int phytype = sc->sc_phytype;
   15804 	int rv;
   15805 
   15806 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15807 		device_xname(dev), __func__));
   15808 	KASSERT(sc->sc_type == WM_T_PCH);
   15809 
   15810 	/* Set MDIO slow mode before any other MDIO access */
   15811 	if (phytype == WMPHY_82577)
   15812 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15813 			return rv;
   15814 
   15815 	child = LIST_FIRST(&mii->mii_phys);
   15816 	if (child != NULL)
   15817 		phyrev = child->mii_mpd_rev;
   15818 
   15819 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   15820 	if ((child != NULL) &&
   15821 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15822 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15823 		/* Disable generation of early preamble (0x4431) */
   15824 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15825 		    &phy_data);
   15826 		if (rv != 0)
   15827 			return rv;
   15828 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15829 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15830 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15831 		    phy_data);
   15832 		if (rv != 0)
   15833 			return rv;
   15834 
   15835 		/* Preamble tuning for SSC */
   15836 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15837 		if (rv != 0)
   15838 			return rv;
   15839 	}
   15840 
   15841 	/* 82578 */
   15842 	if (phytype == WMPHY_82578) {
   15843 		/*
   15844 		 * Return registers to default by doing a soft reset then
   15845 		 * writing 0x3140 to the control register
   15846 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15847 		 */
   15848 		if ((child != NULL) && (phyrev < 2)) {
   15849 			PHY_RESET(child);
   15850 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15851 			if (rv != 0)
   15852 				return rv;
   15853 		}
   15854 	}
   15855 
   15856 	/* Select page 0 */
   15857 	if ((rv = sc->phy.acquire(sc)) != 0)
   15858 		return rv;
   15859 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   15860 	sc->phy.release(sc);
   15861 	if (rv != 0)
   15862 		return rv;
   15863 
   15864 	/*
   15865 	 * Configure the K1 Si workaround during phy reset assuming there is
   15866 	 * link so that it disables K1 if link is in 1Gbps.
   15867 	 */
   15868 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15869 		return rv;
   15870 
   15871 	/* Workaround for link disconnects on a busy hub in half duplex */
   15872 	rv = sc->phy.acquire(sc);
   15873 	if (rv)
   15874 		return rv;
   15875 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15876 	if (rv)
   15877 		goto release;
   15878 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15879 	    phy_data & 0x00ff);
   15880 	if (rv)
   15881 		goto release;
   15882 
   15883 	/* Set MSE higher to enable link to stay up when noise is high */
   15884 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15885 release:
   15886 	sc->phy.release(sc);
   15887 
   15888 	return rv;
   15889 }
   15890 
   15891 /*
   15892  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15893  *  @sc:   pointer to the HW structure
   15894  */
   15895 static void
   15896 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15897 {
   15898 
   15899 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15900 		device_xname(sc->sc_dev), __func__));
   15901 
   15902 	if (sc->phy.acquire(sc) != 0)
   15903 		return;
   15904 
   15905 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   15906 
   15907 	sc->phy.release(sc);
   15908 }
   15909 
   15910 static void
   15911 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   15912 {
   15913 	device_t dev = sc->sc_dev;
   15914 	uint32_t mac_reg;
   15915 	uint16_t i, wuce;
   15916 	int count;
   15917 
   15918 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15919 		device_xname(dev), __func__));
   15920 
   15921 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15922 		return;
   15923 
   15924 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15925 	count = wm_rar_count(sc);
   15926 	for (i = 0; i < count; i++) {
   15927 		uint16_t lo, hi;
   15928 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15929 		lo = (uint16_t)(mac_reg & 0xffff);
   15930 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15931 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15932 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15933 
   15934 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15935 		lo = (uint16_t)(mac_reg & 0xffff);
   15936 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15937 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15938 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15939 	}
   15940 
   15941 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15942 }
   15943 
   15944 /*
   15945  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   15946  *  with 82579 PHY
   15947  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   15948  */
   15949 static int
   15950 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   15951 {
   15952 	device_t dev = sc->sc_dev;
   15953 	int rar_count;
   15954 	int rv;
   15955 	uint32_t mac_reg;
   15956 	uint16_t dft_ctrl, data;
   15957 	uint16_t i;
   15958 
   15959 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15960 		device_xname(dev), __func__));
   15961 
   15962 	if (sc->sc_type < WM_T_PCH2)
   15963 		return 0;
   15964 
   15965 	/* Acquire PHY semaphore */
   15966 	rv = sc->phy.acquire(sc);
   15967 	if (rv != 0)
   15968 		return rv;
   15969 
   15970 	/* Disable Rx path while enabling/disabling workaround */
   15971 	sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   15972 	if (rv != 0)
   15973 		goto out;
   15974 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   15975 	    dft_ctrl | (1 << 14));
   15976 	if (rv != 0)
   15977 		goto out;
   15978 
   15979 	if (enable) {
   15980 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   15981 		 * SHRAL/H) and initial CRC values to the MAC
   15982 		 */
   15983 		rar_count = wm_rar_count(sc);
   15984 		for (i = 0; i < rar_count; i++) {
   15985 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   15986 			uint32_t addr_high, addr_low;
   15987 
   15988 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15989 			if (!(addr_high & RAL_AV))
   15990 				continue;
   15991 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15992 			mac_addr[0] = (addr_low & 0xFF);
   15993 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   15994 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   15995 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   15996 			mac_addr[4] = (addr_high & 0xFF);
   15997 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   15998 
   15999 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16000 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16001 		}
   16002 
   16003 		/* Write Rx addresses to the PHY */
   16004 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16005 	}
   16006 
   16007 	/*
   16008 	 * If enable ==
   16009 	 *	true: Enable jumbo frame workaround in the MAC.
   16010 	 *	false: Write MAC register values back to h/w defaults.
   16011 	 */
   16012 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16013 	if (enable) {
   16014 		mac_reg &= ~(1 << 14);
   16015 		mac_reg |= (7 << 15);
   16016 	} else
   16017 		mac_reg &= ~(0xf << 14);
   16018 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16019 
   16020 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16021 	if (enable) {
   16022 		mac_reg |= RCTL_SECRC;
   16023 		sc->sc_rctl |= RCTL_SECRC;
   16024 		sc->sc_flags |= WM_F_CRC_STRIP;
   16025 	} else {
   16026 		mac_reg &= ~RCTL_SECRC;
   16027 		sc->sc_rctl &= ~RCTL_SECRC;
   16028 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16029 	}
   16030 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16031 
   16032 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16033 	if (rv != 0)
   16034 		goto out;
   16035 	if (enable)
   16036 		data |= 1 << 0;
   16037 	else
   16038 		data &= ~(1 << 0);
   16039 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16040 	if (rv != 0)
   16041 		goto out;
   16042 
   16043 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16044 	if (rv != 0)
   16045 		goto out;
   16046 	/*
   16047 	 * XXX FreeBSD and Linux do the same thing that they set the same value
   16048 	 * on both the enable case and the disable case. Is it correct?
   16049 	 */
   16050 	data &= ~(0xf << 8);
   16051 	data |= (0xb << 8);
   16052 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16053 	if (rv != 0)
   16054 		goto out;
   16055 
   16056 	/*
   16057 	 * If enable ==
   16058 	 *	true: Enable jumbo frame workaround in the PHY.
   16059 	 *	false: Write PHY register values back to h/w defaults.
   16060 	 */
   16061 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16062 	if (rv != 0)
   16063 		goto out;
   16064 	data &= ~(0x7F << 5);
   16065 	if (enable)
   16066 		data |= (0x37 << 5);
   16067 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16068 	if (rv != 0)
   16069 		goto out;
   16070 
   16071 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16072 	if (rv != 0)
   16073 		goto out;
   16074 	if (enable)
   16075 		data &= ~(1 << 13);
   16076 	else
   16077 		data |= (1 << 13);
   16078 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16079 	if (rv != 0)
   16080 		goto out;
   16081 
   16082 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16083 	if (rv != 0)
   16084 		goto out;
   16085 	data &= ~(0x3FF << 2);
   16086 	if (enable)
   16087 		data |= (I82579_TX_PTR_GAP << 2);
   16088 	else
   16089 		data |= (0x8 << 2);
   16090 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16091 	if (rv != 0)
   16092 		goto out;
   16093 
   16094 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16095 	    enable ? 0xf100 : 0x7e00);
   16096 	if (rv != 0)
   16097 		goto out;
   16098 
   16099 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16100 	if (rv != 0)
   16101 		goto out;
   16102 	if (enable)
   16103 		data |= 1 << 10;
   16104 	else
   16105 		data &= ~(1 << 10);
   16106 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16107 	if (rv != 0)
   16108 		goto out;
   16109 
   16110 	/* Re-enable Rx path after enabling/disabling workaround */
   16111 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16112 	    dft_ctrl & ~(1 << 14));
   16113 
   16114 out:
   16115 	sc->phy.release(sc);
   16116 
   16117 	return rv;
   16118 }
   16119 
   16120 /*
   16121  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16122  *  done after every PHY reset.
   16123  */
   16124 static int
   16125 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16126 {
   16127 	device_t dev = sc->sc_dev;
   16128 	int rv;
   16129 
   16130 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16131 		device_xname(dev), __func__));
   16132 	KASSERT(sc->sc_type == WM_T_PCH2);
   16133 
   16134 	/* Set MDIO slow mode before any other MDIO access */
   16135 	rv = wm_set_mdio_slow_mode_hv(sc);
   16136 	if (rv != 0)
   16137 		return rv;
   16138 
   16139 	rv = sc->phy.acquire(sc);
   16140 	if (rv != 0)
   16141 		return rv;
   16142 	/* Set MSE higher to enable link to stay up when noise is high */
   16143 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16144 	if (rv != 0)
   16145 		goto release;
   16146 	/* Drop link after 5 times MSE threshold was reached */
   16147 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16148 release:
   16149 	sc->phy.release(sc);
   16150 
   16151 	return rv;
   16152 }
   16153 
   16154 /**
   16155  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16156  *  @link: link up bool flag
   16157  *
   16158  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   16159  *  preventing further DMA write requests.  Workaround the issue by disabling
   16160  *  the de-assertion of the clock request when in 1Gpbs mode.
   16161  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16162  *  speeds in order to avoid Tx hangs.
   16163  **/
   16164 static int
   16165 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16166 {
   16167 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16168 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16169 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16170 	uint16_t phyreg;
   16171 
   16172 	if (link && (speed == STATUS_SPEED_1000)) {
   16173 		sc->phy.acquire(sc);
   16174 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16175 		    &phyreg);
   16176 		if (rv != 0)
   16177 			goto release;
   16178 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16179 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16180 		if (rv != 0)
   16181 			goto release;
   16182 		delay(20);
   16183 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16184 
   16185 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16186 		    &phyreg);
   16187 release:
   16188 		sc->phy.release(sc);
   16189 		return rv;
   16190 	}
   16191 
   16192 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16193 
   16194 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16195 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16196 	    || !link
   16197 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16198 		goto update_fextnvm6;
   16199 
   16200 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16201 
   16202 	/* Clear link status transmit timeout */
   16203 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16204 	if (speed == STATUS_SPEED_100) {
   16205 		/* Set inband Tx timeout to 5x10us for 100Half */
   16206 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16207 
   16208 		/* Do not extend the K1 entry latency for 100Half */
   16209 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16210 	} else {
   16211 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16212 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16213 
   16214 		/* Extend the K1 entry latency for 10 Mbps */
   16215 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16216 	}
   16217 
   16218 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16219 
   16220 update_fextnvm6:
   16221 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16222 	return 0;
   16223 }
   16224 
   16225 /*
   16226  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16227  *  @sc:   pointer to the HW structure
   16228  *  @link: link up bool flag
   16229  *
   16230  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   16231  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   16232  *  If link is down, the function will restore the default K1 setting located
   16233  *  in the NVM.
   16234  */
   16235 static int
   16236 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16237 {
   16238 	int k1_enable = sc->sc_nvm_k1_enabled;
   16239 
   16240 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16241 		device_xname(sc->sc_dev), __func__));
   16242 
   16243 	if (sc->phy.acquire(sc) != 0)
   16244 		return -1;
   16245 
   16246 	if (link) {
   16247 		k1_enable = 0;
   16248 
   16249 		/* Link stall fix for link up */
   16250 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16251 		    0x0100);
   16252 	} else {
   16253 		/* Link stall fix for link down */
   16254 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16255 		    0x4100);
   16256 	}
   16257 
   16258 	wm_configure_k1_ich8lan(sc, k1_enable);
   16259 	sc->phy.release(sc);
   16260 
   16261 	return 0;
   16262 }
   16263 
   16264 /*
   16265  *  wm_k1_workaround_lv - K1 Si workaround
   16266  *  @sc:   pointer to the HW structure
   16267  *
   16268  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   16269  *  Disable K1 for 1000 and 100 speeds
   16270  */
   16271 static int
   16272 wm_k1_workaround_lv(struct wm_softc *sc)
   16273 {
   16274 	uint32_t reg;
   16275 	uint16_t phyreg;
   16276 	int rv;
   16277 
   16278 	if (sc->sc_type != WM_T_PCH2)
   16279 		return 0;
   16280 
   16281 	/* Set K1 beacon duration based on 10Mbps speed */
   16282 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16283 	if (rv != 0)
   16284 		return rv;
   16285 
   16286 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16287 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16288 		if (phyreg &
   16289 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   16290 			/* LV 1G/100 Packet drop issue wa  */
   16291 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16292 			    &phyreg);
   16293 			if (rv != 0)
   16294 				return rv;
   16295 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16296 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16297 			    phyreg);
   16298 			if (rv != 0)
   16299 				return rv;
   16300 		} else {
   16301 			/* For 10Mbps */
   16302 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16303 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16304 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16305 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16306 		}
   16307 	}
   16308 
   16309 	return 0;
   16310 }
   16311 
   16312 /*
   16313  *  wm_link_stall_workaround_hv - Si workaround
   16314  *  @sc: pointer to the HW structure
   16315  *
   16316  *  This function works around a Si bug where the link partner can get
   16317  *  a link up indication before the PHY does. If small packets are sent
   16318  *  by the link partner they can be placed in the packet buffer without
   16319  *  being properly accounted for by the PHY and will stall preventing
   16320  *  further packets from being received.  The workaround is to clear the
   16321  *  packet buffer after the PHY detects link up.
   16322  */
   16323 static int
   16324 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16325 {
   16326 	uint16_t phyreg;
   16327 
   16328 	if (sc->sc_phytype != WMPHY_82578)
   16329 		return 0;
   16330 
   16331 	/* Do not apply workaround if in PHY loopback bit 14 set */
   16332 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16333 	if ((phyreg & BMCR_LOOP) != 0)
   16334 		return 0;
   16335 
   16336 	/* Check if link is up and at 1Gbps */
   16337 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16338 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16339 	    | BM_CS_STATUS_SPEED_MASK;
   16340 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16341 		| BM_CS_STATUS_SPEED_1000))
   16342 		return 0;
   16343 
   16344 	delay(200 * 1000);	/* XXX too big */
   16345 
   16346 	/* Flush the packets in the fifo buffer */
   16347 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16348 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16349 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16350 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16351 
   16352 	return 0;
   16353 }
   16354 
   16355 static int
   16356 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16357 {
   16358 	int rv;
   16359 	uint16_t reg;
   16360 
   16361 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16362 	if (rv != 0)
   16363 		return rv;
   16364 
   16365 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16366 	    reg | HV_KMRN_MDIO_SLOW);
   16367 }
   16368 
   16369 /*
   16370  *  wm_configure_k1_ich8lan - Configure K1 power state
   16371  *  @sc: pointer to the HW structure
   16372  *  @enable: K1 state to configure
   16373  *
   16374  *  Configure the K1 power state based on the provided parameter.
   16375  *  Assumes semaphore already acquired.
   16376  */
   16377 static void
   16378 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16379 {
   16380 	uint32_t ctrl, ctrl_ext, tmp;
   16381 	uint16_t kmreg;
   16382 	int rv;
   16383 
   16384 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16385 
   16386 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16387 	if (rv != 0)
   16388 		return;
   16389 
   16390 	if (k1_enable)
   16391 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16392 	else
   16393 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16394 
   16395 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16396 	if (rv != 0)
   16397 		return;
   16398 
   16399 	delay(20);
   16400 
   16401 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16402 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16403 
   16404 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16405 	tmp |= CTRL_FRCSPD;
   16406 
   16407 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16408 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16409 	CSR_WRITE_FLUSH(sc);
   16410 	delay(20);
   16411 
   16412 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16413 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16414 	CSR_WRITE_FLUSH(sc);
   16415 	delay(20);
   16416 
   16417 	return;
   16418 }
   16419 
   16420 /* special case - for 82575 - need to do manual init ... */
   16421 static void
   16422 wm_reset_init_script_82575(struct wm_softc *sc)
   16423 {
   16424 	/*
   16425 	 * Remark: this is untested code - we have no board without EEPROM
   16426 	 *  same setup as mentioned int the FreeBSD driver for the i82575
   16427 	 */
   16428 
   16429 	/* SerDes configuration via SERDESCTRL */
   16430 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16431 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16432 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16433 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16434 
   16435 	/* CCM configuration via CCMCTL register */
   16436 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16437 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16438 
   16439 	/* PCIe lanes configuration */
   16440 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16441 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16442 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16443 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16444 
   16445 	/* PCIe PLL Configuration */
   16446 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16447 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16448 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16449 }
   16450 
   16451 static void
   16452 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16453 {
   16454 	uint32_t reg;
   16455 	uint16_t nvmword;
   16456 	int rv;
   16457 
   16458 	if (sc->sc_type != WM_T_82580)
   16459 		return;
   16460 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16461 		return;
   16462 
   16463 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16464 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16465 	if (rv != 0) {
   16466 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16467 		    __func__);
   16468 		return;
   16469 	}
   16470 
   16471 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16472 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16473 		reg |= MDICNFG_DEST;
   16474 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16475 		reg |= MDICNFG_COM_MDIO;
   16476 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16477 }
   16478 
   16479 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16480 
   16481 static bool
   16482 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16483 {
   16484 	uint32_t reg;
   16485 	uint16_t id1, id2;
   16486 	int i, rv;
   16487 
   16488 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16489 		device_xname(sc->sc_dev), __func__));
   16490 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16491 
   16492 	id1 = id2 = 0xffff;
   16493 	for (i = 0; i < 2; i++) {
   16494 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16495 		    &id1);
   16496 		if ((rv != 0) || MII_INVALIDID(id1))
   16497 			continue;
   16498 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16499 		    &id2);
   16500 		if ((rv != 0) || MII_INVALIDID(id2))
   16501 			continue;
   16502 		break;
   16503 	}
   16504 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16505 		goto out;
   16506 
   16507 	/*
   16508 	 * In case the PHY needs to be in mdio slow mode,
   16509 	 * set slow mode and try to get the PHY id again.
   16510 	 */
   16511 	rv = 0;
   16512 	if (sc->sc_type < WM_T_PCH_LPT) {
   16513 		sc->phy.release(sc);
   16514 		wm_set_mdio_slow_mode_hv(sc);
   16515 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16516 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16517 		sc->phy.acquire(sc);
   16518 	}
   16519 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16520 		device_printf(sc->sc_dev, "XXX return with false\n");
   16521 		return false;
   16522 	}
   16523 out:
   16524 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16525 		/* Only unforce SMBus if ME is not active */
   16526 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16527 			uint16_t phyreg;
   16528 
   16529 			/* Unforce SMBus mode in PHY */
   16530 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16531 			    CV_SMB_CTRL, &phyreg);
   16532 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16533 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16534 			    CV_SMB_CTRL, phyreg);
   16535 
   16536 			/* Unforce SMBus mode in MAC */
   16537 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16538 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16539 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16540 		}
   16541 	}
   16542 	return true;
   16543 }
   16544 
   16545 static void
   16546 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16547 {
   16548 	uint32_t reg;
   16549 	int i;
   16550 
   16551 	/* Set PHY Config Counter to 50msec */
   16552 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16553 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16554 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16555 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16556 
   16557 	/* Toggle LANPHYPC */
   16558 	reg = CSR_READ(sc, WMREG_CTRL);
   16559 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16560 	reg &= ~CTRL_LANPHYPC_VALUE;
   16561 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16562 	CSR_WRITE_FLUSH(sc);
   16563 	delay(1000);
   16564 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16565 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16566 	CSR_WRITE_FLUSH(sc);
   16567 
   16568 	if (sc->sc_type < WM_T_PCH_LPT)
   16569 		delay(50 * 1000);
   16570 	else {
   16571 		i = 20;
   16572 
   16573 		do {
   16574 			delay(5 * 1000);
   16575 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16576 		    && i--);
   16577 
   16578 		delay(30 * 1000);
   16579 	}
   16580 }
   16581 
   16582 static int
   16583 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16584 {
   16585 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16586 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16587 	uint32_t rxa;
   16588 	uint16_t scale = 0, lat_enc = 0;
   16589 	int32_t obff_hwm = 0;
   16590 	int64_t lat_ns, value;
   16591 
   16592 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16593 		device_xname(sc->sc_dev), __func__));
   16594 
   16595 	if (link) {
   16596 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16597 		uint32_t status;
   16598 		uint16_t speed;
   16599 		pcireg_t preg;
   16600 
   16601 		status = CSR_READ(sc, WMREG_STATUS);
   16602 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16603 		case STATUS_SPEED_10:
   16604 			speed = 10;
   16605 			break;
   16606 		case STATUS_SPEED_100:
   16607 			speed = 100;
   16608 			break;
   16609 		case STATUS_SPEED_1000:
   16610 			speed = 1000;
   16611 			break;
   16612 		default:
   16613 			device_printf(sc->sc_dev, "Unknown speed "
   16614 			    "(status = %08x)\n", status);
   16615 			return -1;
   16616 		}
   16617 
   16618 		/* Rx Packet Buffer Allocation size (KB) */
   16619 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16620 
   16621 		/*
   16622 		 * Determine the maximum latency tolerated by the device.
   16623 		 *
   16624 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16625 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16626 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16627 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16628 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16629 		 */
   16630 		lat_ns = ((int64_t)rxa * 1024 -
   16631 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16632 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16633 		if (lat_ns < 0)
   16634 			lat_ns = 0;
   16635 		else
   16636 			lat_ns /= speed;
   16637 		value = lat_ns;
   16638 
   16639 		while (value > LTRV_VALUE) {
   16640 			scale ++;
   16641 			value = howmany(value, __BIT(5));
   16642 		}
   16643 		if (scale > LTRV_SCALE_MAX) {
   16644 			device_printf(sc->sc_dev,
   16645 			    "Invalid LTR latency scale %d\n", scale);
   16646 			return -1;
   16647 		}
   16648 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16649 
   16650 		/* Determine the maximum latency tolerated by the platform */
   16651 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16652 		    WM_PCI_LTR_CAP_LPT);
   16653 		max_snoop = preg & 0xffff;
   16654 		max_nosnoop = preg >> 16;
   16655 
   16656 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16657 
   16658 		if (lat_enc > max_ltr_enc) {
   16659 			lat_enc = max_ltr_enc;
   16660 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16661 			    * PCI_LTR_SCALETONS(
   16662 				    __SHIFTOUT(lat_enc,
   16663 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16664 		}
   16665 
   16666 		if (lat_ns) {
   16667 			lat_ns *= speed * 1000;
   16668 			lat_ns /= 8;
   16669 			lat_ns /= 1000000000;
   16670 			obff_hwm = (int32_t)(rxa - lat_ns);
   16671 		}
   16672 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   16673 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   16674 			    "(rxa = %d, lat_ns = %d)\n",
   16675 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16676 			return -1;
   16677 		}
   16678 	}
   16679 	/* Snoop and No-Snoop latencies the same */
   16680 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16681 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16682 
   16683 	/* Set OBFF high water mark */
   16684 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16685 	reg |= obff_hwm;
   16686 	CSR_WRITE(sc, WMREG_SVT, reg);
   16687 
   16688 	/* Enable OBFF */
   16689 	reg = CSR_READ(sc, WMREG_SVCR);
   16690 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16691 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16692 
   16693 	return 0;
   16694 }
   16695 
   16696 /*
   16697  * I210 Errata 25 and I211 Errata 10
   16698  * Slow System Clock.
   16699  *
   16700  * Note that this function is called on both FLASH and iNVM case on NetBSD.
   16701  */
   16702 static int
   16703 wm_pll_workaround_i210(struct wm_softc *sc)
   16704 {
   16705 	uint32_t mdicnfg, wuc;
   16706 	uint32_t reg;
   16707 	pcireg_t pcireg;
   16708 	uint32_t pmreg;
   16709 	uint16_t nvmword, tmp_nvmword;
   16710 	uint16_t phyval;
   16711 	bool wa_done = false;
   16712 	int i, rv = 0;
   16713 
   16714 	/* Get Power Management cap offset */
   16715 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16716 	    &pmreg, NULL) == 0)
   16717 		return -1;
   16718 
   16719 	/* Save WUC and MDICNFG registers */
   16720 	wuc = CSR_READ(sc, WMREG_WUC);
   16721 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16722 
   16723 	reg = mdicnfg & ~MDICNFG_DEST;
   16724 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16725 
   16726 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   16727 		/*
   16728 		 * The default value of the Initialization Control Word 1
   16729 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   16730 		 */
   16731 		nvmword = INVM_DEFAULT_AL;
   16732 	}
   16733 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16734 
   16735 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16736 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16737 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16738 
   16739 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16740 			rv = 0;
   16741 			break; /* OK */
   16742 		} else
   16743 			rv = -1;
   16744 
   16745 		wa_done = true;
   16746 		/* Directly reset the internal PHY */
   16747 		reg = CSR_READ(sc, WMREG_CTRL);
   16748 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16749 
   16750 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16751 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16752 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16753 
   16754 		CSR_WRITE(sc, WMREG_WUC, 0);
   16755 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16756 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16757 
   16758 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16759 		    pmreg + PCI_PMCSR);
   16760 		pcireg |= PCI_PMCSR_STATE_D3;
   16761 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16762 		    pmreg + PCI_PMCSR, pcireg);
   16763 		delay(1000);
   16764 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16765 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16766 		    pmreg + PCI_PMCSR, pcireg);
   16767 
   16768 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16769 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16770 
   16771 		/* Restore WUC register */
   16772 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16773 	}
   16774 
   16775 	/* Restore MDICNFG setting */
   16776 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16777 	if (wa_done)
   16778 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16779 	return rv;
   16780 }
   16781 
   16782 static void
   16783 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16784 {
   16785 	uint32_t reg;
   16786 
   16787 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16788 		device_xname(sc->sc_dev), __func__));
   16789 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16790 	    || (sc->sc_type == WM_T_PCH_CNP));
   16791 
   16792 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16793 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16794 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16795 
   16796 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16797 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16798 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16799 }
   16800