/*	$NetBSD: if_wm.c,v 1.679 2020/06/27 13:32:00 jmcneill Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.679 2020/06/27 13:32:00 jmcneill Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
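
/*
 * Illustrative usage (not compiled): under WM_DEBUG, DPRINTF() takes the
 * complete printf() argument list as its second argument, wrapped in its
 * own parentheses, e.g. with a "sc" softc in scope:
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
		device_xname(sc->sc_dev)));
#endif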

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses: one per Tx/Rx queue
 * pair plus one for link state changes.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it (see the illustrative
 * sketch after the macros below).
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
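
/*
 * Illustrative sketch (not compiled; simplified from the usual NetBSD
 * driver pattern, not this driver's exact code): when
 * bus_dmamap_load_mbuf() fails with EFBIG, the chain has more than
 * WM_NTXSEGS segments, so m_defrag() coalesces it before one retry.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m = m_defrag(m0, M_NOWAIT);

		if (m == NULL)
			goto drop;	/* cannot linearize the chain */
		m0 = m;
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	}
#endif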

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
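
/*
 * Worked example: WM_NRXDESC is a power of two, so the AND with
 * WM_NRXDESC_MASK wraps the ring index without a division or branch:
 * WM_NEXTRX(255) == 0 and WM_PREVRX(0) == 255.
 */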

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
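
/*
 * Illustrative sketch (not compiled): these byte offsets are the kind of
 * argument the wm_cdtxsync()/wm_cdrxsync() helpers presumably pass to
 * bus_dmamap_sync(), e.g. for one Rx descriptor at index "start":
 */
#if 0
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#endif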

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
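
/*
 * Illustrative sketch (not compiled): wm_rxpbs_adjust_82580() presumably
 * uses the RXPBS field as an index into the table above, leaving
 * out-of-range values unadjusted:
 */
#if 0
	if (rxpbs < __arraycount(wm_82580_rxpbs_table))
		rxpbs = wm_82580_rxpbs_table[rxpbs];
#endif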

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
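
/*
 * Illustrative expansion (not compiled): WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares, per queue,
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (## is not expanded inside the string literal; the placeholder string
 * merely fixes the buffer size).  WM_Q_EVCNT_ATTACH() then snprintf()s a
 * name such as "txq00txdw" into the buffer and registers the counter
 * with evcnt_attach_dynamic().
 */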

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking; see the
	 * illustrative sketch after this structure.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t 	txq_last_hw_cmd;
	uint8_t 	txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};
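
/*
 * Illustrative sketch (not compiled): the txq_interq pcq(9) above lets
 * multiple CPUs hand packets to a shared Tx queue without blocking.  A
 * producer that cannot enqueue drops the packet; the queue owner later
 * drains the pcq under txq_lock:
 */
#if 0
	/* Producer side (any CPU). */
	if (!pcq_put(txq->txq_interq, m))
		m_freem(m);	/* queue full: drop */

	/* Consumer side (under txq->txq_lock). */
	while ((m = pcq_get(txq->txq_interq)) != NULL) {
		/* ... load the DMA map and fill descriptors ... */
	}
#endif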

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
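
/*
 * Illustrative usage (not compiled): the WM_CORE_* wrappers degrade to
 * no-ops when sc_core_lock is NULL, so callers can be written
 * unconditionally:
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify softc-wide state ... */
	WM_CORE_UNLOCK(sc);
#endif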

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
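
/*
 * Illustrative sketch (not compiled): rxq_tailp always points at the
 * m_next slot where the next fragment belongs, so assembling a
 * multi-buffer jumbo packet costs O(1) per fragment:
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* chain is now empty */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head == m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2 */
#endif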

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
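
/*
 * Illustrative sketch (not compiled): descriptor base addresses are
 * programmed as two 32-bit halves; on systems with a 32-bit bus_addr_t
 * the macros above force the high half to zero.  With hypothetical
 * register names (invented here for illustration only):
 */
#if 0
	CSR_WRITE(sc, TXQ_BASE_LO_REG, WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, TXQ_BASE_HI_REG, WM_CDTXADDR_HI(txq, 0));
#endif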

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1327 	  "82801I (C) LAN Controller",
   1328 	  WM_T_ICH9,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1330 	  "82801I mobile LAN Controller",
   1331 	  WM_T_ICH9,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1333 	  "82801I mobile (V) LAN Controller",
   1334 	  WM_T_ICH9,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1336 	  "82801I mobile (AMT) LAN Controller",
   1337 	  WM_T_ICH9,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1339 	  "82567LM-4 LAN Controller",
   1340 	  WM_T_ICH9,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1342 	  "82567LM-2 LAN Controller",
   1343 	  WM_T_ICH10,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1345 	  "82567LF-2 LAN Controller",
   1346 	  WM_T_ICH10,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1348 	  "82567LM-3 LAN Controller",
   1349 	  WM_T_ICH10,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1351 	  "82567LF-3 LAN Controller",
   1352 	  WM_T_ICH10,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1354 	  "82567V-2 LAN Controller",
   1355 	  WM_T_ICH10,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1357 	  "82567V-3? LAN Controller",
   1358 	  WM_T_ICH10,		WMP_F_COPPER },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1360 	  "HANKSVILLE LAN Controller",
   1361 	  WM_T_ICH10,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1363 	  "PCH LAN (82577LM) Controller",
   1364 	  WM_T_PCH,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1366 	  "PCH LAN (82577LC) Controller",
   1367 	  WM_T_PCH,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1369 	  "PCH LAN (82578DM) Controller",
   1370 	  WM_T_PCH,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1372 	  "PCH LAN (82578DC) Controller",
   1373 	  WM_T_PCH,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1375 	  "PCH2 LAN (82579LM) Controller",
   1376 	  WM_T_PCH2,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1378 	  "PCH2 LAN (82579V) Controller",
   1379 	  WM_T_PCH2,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1381 	  "82575EB dual-1000baseT Ethernet",
   1382 	  WM_T_82575,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1384 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1385 	  WM_T_82575,		WMP_F_SERDES },
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1387 	  "82575GB quad-1000baseT Ethernet",
   1388 	  WM_T_82575,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1390 	  "82575GB quad-1000baseT Ethernet (PM)",
   1391 	  WM_T_82575,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1393 	  "82576 1000BaseT Ethernet",
   1394 	  WM_T_82576,		WMP_F_COPPER },
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1396 	  "82576 1000BaseX Ethernet",
   1397 	  WM_T_82576,		WMP_F_FIBER },
   1398 
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1400 	  "82576 gigabit Ethernet (SERDES)",
   1401 	  WM_T_82576,		WMP_F_SERDES },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1404 	  "82576 quad-1000BaseT Ethernet",
   1405 	  WM_T_82576,		WMP_F_COPPER },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1408 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1409 	  WM_T_82576,		WMP_F_COPPER },
   1410 
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1412 	  "82576 gigabit Ethernet",
   1413 	  WM_T_82576,		WMP_F_COPPER },
   1414 
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1416 	  "82576 gigabit Ethernet (SERDES)",
   1417 	  WM_T_82576,		WMP_F_SERDES },
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1419 	  "82576 quad-gigabit Ethernet (SERDES)",
   1420 	  WM_T_82576,		WMP_F_SERDES },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1423 	  "82580 1000BaseT Ethernet",
   1424 	  WM_T_82580,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1426 	  "82580 1000BaseX Ethernet",
   1427 	  WM_T_82580,		WMP_F_FIBER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1430 	  "82580 1000BaseT Ethernet (SERDES)",
   1431 	  WM_T_82580,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1434 	  "82580 gigabit Ethernet (SGMII)",
   1435 	  WM_T_82580,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1437 	  "82580 dual-1000BaseT Ethernet",
   1438 	  WM_T_82580,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1441 	  "82580 quad-1000BaseX Ethernet",
   1442 	  WM_T_82580,		WMP_F_FIBER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1445 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1446 	  WM_T_82580,		WMP_F_COPPER },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1449 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1450 	  WM_T_82580,		WMP_F_SERDES },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1453 	  "DH89XXCC 1000BASE-KX Ethernet",
   1454 	  WM_T_82580,		WMP_F_SERDES },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1457 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1458 	  WM_T_82580,		WMP_F_SERDES },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1461 	  "I350 Gigabit Network Connection",
   1462 	  WM_T_I350,		WMP_F_COPPER },
   1463 
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1465 	  "I350 Gigabit Fiber Network Connection",
   1466 	  WM_T_I350,		WMP_F_FIBER },
   1467 
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1469 	  "I350 Gigabit Backplane Connection",
   1470 	  WM_T_I350,		WMP_F_SERDES },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1473 	  "I350 Quad Port Gigabit Ethernet",
   1474 	  WM_T_I350,		WMP_F_SERDES },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1477 	  "I350 Gigabit Connection",
   1478 	  WM_T_I350,		WMP_F_COPPER },
   1479 
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1481 	  "I354 Gigabit Ethernet (KX)",
   1482 	  WM_T_I354,		WMP_F_SERDES },
   1483 
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1485 	  "I354 Gigabit Ethernet (SGMII)",
   1486 	  WM_T_I354,		WMP_F_COPPER },
   1487 
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1489 	  "I354 Gigabit Ethernet (2.5G)",
   1490 	  WM_T_I354,		WMP_F_COPPER },
   1491 
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1493 	  "I210-T1 Ethernet Server Adapter",
   1494 	  WM_T_I210,		WMP_F_COPPER },
   1495 
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1497 	  "I210 Ethernet (Copper OEM)",
   1498 	  WM_T_I210,		WMP_F_COPPER },
   1499 
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1501 	  "I210 Ethernet (Copper IT)",
   1502 	  WM_T_I210,		WMP_F_COPPER },
   1503 
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1505 	  "I210 Ethernet (Copper, FLASH less)",
   1506 	  WM_T_I210,		WMP_F_COPPER },
   1507 
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1509 	  "I210 Gigabit Ethernet (Fiber)",
   1510 	  WM_T_I210,		WMP_F_FIBER },
   1511 
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1513 	  "I210 Gigabit Ethernet (SERDES)",
   1514 	  WM_T_I210,		WMP_F_SERDES },
   1515 
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1517 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1518 	  WM_T_I210,		WMP_F_SERDES },
   1519 
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1521 	  "I210 Gigabit Ethernet (SGMII)",
   1522 	  WM_T_I210,		WMP_F_COPPER },
   1523 
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1525 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1526 	  WM_T_I210,		WMP_F_COPPER },
   1527 
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1529 	  "I211 Ethernet (COPPER)",
   1530 	  WM_T_I211,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1532 	  "I217 V Ethernet Connection",
   1533 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1535 	  "I217 LM Ethernet Connection",
   1536 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1538 	  "I218 V Ethernet Connection",
   1539 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1541 	  "I218 V Ethernet Connection",
   1542 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1544 	  "I218 V Ethernet Connection",
   1545 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1547 	  "I218 LM Ethernet Connection",
   1548 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1550 	  "I218 LM Ethernet Connection",
   1551 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1553 	  "I218 LM Ethernet Connection",
   1554 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1556 	  "I219 LM Ethernet Connection",
   1557 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1559 	  "I219 LM Ethernet Connection",
   1560 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1565 	  "I219 LM Ethernet Connection",
   1566 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1568 	  "I219 LM Ethernet Connection",
   1569 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1570 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1571 	  "I219 LM Ethernet Connection",
   1572 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1574 	  "I219 LM Ethernet Connection",
   1575 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1577 	  "I219 LM Ethernet Connection",
   1578 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1580 	  "I219 LM Ethernet Connection",
   1581 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1583 	  "I219 LM Ethernet Connection",
   1584 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1585 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1586 	  "I219 LM Ethernet Connection",
   1587 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1589 	  "I219 LM Ethernet Connection",
   1590 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1592 	  "I219 LM Ethernet Connection",
   1593 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1595 	  "I219 LM Ethernet Connection",
   1596 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1597 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1598 	  "I219 LM Ethernet Connection",
   1599 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1601 	  "I219 V Ethernet Connection",
   1602 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1604 	  "I219 V Ethernet Connection",
   1605 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1607 	  "I219 V Ethernet Connection",
   1608 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1609 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1610 	  "I219 V Ethernet Connection",
   1611 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1613 	  "I219 V Ethernet Connection",
   1614 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1616 	  "I219 V Ethernet Connection",
   1617 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1619 	  "I219 V Ethernet Connection",
   1620 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1622 	  "I219 V Ethernet Connection",
   1623 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1625 	  "I219 V Ethernet Connection",
   1626 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1628 	  "I219 V Ethernet Connection",
   1629 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1631 	  "I219 V Ethernet Connection",
   1632 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1634 	  "I219 V Ethernet Connection",
   1635 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1637 	  "I219 V Ethernet Connection",
   1638 	  WM_T_PCH_CNP,		WMP_F_COPPER },
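         	/* Sentinel; wm_lookup() stops at the NULL wmp_name. */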
   1639 	{ 0,			0,
   1640 	  NULL,
   1641 	  0,			0 },
   1642 };
   1643 
   1644 /*
   1645  * Register read/write functions.
   1646  * Other than CSR_{READ|WRITE}().
   1647  */
   1648 
   1649 #if 0 /* Not currently used */
   1650 static inline uint32_t
   1651 wm_io_read(struct wm_softc *sc, int reg)
   1652 {
   1653 
   1654 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1655 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1656 }
   1657 #endif
   1658 
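         /*
          * The I/O BAR is a two-register indirection window: the target
          * register offset is written at offset 0 (IOADDR in Intel's
          * documentation) and the data at offset 4 (IODATA).
          */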
   1659 static inline void
   1660 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1661 {
   1662 
   1663 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1664 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1665 }
   1666 
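         /*
          * Write one byte to an 8-bit-wide controller register behind an
          * SCTL-style indirection register (e.g. the SerDes control register
          * on 82575 and later): pack the data and the target offset into one
          * command word, then poll until the hardware sets the READY bit.
          */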
   1667 static inline void
   1668 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1669     uint32_t data)
   1670 {
   1671 	uint32_t regval;
   1672 	int i;
   1673 
   1674 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1675 
   1676 	CSR_WRITE(sc, reg, regval);
   1677 
   1678 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1679 		delay(5);
   1680 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1681 			break;
   1682 	}
   1683 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1684 		aprint_error("%s: WARNING:"
   1685 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1686 		    device_xname(sc->sc_dev), reg);
   1687 	}
   1688 }
   1689 
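         /*
          * Split a bus address into the little-endian low/high halves of a
          * descriptor address; the high half is zero when bus addresses are
          * only 32 bits wide.
          */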
   1690 static inline void
   1691 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1692 {
   1693 	wa->wa_low = htole32(v & 0xffffffffU);
   1694 	if (sizeof(bus_addr_t) == 8)
   1695 		wa->wa_high = htole32((uint64_t) v >> 32);
   1696 	else
   1697 		wa->wa_high = 0;
   1698 }
   1699 
   1700 /*
   1701  * Descriptor sync/init functions.
   1702  */
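         /*
          * Sync [start, start + num) of the Tx descriptor ring; if the range
          * wraps past the end of the ring, sync it in two pieces.
          */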
   1703 static inline void
   1704 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1705 {
   1706 	struct wm_softc *sc = txq->txq_sc;
   1707 
   1708 	/* If it will wrap around, sync to the end of the ring. */
   1709 	if ((start + num) > WM_NTXDESC(txq)) {
   1710 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1711 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1712 		    (WM_NTXDESC(txq) - start), ops);
   1713 		num -= (WM_NTXDESC(txq) - start);
   1714 		start = 0;
   1715 	}
   1716 
   1717 	/* Now sync whatever is left. */
   1718 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1719 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1720 }
   1721 
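         /* Sync a single Rx descriptor. */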
   1722 static inline void
   1723 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1724 {
   1725 	struct wm_softc *sc = rxq->rxq_sc;
   1726 
   1727 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1728 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1729 }
   1730 
   1731 static inline void
   1732 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1733 {
   1734 	struct wm_softc *sc = rxq->rxq_sc;
   1735 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1736 	struct mbuf *m = rxs->rxs_mbuf;
   1737 
   1738 	/*
   1739 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1740 	 * so that the payload after the Ethernet header is aligned
   1741 	 * to a 4-byte boundary.
    1742 	 *
   1743 	 * XXX BRAINDAMAGE ALERT!
   1744 	 * The stupid chip uses the same size for every buffer, which
   1745 	 * is set in the Receive Control register.  We are using the 2K
   1746 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1747 	 * reason, we can't "scoot" packets longer than the standard
   1748 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1749 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1750 	 * the upper layer copy the headers.
   1751 	 */
   1752 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1753 
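         	/*
         	 * Three Rx descriptor layouts are in use: the 82574 takes
         	 * extended descriptors, NEWQUEUE (82575 and later) chips take
         	 * the advanced ("nq") format and everything else takes the
         	 * legacy wiseman layout.
         	 */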
   1754 	if (sc->sc_type == WM_T_82574) {
   1755 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1756 		rxd->erx_data.erxd_addr =
   1757 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1758 		rxd->erx_data.erxd_dd = 0;
   1759 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1760 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1761 
   1762 		rxd->nqrx_data.nrxd_paddr =
   1763 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1764 		/* Currently, split header is not supported. */
   1765 		rxd->nqrx_data.nrxd_haddr = 0;
   1766 	} else {
   1767 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1768 
   1769 		wm_set_dma_addr(&rxd->wrx_addr,
   1770 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1771 		rxd->wrx_len = 0;
   1772 		rxd->wrx_cksum = 0;
   1773 		rxd->wrx_status = 0;
   1774 		rxd->wrx_errors = 0;
   1775 		rxd->wrx_special = 0;
   1776 	}
   1777 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1778 
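         	/* Hand the descriptor to the hardware by advancing the tail. */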
   1779 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1780 }
   1781 
   1782 /*
   1783  * Device driver interface functions and commonly used functions.
   1784  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1785  */
   1786 
   1787 /* Lookup supported device table */
   1788 static const struct wm_product *
   1789 wm_lookup(const struct pci_attach_args *pa)
   1790 {
   1791 	const struct wm_product *wmp;
   1792 
   1793 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1794 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1795 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1796 			return wmp;
   1797 	}
   1798 	return NULL;
   1799 }
   1800 
   1801 /* The match function (ca_match) */
   1802 static int
   1803 wm_match(device_t parent, cfdata_t cf, void *aux)
   1804 {
   1805 	struct pci_attach_args *pa = aux;
   1806 
   1807 	if (wm_lookup(pa) != NULL)
   1808 		return 1;
   1809 
   1810 	return 0;
   1811 }
   1812 
   1813 /* The attach function (ca_attach) */
   1814 static void
   1815 wm_attach(device_t parent, device_t self, void *aux)
   1816 {
   1817 	struct wm_softc *sc = device_private(self);
   1818 	struct pci_attach_args *pa = aux;
   1819 	prop_dictionary_t dict;
   1820 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1821 	pci_chipset_tag_t pc = pa->pa_pc;
   1822 	int counts[PCI_INTR_TYPE_SIZE];
   1823 	pci_intr_type_t max_type;
   1824 	const char *eetype, *xname;
   1825 	bus_space_tag_t memt;
   1826 	bus_space_handle_t memh;
   1827 	bus_size_t memsize;
   1828 	int memh_valid;
   1829 	int i, error;
   1830 	const struct wm_product *wmp;
   1831 	prop_data_t ea;
   1832 	prop_number_t pn;
   1833 	uint8_t enaddr[ETHER_ADDR_LEN];
   1834 	char buf[256];
   1835 	char wqname[MAXCOMLEN];
   1836 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1837 	pcireg_t preg, memtype;
   1838 	uint16_t eeprom_data, apme_mask;
   1839 	bool force_clear_smbi;
   1840 	uint32_t link_mode;
   1841 	uint32_t reg;
   1842 
   1843 	sc->sc_dev = self;
   1844 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1845 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1846 	sc->sc_core_stopping = false;
   1847 
   1848 	wmp = wm_lookup(pa);
   1849 #ifdef DIAGNOSTIC
   1850 	if (wmp == NULL) {
   1851 		printf("\n");
   1852 		panic("wm_attach: impossible");
   1853 	}
   1854 #endif
   1855 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1856 
   1857 	sc->sc_pc = pa->pa_pc;
   1858 	sc->sc_pcitag = pa->pa_tag;
   1859 
   1860 	if (pci_dma64_available(pa))
   1861 		sc->sc_dmat = pa->pa_dmat64;
   1862 	else
   1863 		sc->sc_dmat = pa->pa_dmat;
   1864 
   1865 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
    1866 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1867 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1868 
   1869 	sc->sc_type = wmp->wmp_type;
   1870 
   1871 	/* Set default function pointers */
   1872 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1873 	sc->phy.release = sc->nvm.release = wm_put_null;
   1874 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1875 
   1876 	if (sc->sc_type < WM_T_82543) {
   1877 		if (sc->sc_rev < 2) {
   1878 			aprint_error_dev(sc->sc_dev,
   1879 			    "i82542 must be at least rev. 2\n");
   1880 			return;
   1881 		}
   1882 		if (sc->sc_rev < 3)
   1883 			sc->sc_type = WM_T_82542_2_0;
   1884 	}
   1885 
   1886 	/*
   1887 	 * Disable MSI for Errata:
   1888 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1889 	 *
   1890 	 *  82544: Errata 25
   1891 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1892 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1893 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1894 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1895 	 *
   1896 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1897 	 *
   1898 	 *  82571 & 82572: Errata 63
   1899 	 */
   1900 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1901 	    || (sc->sc_type == WM_T_82572))
   1902 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1903 
   1904 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1905 	    || (sc->sc_type == WM_T_82580)
   1906 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1907 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1908 		sc->sc_flags |= WM_F_NEWQUEUE;
   1909 
   1910 	/* Set device properties (mactype) */
   1911 	dict = device_properties(sc->sc_dev);
   1912 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1913 
   1914 	/*
    1915 	 * Map the device.  All devices support memory-mapped access,
   1916 	 * and it is really required for normal operation.
   1917 	 */
   1918 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1919 	switch (memtype) {
   1920 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1921 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1922 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1923 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1924 		break;
   1925 	default:
   1926 		memh_valid = 0;
   1927 		break;
   1928 	}
   1929 
   1930 	if (memh_valid) {
   1931 		sc->sc_st = memt;
   1932 		sc->sc_sh = memh;
   1933 		sc->sc_ss = memsize;
   1934 	} else {
   1935 		aprint_error_dev(sc->sc_dev,
   1936 		    "unable to map device registers\n");
   1937 		return;
   1938 	}
   1939 
   1940 	/*
   1941 	 * In addition, i82544 and later support I/O mapped indirect
   1942 	 * register access.  It is not desirable (nor supported in
   1943 	 * this driver) to use it for normal operation, though it is
   1944 	 * required to work around bugs in some chip versions.
   1945 	 */
   1946 	if (sc->sc_type >= WM_T_82544) {
   1947 		/* First we have to find the I/O BAR. */
   1948 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1949 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1950 			if (memtype == PCI_MAPREG_TYPE_IO)
   1951 				break;
   1952 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1953 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1954 				i += 4;	/* skip high bits, too */
   1955 		}
   1956 		if (i < PCI_MAPREG_END) {
   1957 			/*
    1958 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1959 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1960 			 * That's not a problem, because the newer chips
    1961 			 * don't have this bug.
    1962 			 *
    1963 			 * The i8254x apparently doesn't respond when the
    1964 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1965 			 * been configured.
   1966 			 */
   1967 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1968 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1969 				aprint_error_dev(sc->sc_dev,
   1970 				    "WARNING: I/O BAR at zero.\n");
   1971 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1972 					0, &sc->sc_iot, &sc->sc_ioh,
   1973 					NULL, &sc->sc_ios) == 0) {
   1974 				sc->sc_flags |= WM_F_IOH_VALID;
   1975 			} else
   1976 				aprint_error_dev(sc->sc_dev,
   1977 				    "WARNING: unable to map I/O space\n");
   1978 		}
   1979 
   1980 	}
   1981 
   1982 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1983 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1984 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1985 	if (sc->sc_type < WM_T_82542_2_1)
   1986 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1987 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1988 
   1989 	/* Power up chip */
   1990 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1991 	    && error != EOPNOTSUPP) {
   1992 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1993 		return;
   1994 	}
   1995 
   1996 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1997 	/*
    1998 	 * Don't use MSI-X if we can use only one queue, to save interrupt
    1999 	 * resources.
   2000 	 */
   2001 	if (sc->sc_nqueues > 1) {
   2002 		max_type = PCI_INTR_TYPE_MSIX;
   2003 		/*
    2004 		 * The 82583 has an MSI-X capability in the PCI configuration
    2005 		 * space, but it doesn't support it. At least the documentation
    2006 		 * doesn't say anything about MSI-X.
   2007 		 */
   2008 		counts[PCI_INTR_TYPE_MSIX]
   2009 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2010 	} else {
   2011 		max_type = PCI_INTR_TYPE_MSI;
   2012 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2013 	}
   2014 
   2015 	/* Allocation settings */
   2016 	counts[PCI_INTR_TYPE_MSI] = 1;
   2017 	counts[PCI_INTR_TYPE_INTX] = 1;
    2018 	/* Overridden by the wm_disable_msi and wm_disable_msix flags */
   2019 	if (wm_disable_msi != 0) {
   2020 		counts[PCI_INTR_TYPE_MSI] = 0;
   2021 		if (wm_disable_msix != 0) {
   2022 			max_type = PCI_INTR_TYPE_INTX;
   2023 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2024 		}
   2025 	} else if (wm_disable_msix != 0) {
   2026 		max_type = PCI_INTR_TYPE_MSI;
   2027 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2028 	}
   2029 
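         	/*
         	 * The allocation falls back one step at a time: MSI-X (one
         	 * vector per queue plus one for the link interrupt) first,
         	 * then MSI, then INTx.
         	 */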
   2030 alloc_retry:
   2031 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2032 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2033 		return;
   2034 	}
   2035 
   2036 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2037 		error = wm_setup_msix(sc);
   2038 		if (error) {
   2039 			pci_intr_release(pc, sc->sc_intrs,
   2040 			    counts[PCI_INTR_TYPE_MSIX]);
   2041 
   2042 			/* Setup for MSI: Disable MSI-X */
   2043 			max_type = PCI_INTR_TYPE_MSI;
   2044 			counts[PCI_INTR_TYPE_MSI] = 1;
   2045 			counts[PCI_INTR_TYPE_INTX] = 1;
   2046 			goto alloc_retry;
   2047 		}
   2048 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2049 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2050 		error = wm_setup_legacy(sc);
   2051 		if (error) {
   2052 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2053 			    counts[PCI_INTR_TYPE_MSI]);
   2054 
   2055 			/* The next try is for INTx: Disable MSI */
   2056 			max_type = PCI_INTR_TYPE_INTX;
   2057 			counts[PCI_INTR_TYPE_INTX] = 1;
   2058 			goto alloc_retry;
   2059 		}
   2060 	} else {
   2061 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2062 		error = wm_setup_legacy(sc);
   2063 		if (error) {
   2064 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2065 			    counts[PCI_INTR_TYPE_INTX]);
   2066 			return;
   2067 		}
   2068 	}
   2069 
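         	/* Create a per-device workqueue for deferred Tx/Rx queue work. */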
   2070 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2071 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2072 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2073 	    WM_WORKQUEUE_FLAGS);
   2074 	if (error) {
   2075 		aprint_error_dev(sc->sc_dev,
   2076 		    "unable to create workqueue\n");
   2077 		goto out;
   2078 	}
   2079 
   2080 	/*
    2081 	 * Check the function ID (unit number of the chip) in STATUS.
   2082 	 */
   2083 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2084 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2085 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2086 	    || (sc->sc_type == WM_T_82580)
   2087 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2088 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2089 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2090 	else
   2091 		sc->sc_funcid = 0;
   2092 
   2093 	/*
   2094 	 * Determine a few things about the bus we're connected to.
   2095 	 */
   2096 	if (sc->sc_type < WM_T_82543) {
   2097 		/* We don't really know the bus characteristics here. */
   2098 		sc->sc_bus_speed = 33;
   2099 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2100 		/*
    2101 		 * CSA (Communication Streaming Architecture) is about as fast
    2102 		 * as a 32-bit 66MHz PCI bus.
   2103 		 */
   2104 		sc->sc_flags |= WM_F_CSA;
   2105 		sc->sc_bus_speed = 66;
   2106 		aprint_verbose_dev(sc->sc_dev,
   2107 		    "Communication Streaming Architecture\n");
   2108 		if (sc->sc_type == WM_T_82547) {
   2109 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2110 			callout_setfunc(&sc->sc_txfifo_ch,
   2111 			    wm_82547_txfifo_stall, sc);
   2112 			aprint_verbose_dev(sc->sc_dev,
   2113 			    "using 82547 Tx FIFO stall work-around\n");
   2114 		}
   2115 	} else if (sc->sc_type >= WM_T_82571) {
   2116 		sc->sc_flags |= WM_F_PCIE;
   2117 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2118 		    && (sc->sc_type != WM_T_ICH10)
   2119 		    && (sc->sc_type != WM_T_PCH)
   2120 		    && (sc->sc_type != WM_T_PCH2)
   2121 		    && (sc->sc_type != WM_T_PCH_LPT)
   2122 		    && (sc->sc_type != WM_T_PCH_SPT)
   2123 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2124 			/* ICH* and PCH* have no PCIe capability registers */
   2125 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2126 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2127 				NULL) == 0)
   2128 				aprint_error_dev(sc->sc_dev,
   2129 				    "unable to find PCIe capability\n");
   2130 		}
   2131 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2132 	} else {
   2133 		reg = CSR_READ(sc, WMREG_STATUS);
   2134 		if (reg & STATUS_BUS64)
   2135 			sc->sc_flags |= WM_F_BUS64;
   2136 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2137 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2138 
   2139 			sc->sc_flags |= WM_F_PCIX;
   2140 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2141 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2142 				aprint_error_dev(sc->sc_dev,
   2143 				    "unable to find PCIX capability\n");
   2144 			else if (sc->sc_type != WM_T_82545_3 &&
   2145 				 sc->sc_type != WM_T_82546_3) {
   2146 				/*
   2147 				 * Work around a problem caused by the BIOS
   2148 				 * setting the max memory read byte count
   2149 				 * incorrectly.
   2150 				 */
   2151 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2152 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2153 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2154 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2155 
   2156 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2157 				    PCIX_CMD_BYTECNT_SHIFT;
   2158 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2159 				    PCIX_STATUS_MAXB_SHIFT;
   2160 				if (bytecnt > maxb) {
   2161 					aprint_verbose_dev(sc->sc_dev,
   2162 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2163 					    512 << bytecnt, 512 << maxb);
   2164 					pcix_cmd = (pcix_cmd &
   2165 					    ~PCIX_CMD_BYTECNT_MASK) |
   2166 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2167 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2168 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2169 					    pcix_cmd);
   2170 				}
   2171 			}
   2172 		}
   2173 		/*
   2174 		 * The quad port adapter is special; it has a PCIX-PCIX
   2175 		 * bridge on the board, and can run the secondary bus at
   2176 		 * a higher speed.
   2177 		 */
   2178 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2179 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2180 								      : 66;
   2181 		} else if (sc->sc_flags & WM_F_PCIX) {
   2182 			switch (reg & STATUS_PCIXSPD_MASK) {
   2183 			case STATUS_PCIXSPD_50_66:
   2184 				sc->sc_bus_speed = 66;
   2185 				break;
   2186 			case STATUS_PCIXSPD_66_100:
   2187 				sc->sc_bus_speed = 100;
   2188 				break;
   2189 			case STATUS_PCIXSPD_100_133:
   2190 				sc->sc_bus_speed = 133;
   2191 				break;
   2192 			default:
   2193 				aprint_error_dev(sc->sc_dev,
   2194 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2195 				    reg & STATUS_PCIXSPD_MASK);
   2196 				sc->sc_bus_speed = 66;
   2197 				break;
   2198 			}
   2199 		} else
   2200 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2201 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2202 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2203 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2204 	}
   2205 
   2206 	/* clear interesting stat counters */
   2207 	CSR_READ(sc, WMREG_COLC);
   2208 	CSR_READ(sc, WMREG_RXERRC);
   2209 
   2210 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2211 	    || (sc->sc_type >= WM_T_ICH8))
   2212 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2213 	if (sc->sc_type >= WM_T_ICH8)
   2214 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2215 
    2216 	/* Set up NVM access functions and PHY/NVM semaphore hooks */
   2217 	switch (sc->sc_type) {
   2218 	case WM_T_82542_2_0:
   2219 	case WM_T_82542_2_1:
   2220 	case WM_T_82543:
   2221 	case WM_T_82544:
   2222 		/* Microwire */
   2223 		sc->nvm.read = wm_nvm_read_uwire;
   2224 		sc->sc_nvm_wordsize = 64;
   2225 		sc->sc_nvm_addrbits = 6;
   2226 		break;
   2227 	case WM_T_82540:
   2228 	case WM_T_82545:
   2229 	case WM_T_82545_3:
   2230 	case WM_T_82546:
   2231 	case WM_T_82546_3:
   2232 		/* Microwire */
   2233 		sc->nvm.read = wm_nvm_read_uwire;
   2234 		reg = CSR_READ(sc, WMREG_EECD);
   2235 		if (reg & EECD_EE_SIZE) {
   2236 			sc->sc_nvm_wordsize = 256;
   2237 			sc->sc_nvm_addrbits = 8;
   2238 		} else {
   2239 			sc->sc_nvm_wordsize = 64;
   2240 			sc->sc_nvm_addrbits = 6;
   2241 		}
   2242 		sc->sc_flags |= WM_F_LOCK_EECD;
   2243 		sc->nvm.acquire = wm_get_eecd;
   2244 		sc->nvm.release = wm_put_eecd;
   2245 		break;
   2246 	case WM_T_82541:
   2247 	case WM_T_82541_2:
   2248 	case WM_T_82547:
   2249 	case WM_T_82547_2:
   2250 		reg = CSR_READ(sc, WMREG_EECD);
   2251 		/*
    2252 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2253 		 * 8254[17], so set the flags and functions before calling it.
   2254 		 */
   2255 		sc->sc_flags |= WM_F_LOCK_EECD;
   2256 		sc->nvm.acquire = wm_get_eecd;
   2257 		sc->nvm.release = wm_put_eecd;
   2258 		if (reg & EECD_EE_TYPE) {
   2259 			/* SPI */
   2260 			sc->nvm.read = wm_nvm_read_spi;
   2261 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2262 			wm_nvm_set_addrbits_size_eecd(sc);
   2263 		} else {
   2264 			/* Microwire */
   2265 			sc->nvm.read = wm_nvm_read_uwire;
   2266 			if ((reg & EECD_EE_ABITS) != 0) {
   2267 				sc->sc_nvm_wordsize = 256;
   2268 				sc->sc_nvm_addrbits = 8;
   2269 			} else {
   2270 				sc->sc_nvm_wordsize = 64;
   2271 				sc->sc_nvm_addrbits = 6;
   2272 			}
   2273 		}
   2274 		break;
   2275 	case WM_T_82571:
   2276 	case WM_T_82572:
   2277 		/* SPI */
   2278 		sc->nvm.read = wm_nvm_read_eerd;
    2279 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2280 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2281 		wm_nvm_set_addrbits_size_eecd(sc);
   2282 		sc->phy.acquire = wm_get_swsm_semaphore;
   2283 		sc->phy.release = wm_put_swsm_semaphore;
   2284 		sc->nvm.acquire = wm_get_nvm_82571;
   2285 		sc->nvm.release = wm_put_nvm_82571;
   2286 		break;
   2287 	case WM_T_82573:
   2288 	case WM_T_82574:
   2289 	case WM_T_82583:
   2290 		sc->nvm.read = wm_nvm_read_eerd;
    2291 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2292 		if (sc->sc_type == WM_T_82573) {
   2293 			sc->phy.acquire = wm_get_swsm_semaphore;
   2294 			sc->phy.release = wm_put_swsm_semaphore;
   2295 			sc->nvm.acquire = wm_get_nvm_82571;
   2296 			sc->nvm.release = wm_put_nvm_82571;
   2297 		} else {
   2298 			/* Both PHY and NVM use the same semaphore. */
   2299 			sc->phy.acquire = sc->nvm.acquire
   2300 			    = wm_get_swfwhw_semaphore;
   2301 			sc->phy.release = sc->nvm.release
   2302 			    = wm_put_swfwhw_semaphore;
   2303 		}
   2304 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2305 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2306 			sc->sc_nvm_wordsize = 2048;
   2307 		} else {
   2308 			/* SPI */
   2309 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2310 			wm_nvm_set_addrbits_size_eecd(sc);
   2311 		}
   2312 		break;
   2313 	case WM_T_82575:
   2314 	case WM_T_82576:
   2315 	case WM_T_82580:
   2316 	case WM_T_I350:
   2317 	case WM_T_I354:
   2318 	case WM_T_80003:
   2319 		/* SPI */
   2320 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2321 		wm_nvm_set_addrbits_size_eecd(sc);
   2322 		if ((sc->sc_type == WM_T_80003)
   2323 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2324 			sc->nvm.read = wm_nvm_read_eerd;
   2325 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2326 		} else {
   2327 			sc->nvm.read = wm_nvm_read_spi;
   2328 			sc->sc_flags |= WM_F_LOCK_EECD;
   2329 		}
   2330 		sc->phy.acquire = wm_get_phy_82575;
   2331 		sc->phy.release = wm_put_phy_82575;
   2332 		sc->nvm.acquire = wm_get_nvm_80003;
   2333 		sc->nvm.release = wm_put_nvm_80003;
   2334 		break;
   2335 	case WM_T_ICH8:
   2336 	case WM_T_ICH9:
   2337 	case WM_T_ICH10:
   2338 	case WM_T_PCH:
   2339 	case WM_T_PCH2:
   2340 	case WM_T_PCH_LPT:
   2341 		sc->nvm.read = wm_nvm_read_ich8;
   2342 		/* FLASH */
   2343 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2344 		sc->sc_nvm_wordsize = 2048;
    2345 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   2346 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2347 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2348 			aprint_error_dev(sc->sc_dev,
   2349 			    "can't map FLASH registers\n");
   2350 			goto out;
   2351 		}
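         		/*
         		 * GFPREG describes the gigabit NVM region in flash
         		 * sectors: the low half holds the first sector and the
         		 * high half the last one.  Convert that to a byte offset
         		 * and a per-bank size in 16-bit words, assuming two banks.
         		 */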
   2352 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2353 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2354 		    ICH_FLASH_SECTOR_SIZE;
   2355 		sc->sc_ich8_flash_bank_size =
   2356 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2357 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2358 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2359 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2360 		sc->sc_flashreg_offset = 0;
   2361 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2362 		sc->phy.release = wm_put_swflag_ich8lan;
   2363 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2364 		sc->nvm.release = wm_put_nvm_ich8lan;
   2365 		break;
   2366 	case WM_T_PCH_SPT:
   2367 	case WM_T_PCH_CNP:
   2368 		sc->nvm.read = wm_nvm_read_spt;
   2369 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2370 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2371 		sc->sc_flasht = sc->sc_st;
   2372 		sc->sc_flashh = sc->sc_sh;
   2373 		sc->sc_ich8_flash_base = 0;
   2374 		sc->sc_nvm_wordsize =
   2375 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2376 		    * NVM_SIZE_MULTIPLIER;
    2377 		/* It is the size in bytes; we want it in words */
   2378 		sc->sc_nvm_wordsize /= 2;
   2379 		/* Assume 2 banks */
   2380 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2381 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2382 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2383 		sc->phy.release = wm_put_swflag_ich8lan;
   2384 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2385 		sc->nvm.release = wm_put_nvm_ich8lan;
   2386 		break;
   2387 	case WM_T_I210:
   2388 	case WM_T_I211:
    2389 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2390 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2391 		if (wm_nvm_flash_presence_i210(sc)) {
   2392 			sc->nvm.read = wm_nvm_read_eerd;
   2393 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2394 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2395 			wm_nvm_set_addrbits_size_eecd(sc);
   2396 		} else {
   2397 			sc->nvm.read = wm_nvm_read_invm;
   2398 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2399 			sc->sc_nvm_wordsize = INVM_SIZE;
   2400 		}
   2401 		sc->phy.acquire = wm_get_phy_82575;
   2402 		sc->phy.release = wm_put_phy_82575;
   2403 		sc->nvm.acquire = wm_get_nvm_80003;
   2404 		sc->nvm.release = wm_put_nvm_80003;
   2405 		break;
   2406 	default:
   2407 		break;
   2408 	}
   2409 
   2410 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2411 	switch (sc->sc_type) {
   2412 	case WM_T_82571:
   2413 	case WM_T_82572:
   2414 		reg = CSR_READ(sc, WMREG_SWSM2);
   2415 		if ((reg & SWSM2_LOCK) == 0) {
   2416 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2417 			force_clear_smbi = true;
   2418 		} else
   2419 			force_clear_smbi = false;
   2420 		break;
   2421 	case WM_T_82573:
   2422 	case WM_T_82574:
   2423 	case WM_T_82583:
   2424 		force_clear_smbi = true;
   2425 		break;
   2426 	default:
   2427 		force_clear_smbi = false;
   2428 		break;
   2429 	}
   2430 	if (force_clear_smbi) {
   2431 		reg = CSR_READ(sc, WMREG_SWSM);
   2432 		if ((reg & SWSM_SMBI) != 0)
   2433 			aprint_error_dev(sc->sc_dev,
   2434 			    "Please update the Bootagent\n");
   2435 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2436 	}
   2437 
   2438 	/*
    2439 	 * Defer printing the EEPROM type until after verifying the checksum.
   2440 	 * This allows the EEPROM type to be printed correctly in the case
   2441 	 * that no EEPROM is attached.
   2442 	 */
   2443 	/*
   2444 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2445 	 * this for later, so we can fail future reads from the EEPROM.
   2446 	 */
   2447 	if (wm_nvm_validate_checksum(sc)) {
   2448 		/*
    2449 		 * Validate once more, because some PCI-e parts fail the
    2450 		 * first check due to the link being in a sleep state.
   2451 		 */
   2452 		if (wm_nvm_validate_checksum(sc))
   2453 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2454 	}
   2455 
   2456 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2457 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2458 	else {
   2459 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2460 		    sc->sc_nvm_wordsize);
   2461 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2462 			aprint_verbose("iNVM");
   2463 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2464 			aprint_verbose("FLASH(HW)");
   2465 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2466 			aprint_verbose("FLASH");
   2467 		else {
   2468 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2469 				eetype = "SPI";
   2470 			else
   2471 				eetype = "MicroWire";
   2472 			aprint_verbose("(%d address bits) %s EEPROM",
   2473 			    sc->sc_nvm_addrbits, eetype);
   2474 		}
   2475 	}
   2476 	wm_nvm_version(sc);
   2477 	aprint_verbose("\n");
   2478 
   2479 	/*
    2480 	 * XXX This is the first call of wm_gmii_setup_phytype(); the
    2481 	 * result might be incorrect.
   2482 	 */
   2483 	wm_gmii_setup_phytype(sc, 0, 0);
   2484 
   2485 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2486 	switch (sc->sc_type) {
   2487 	case WM_T_ICH8:
   2488 	case WM_T_ICH9:
   2489 	case WM_T_ICH10:
   2490 	case WM_T_PCH:
   2491 	case WM_T_PCH2:
   2492 	case WM_T_PCH_LPT:
   2493 	case WM_T_PCH_SPT:
   2494 	case WM_T_PCH_CNP:
   2495 		apme_mask = WUC_APME;
   2496 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2497 		if ((eeprom_data & apme_mask) != 0)
   2498 			sc->sc_flags |= WM_F_WOL;
   2499 		break;
   2500 	default:
   2501 		break;
   2502 	}
   2503 
   2504 	/* Reset the chip to a known state. */
   2505 	wm_reset(sc);
   2506 
   2507 	/*
   2508 	 * Check for I21[01] PLL workaround.
   2509 	 *
   2510 	 * Three cases:
   2511 	 * a) Chip is I211.
   2512 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2513 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2514 	 */
   2515 	if (sc->sc_type == WM_T_I211)
   2516 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2517 	if (sc->sc_type == WM_T_I210) {
   2518 		if (!wm_nvm_flash_presence_i210(sc))
   2519 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2520 		else if ((sc->sc_nvm_ver_major < 3)
   2521 		    || ((sc->sc_nvm_ver_major == 3)
   2522 			&& (sc->sc_nvm_ver_minor < 25))) {
   2523 			aprint_verbose_dev(sc->sc_dev,
   2524 			    "ROM image version %d.%d is older than 3.25\n",
   2525 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2526 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2527 		}
   2528 	}
   2529 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2530 		wm_pll_workaround_i210(sc);
   2531 
   2532 	wm_get_wakeup(sc);
   2533 
   2534 	/* Non-AMT based hardware can now take control from firmware */
   2535 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2536 		wm_get_hw_control(sc);
   2537 
   2538 	/*
    2539 	 * Read the Ethernet address from the EEPROM, unless it was
    2540 	 * found first in the device properties.
   2541 	 */
   2542 	ea = prop_dictionary_get(dict, "mac-address");
   2543 	if (ea != NULL) {
   2544 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2545 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2546 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2547 	} else {
   2548 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2549 			aprint_error_dev(sc->sc_dev,
   2550 			    "unable to read Ethernet address\n");
   2551 			goto out;
   2552 		}
   2553 	}
   2554 
   2555 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2556 	    ether_sprintf(enaddr));
   2557 
   2558 	/*
   2559 	 * Read the config info from the EEPROM, and set up various
   2560 	 * bits in the control registers based on their contents.
   2561 	 */
   2562 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2563 	if (pn != NULL) {
   2564 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2565 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2566 	} else {
   2567 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2568 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2569 			goto out;
   2570 		}
   2571 	}
   2572 
   2573 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2574 	if (pn != NULL) {
   2575 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2576 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2577 	} else {
   2578 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2579 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2580 			goto out;
   2581 		}
   2582 	}
   2583 
    2584 	/* Check for WM_F_WOL */
   2585 	switch (sc->sc_type) {
   2586 	case WM_T_82542_2_0:
   2587 	case WM_T_82542_2_1:
   2588 	case WM_T_82543:
   2589 		/* dummy? */
   2590 		eeprom_data = 0;
   2591 		apme_mask = NVM_CFG3_APME;
   2592 		break;
   2593 	case WM_T_82544:
   2594 		apme_mask = NVM_CFG2_82544_APM_EN;
   2595 		eeprom_data = cfg2;
   2596 		break;
   2597 	case WM_T_82546:
   2598 	case WM_T_82546_3:
   2599 	case WM_T_82571:
   2600 	case WM_T_82572:
   2601 	case WM_T_82573:
   2602 	case WM_T_82574:
   2603 	case WM_T_82583:
   2604 	case WM_T_80003:
   2605 	case WM_T_82575:
   2606 	case WM_T_82576:
   2607 		apme_mask = NVM_CFG3_APME;
   2608 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2609 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2610 		break;
   2611 	case WM_T_82580:
   2612 	case WM_T_I350:
   2613 	case WM_T_I354:
   2614 	case WM_T_I210:
   2615 	case WM_T_I211:
   2616 		apme_mask = NVM_CFG3_APME;
   2617 		wm_nvm_read(sc,
   2618 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2619 		    1, &eeprom_data);
   2620 		break;
   2621 	case WM_T_ICH8:
   2622 	case WM_T_ICH9:
   2623 	case WM_T_ICH10:
   2624 	case WM_T_PCH:
   2625 	case WM_T_PCH2:
   2626 	case WM_T_PCH_LPT:
   2627 	case WM_T_PCH_SPT:
   2628 	case WM_T_PCH_CNP:
    2629 		/* Already checked before wm_reset() */
   2630 		apme_mask = eeprom_data = 0;
   2631 		break;
   2632 	default: /* XXX 82540 */
   2633 		apme_mask = NVM_CFG3_APME;
   2634 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2635 		break;
   2636 	}
    2637 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2638 	if ((eeprom_data & apme_mask) != 0)
   2639 		sc->sc_flags |= WM_F_WOL;
   2640 
   2641 	/*
    2642 	 * We have the EEPROM settings; now apply the special cases
    2643 	 * where the EEPROM may be wrong or the board won't support
    2644 	 * wake-on-LAN on a particular port.
   2645 	 */
   2646 	switch (sc->sc_pcidevid) {
   2647 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2648 		sc->sc_flags &= ~WM_F_WOL;
   2649 		break;
   2650 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2651 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2652 		/* Wake events only supported on port A for dual fiber
   2653 		 * regardless of eeprom setting */
   2654 		if (sc->sc_funcid == 1)
   2655 			sc->sc_flags &= ~WM_F_WOL;
   2656 		break;
   2657 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2658 		/* If quad port adapter, disable WoL on all but port A */
   2659 		if (sc->sc_funcid != 0)
   2660 			sc->sc_flags &= ~WM_F_WOL;
   2661 		break;
   2662 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2663 		/* Wake events only supported on port A for dual fiber
   2664 		 * regardless of eeprom setting */
   2665 		if (sc->sc_funcid == 1)
   2666 			sc->sc_flags &= ~WM_F_WOL;
   2667 		break;
   2668 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2669 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2670 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2671 		/* If quad port adapter, disable WoL on all but port A */
   2672 		if (sc->sc_funcid != 0)
   2673 			sc->sc_flags &= ~WM_F_WOL;
   2674 		break;
   2675 	}
   2676 
   2677 	if (sc->sc_type >= WM_T_82575) {
   2678 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2679 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2680 			    nvmword);
   2681 			if ((sc->sc_type == WM_T_82575) ||
   2682 			    (sc->sc_type == WM_T_82576)) {
   2683 				/* Check NVM for autonegotiation */
   2684 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2685 				    != 0)
   2686 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2687 			}
   2688 			if ((sc->sc_type == WM_T_82575) ||
   2689 			    (sc->sc_type == WM_T_I350)) {
   2690 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2691 					sc->sc_flags |= WM_F_MAS;
   2692 			}
   2693 		}
   2694 	}
   2695 
   2696 	/*
    2697 	 * XXX Need special handling for some multiple-port cards
    2698 	 * to disable a particular port.
   2699 	 */
   2700 
   2701 	if (sc->sc_type >= WM_T_82544) {
   2702 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2703 		if (pn != NULL) {
   2704 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2705 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2706 		} else {
   2707 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2708 				aprint_error_dev(sc->sc_dev,
   2709 				    "unable to read SWDPIN\n");
   2710 				goto out;
   2711 			}
   2712 		}
   2713 	}
   2714 
   2715 	if (cfg1 & NVM_CFG1_ILOS)
   2716 		sc->sc_ctrl |= CTRL_ILOS;
   2717 
   2718 	/*
   2719 	 * XXX
    2720 	 * This code isn't correct because pins 2 and 3 are located
    2721 	 * in different positions on newer chips. Check all the datasheets.
    2722 	 *
    2723 	 * Until this is resolved, only do this for chips up to the 82580.
   2724 	 */
   2725 	if (sc->sc_type <= WM_T_82580) {
   2726 		if (sc->sc_type >= WM_T_82544) {
   2727 			sc->sc_ctrl |=
   2728 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2729 			    CTRL_SWDPIO_SHIFT;
   2730 			sc->sc_ctrl |=
   2731 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2732 			    CTRL_SWDPINS_SHIFT;
   2733 		} else {
   2734 			sc->sc_ctrl |=
   2735 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2736 			    CTRL_SWDPIO_SHIFT;
   2737 		}
   2738 	}
   2739 
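	/*
	 * 82580 to I211 keep a per-function CFG3 word in the NVM; honor
	 * its ILOS (invert loss-of-signal) bit as well.
	 */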
   2740 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2741 		wm_nvm_read(sc,
   2742 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2743 		    1, &nvmword);
   2744 		if (nvmword & NVM_CFG3_ILOS)
   2745 			sc->sc_ctrl |= CTRL_ILOS;
   2746 	}
   2747 
   2748 #if 0
   2749 	if (sc->sc_type >= WM_T_82544) {
   2750 		if (cfg1 & NVM_CFG1_IPS0)
   2751 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2752 		if (cfg1 & NVM_CFG1_IPS1)
   2753 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2754 		sc->sc_ctrl_ext |=
   2755 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2756 		    CTRL_EXT_SWDPIO_SHIFT;
   2757 		sc->sc_ctrl_ext |=
   2758 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2759 		    CTRL_EXT_SWDPINS_SHIFT;
   2760 	} else {
   2761 		sc->sc_ctrl_ext |=
   2762 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2763 		    CTRL_EXT_SWDPIO_SHIFT;
   2764 	}
   2765 #endif
   2766 
   2767 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2768 #if 0
   2769 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2770 #endif
   2771 
   2772 	if (sc->sc_type == WM_T_PCH) {
   2773 		uint16_t val;
   2774 
   2775 		/* Save the NVM K1 bit setting */
   2776 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2777 
   2778 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2779 			sc->sc_nvm_k1_enabled = 1;
   2780 		else
   2781 			sc->sc_nvm_k1_enabled = 0;
   2782 	}
   2783 
    2784 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2785 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2786 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2787 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2788 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2789 	    || sc->sc_type == WM_T_82573
   2790 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2791 		/* Copper only */
    2792 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2793 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2794 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2795 	    || (sc->sc_type == WM_T_I211)) {
   2796 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2797 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2798 		switch (link_mode) {
   2799 		case CTRL_EXT_LINK_MODE_1000KX:
   2800 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2801 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2802 			break;
   2803 		case CTRL_EXT_LINK_MODE_SGMII:
   2804 			if (wm_sgmii_uses_mdio(sc)) {
   2805 				aprint_normal_dev(sc->sc_dev,
   2806 				    "SGMII(MDIO)\n");
   2807 				sc->sc_flags |= WM_F_SGMII;
   2808 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2809 				break;
   2810 			}
   2811 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2812 			/*FALLTHROUGH*/
   2813 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2814 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2815 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2816 				if (link_mode
   2817 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2818 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2819 					sc->sc_flags |= WM_F_SGMII;
   2820 					aprint_verbose_dev(sc->sc_dev,
   2821 					    "SGMII\n");
   2822 				} else {
   2823 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2824 					aprint_verbose_dev(sc->sc_dev,
   2825 					    "SERDES\n");
   2826 				}
   2827 				break;
   2828 			}
   2829 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2830 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2831 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2832 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2833 				sc->sc_flags |= WM_F_SGMII;
   2834 			}
   2835 			/* Do not change link mode for 100BaseFX */
   2836 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2837 				break;
   2838 
   2839 			/* Change current link mode setting */
   2840 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2841 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2842 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2843 			else
   2844 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2845 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2846 			break;
   2847 		case CTRL_EXT_LINK_MODE_GMII:
   2848 		default:
   2849 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2850 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2851 			break;
   2852 		}
   2853 
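		/* Enable the I2C interface only when SGMII is in use. */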
    2855 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2856 			reg |= CTRL_EXT_I2C_ENA;
    2857 		else
    2858 			reg &= ~CTRL_EXT_I2C_ENA;
   2859 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2860 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2861 			wm_gmii_setup_phytype(sc, 0, 0);
   2862 			wm_reset_mdicnfg_82580(sc);
   2863 		}
   2864 	} else if (sc->sc_type < WM_T_82543 ||
   2865 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2866 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2867 			aprint_error_dev(sc->sc_dev,
   2868 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2869 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2870 		}
   2871 	} else {
   2872 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2873 			aprint_error_dev(sc->sc_dev,
   2874 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2875 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2876 		}
   2877 	}
   2878 
   2879 	if (sc->sc_type >= WM_T_PCH2)
   2880 		sc->sc_flags |= WM_F_EEE;
   2881 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2882 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2883 		/* XXX: Need special handling for I354. (not yet) */
   2884 		if (sc->sc_type != WM_T_I354)
   2885 			sc->sc_flags |= WM_F_EEE;
   2886 	}
   2887 
   2888 	/* Set device properties (macflags) */
   2889 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2890 
   2891 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2892 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2893 
   2894 #ifdef WM_MPSAFE
   2895 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2896 #else
   2897 	sc->sc_core_lock = NULL;
   2898 #endif
   2899 
   2900 	/* Initialize the media structures accordingly. */
   2901 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2902 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2903 	else
   2904 		wm_tbi_mediainit(sc); /* All others */
   2905 
   2906 	ifp = &sc->sc_ethercom.ec_if;
   2907 	xname = device_xname(sc->sc_dev);
   2908 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2909 	ifp->if_softc = sc;
   2910 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2911 #ifdef WM_MPSAFE
   2912 	ifp->if_extflags = IFEF_MPSAFE;
   2913 #endif
   2914 	ifp->if_ioctl = wm_ioctl;
   2915 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2916 		ifp->if_start = wm_nq_start;
    2917 		/*
    2918 		 * When the number of CPUs is one and the controller can use
    2919 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2920 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2921 		 * other for link status changes.
    2922 		 * In this situation, wm_nq_transmit() is disadvantageous
    2923 		 * because of the wm_select_txqueue() and pcq(9) overhead.
    2924 		 */
   2925 		if (wm_is_using_multiqueue(sc))
   2926 			ifp->if_transmit = wm_nq_transmit;
   2927 	} else {
   2928 		ifp->if_start = wm_start;
    2929 		/*
    2930 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
    2931 		 */
   2932 		if (wm_is_using_multiqueue(sc))
   2933 			ifp->if_transmit = wm_transmit;
   2934 	}
    2935 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2936 	ifp->if_init = wm_init;
   2937 	ifp->if_stop = wm_stop;
   2938 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2939 	IFQ_SET_READY(&ifp->if_snd);
   2940 
   2941 	/* Check for jumbo frame */
   2942 	switch (sc->sc_type) {
   2943 	case WM_T_82573:
   2944 		/* XXX limited to 9234 if ASPM is disabled */
   2945 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2946 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2947 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2948 		break;
   2949 	case WM_T_82571:
   2950 	case WM_T_82572:
   2951 	case WM_T_82574:
   2952 	case WM_T_82583:
   2953 	case WM_T_82575:
   2954 	case WM_T_82576:
   2955 	case WM_T_82580:
   2956 	case WM_T_I350:
   2957 	case WM_T_I354:
   2958 	case WM_T_I210:
   2959 	case WM_T_I211:
   2960 	case WM_T_80003:
   2961 	case WM_T_ICH9:
   2962 	case WM_T_ICH10:
   2963 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2964 	case WM_T_PCH_LPT:
   2965 	case WM_T_PCH_SPT:
   2966 	case WM_T_PCH_CNP:
   2967 		/* XXX limited to 9234 */
   2968 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2969 		break;
   2970 	case WM_T_PCH:
   2971 		/* XXX limited to 4096 */
   2972 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2973 		break;
   2974 	case WM_T_82542_2_0:
   2975 	case WM_T_82542_2_1:
   2976 	case WM_T_ICH8:
   2977 		/* No support for jumbo frame */
   2978 		break;
   2979 	default:
   2980 		/* ETHER_MAX_LEN_JUMBO */
   2981 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2982 		break;
   2983 	}
   2984 
    2985 	/* If we're an i82543 or greater, we can support VLANs. */
   2986 	if (sc->sc_type >= WM_T_82543) {
   2987 		sc->sc_ethercom.ec_capabilities |=
   2988 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2989 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2990 	}
   2991 
   2992 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2993 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2994 
   2995 	/*
    2996 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2997 	 * on i82543 and later.
   2998 	 */
   2999 	if (sc->sc_type >= WM_T_82543) {
   3000 		ifp->if_capabilities |=
   3001 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3002 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3003 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3004 		    IFCAP_CSUM_TCPv6_Tx |
   3005 		    IFCAP_CSUM_UDPv6_Tx;
   3006 	}
   3007 
   3008 	/*
   3009 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3010 	 *
   3011 	 *	82541GI (8086:1076) ... no
   3012 	 *	82572EI (8086:10b9) ... yes
   3013 	 */
   3014 	if (sc->sc_type >= WM_T_82571) {
   3015 		ifp->if_capabilities |=
   3016 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3017 	}
   3018 
   3019 	/*
    3020 	 * If we're an i82544 or greater (except i82547), we can do
   3021 	 * TCP segmentation offload.
   3022 	 */
   3023 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3024 		ifp->if_capabilities |= IFCAP_TSOv4;
   3025 	}
   3026 
   3027 	if (sc->sc_type >= WM_T_82571) {
   3028 		ifp->if_capabilities |= IFCAP_TSOv6;
   3029 	}
   3030 
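	/* Set the default Tx/Rx packet processing limits. */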
   3031 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3032 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3033 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3034 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3035 
   3036 	/* Attach the interface. */
   3037 	error = if_initialize(ifp);
   3038 	if (error != 0) {
   3039 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3040 		    error);
   3041 		return; /* Error */
   3042 	}
   3043 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3044 	ether_ifattach(ifp, enaddr);
   3045 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3046 	if_register(ifp);
   3047 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3048 	    RND_FLAG_DEFAULT);
   3049 
   3050 #ifdef WM_EVENT_COUNTERS
   3051 	/* Attach event counters. */
   3052 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3053 	    NULL, xname, "linkintr");
   3054 
   3055 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3056 	    NULL, xname, "tx_xoff");
   3057 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3058 	    NULL, xname, "tx_xon");
   3059 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3060 	    NULL, xname, "rx_xoff");
   3061 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3062 	    NULL, xname, "rx_xon");
   3063 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3064 	    NULL, xname, "rx_macctl");
   3065 #endif /* WM_EVENT_COUNTERS */
   3066 
   3067 	sc->sc_txrx_use_workqueue = false;
   3068 
   3069 	wm_init_sysctls(sc);
   3070 
   3071 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3072 		pmf_class_network_register(self, ifp);
   3073 	else
   3074 		aprint_error_dev(self, "couldn't establish power handler\n");
   3075 
   3076 	sc->sc_flags |= WM_F_ATTACHED;
   3077 out:
   3078 	return;
   3079 }
   3080 
   3081 /* The detach function (ca_detach) */
   3082 static int
   3083 wm_detach(device_t self, int flags __unused)
   3084 {
   3085 	struct wm_softc *sc = device_private(self);
   3086 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3087 	int i;
   3088 
   3089 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3090 		return 0;
   3091 
    3092 	/* Stop the interface. Callouts are stopped in wm_stop(). */
   3093 	wm_stop(ifp, 1);
   3094 
   3095 	pmf_device_deregister(self);
   3096 
   3097 	sysctl_teardown(&sc->sc_sysctllog);
   3098 
   3099 #ifdef WM_EVENT_COUNTERS
   3100 	evcnt_detach(&sc->sc_ev_linkintr);
   3101 
   3102 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3103 	evcnt_detach(&sc->sc_ev_tx_xon);
   3104 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3105 	evcnt_detach(&sc->sc_ev_rx_xon);
   3106 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3107 #endif /* WM_EVENT_COUNTERS */
   3108 
   3109 	rnd_detach_source(&sc->rnd_source);
   3110 
   3111 	/* Tell the firmware about the release */
   3112 	WM_CORE_LOCK(sc);
   3113 	wm_release_manageability(sc);
   3114 	wm_release_hw_control(sc);
   3115 	wm_enable_wakeup(sc);
   3116 	WM_CORE_UNLOCK(sc);
   3117 
   3118 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3119 
   3120 	ether_ifdetach(ifp);
   3121 	if_detach(ifp);
   3122 	if_percpuq_destroy(sc->sc_ipq);
   3123 
   3124 	/* Delete all remaining media. */
   3125 	ifmedia_fini(&sc->sc_mii.mii_media);
   3126 
   3127 	/* Unload RX dmamaps and free mbufs */
   3128 	for (i = 0; i < sc->sc_nqueues; i++) {
   3129 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3130 		mutex_enter(rxq->rxq_lock);
   3131 		wm_rxdrain(rxq);
   3132 		mutex_exit(rxq->rxq_lock);
   3133 	}
   3134 	/* Must unlock here */
   3135 
   3136 	/* Disestablish the interrupt handler */
   3137 	for (i = 0; i < sc->sc_nintrs; i++) {
   3138 		if (sc->sc_ihs[i] != NULL) {
   3139 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3140 			sc->sc_ihs[i] = NULL;
   3141 		}
   3142 	}
   3143 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3144 
    3145 	/* wm_stop() ensures the workqueue is stopped. */
   3146 	workqueue_destroy(sc->sc_queue_wq);
   3147 
   3148 	for (i = 0; i < sc->sc_nqueues; i++)
   3149 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3150 
   3151 	wm_free_txrx_queues(sc);
   3152 
   3153 	/* Unmap the registers */
   3154 	if (sc->sc_ss) {
   3155 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3156 		sc->sc_ss = 0;
   3157 	}
   3158 	if (sc->sc_ios) {
   3159 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3160 		sc->sc_ios = 0;
   3161 	}
   3162 	if (sc->sc_flashs) {
   3163 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3164 		sc->sc_flashs = 0;
   3165 	}
   3166 
   3167 	if (sc->sc_core_lock)
   3168 		mutex_obj_free(sc->sc_core_lock);
   3169 	if (sc->sc_ich_phymtx)
   3170 		mutex_obj_free(sc->sc_ich_phymtx);
   3171 	if (sc->sc_ich_nvmmtx)
   3172 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3173 
   3174 	return 0;
   3175 }
   3176 
   3177 static bool
   3178 wm_suspend(device_t self, const pmf_qual_t *qual)
   3179 {
   3180 	struct wm_softc *sc = device_private(self);
   3181 
   3182 	wm_release_manageability(sc);
   3183 	wm_release_hw_control(sc);
   3184 	wm_enable_wakeup(sc);
   3185 
   3186 	return true;
   3187 }
   3188 
   3189 static bool
   3190 wm_resume(device_t self, const pmf_qual_t *qual)
   3191 {
   3192 	struct wm_softc *sc = device_private(self);
   3193 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3194 	pcireg_t reg;
   3195 	char buf[256];
   3196 
   3197 	reg = CSR_READ(sc, WMREG_WUS);
   3198 	if (reg != 0) {
   3199 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3200 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3201 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3202 	}
   3203 
   3204 	if (sc->sc_type >= WM_T_PCH2)
   3205 		wm_resume_workarounds_pchlan(sc);
   3206 	if ((ifp->if_flags & IFF_UP) == 0) {
   3207 		wm_reset(sc);
   3208 		/* Non-AMT based hardware can now take control from firmware */
   3209 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3210 			wm_get_hw_control(sc);
   3211 		wm_init_manageability(sc);
   3212 	} else {
   3213 		/*
   3214 		 * We called pmf_class_network_register(), so if_init() is
   3215 		 * automatically called when IFF_UP. wm_reset(),
   3216 		 * wm_get_hw_control() and wm_init_manageability() are called
   3217 		 * via wm_init().
   3218 		 */
   3219 	}
   3220 
   3221 	return true;
   3222 }
   3223 
   3224 /*
   3225  * wm_watchdog:		[ifnet interface function]
   3226  *
   3227  *	Watchdog timer handler.
   3228  */
   3229 static void
   3230 wm_watchdog(struct ifnet *ifp)
   3231 {
   3232 	int qid;
   3233 	struct wm_softc *sc = ifp->if_softc;
    3234 	uint16_t hang_queue = 0; /* 16 bits suffice: max queue count is the 82576's 16. */
   3235 
   3236 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3237 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3238 
   3239 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3240 	}
   3241 
    3242 	/* If any of the queues hung up, reset the interface. */
   3243 	if (hang_queue != 0) {
   3244 		(void)wm_init(ifp);
   3245 
    3246 		/*
    3247 		 * Some upper-layer processing, e.g. ALTQ or a single-CPU
    3248 		 * system, may still call ifp->if_start().
    3249 		 */
    3250 		/* Try to get more packets going. */
   3251 		ifp->if_start(ifp);
   3252 	}
   3253 }
   3254 
   3255 
   3256 static void
   3257 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3258 {
   3259 
   3260 	mutex_enter(txq->txq_lock);
   3261 	if (txq->txq_sending &&
   3262 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3263 		wm_watchdog_txq_locked(ifp, txq, hang);
   3264 
   3265 	mutex_exit(txq->txq_lock);
   3266 }
   3267 
   3268 static void
   3269 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3270     uint16_t *hang)
   3271 {
   3272 	struct wm_softc *sc = ifp->if_softc;
   3273 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3274 
   3275 	KASSERT(mutex_owned(txq->txq_lock));
   3276 
   3277 	/*
   3278 	 * Since we're using delayed interrupts, sweep up
   3279 	 * before we report an error.
   3280 	 */
   3281 	wm_txeof(txq, UINT_MAX);
   3282 
   3283 	if (txq->txq_sending)
   3284 		*hang |= __BIT(wmq->wmq_id);
   3285 
   3286 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3287 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3288 		    device_xname(sc->sc_dev));
   3289 	} else {
   3290 #ifdef WM_DEBUG
   3291 		int i, j;
   3292 		struct wm_txsoft *txs;
   3293 #endif
   3294 		log(LOG_ERR,
   3295 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3296 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3297 		    txq->txq_next);
   3298 		if_statinc(ifp, if_oerrors);
   3299 #ifdef WM_DEBUG
   3300 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3301 		    i = WM_NEXTTXS(txq, i)) {
   3302 			txs = &txq->txq_soft[i];
   3303 			printf("txs %d tx %d -> %d\n",
   3304 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3305 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3306 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3307 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3308 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3309 					printf("\t %#08x%08x\n",
   3310 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3311 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3312 				} else {
   3313 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3314 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3315 					    txq->txq_descs[j].wtx_addr.wa_low);
   3316 					printf("\t %#04x%02x%02x%08x\n",
   3317 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3318 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3319 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3320 					    txq->txq_descs[j].wtx_cmdlen);
   3321 				}
   3322 				if (j == txs->txs_lastdesc)
   3323 					break;
   3324 			}
   3325 		}
   3326 #endif
   3327 	}
   3328 }
   3329 
   3330 /*
   3331  * wm_tick:
   3332  *
   3333  *	One second timer, used to check link status, sweep up
   3334  *	completed transmit jobs, etc.
   3335  */
   3336 static void
   3337 wm_tick(void *arg)
   3338 {
   3339 	struct wm_softc *sc = arg;
   3340 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3341 #ifndef WM_MPSAFE
   3342 	int s = splnet();
   3343 #endif
   3344 
   3345 	WM_CORE_LOCK(sc);
   3346 
   3347 	if (sc->sc_core_stopping) {
   3348 		WM_CORE_UNLOCK(sc);
   3349 #ifndef WM_MPSAFE
   3350 		splx(s);
   3351 #endif
   3352 		return;
   3353 	}
   3354 
   3355 	if (sc->sc_type >= WM_T_82542_2_1) {
   3356 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3357 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3358 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3359 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3360 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3361 	}
   3362 
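	/* Fold the MAC's collision and error counters into the ifnet stats. */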
   3363 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3364 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3365 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3366 	    + CSR_READ(sc, WMREG_CRCERRS)
   3367 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3368 	    + CSR_READ(sc, WMREG_SYMERRC)
   3369 	    + CSR_READ(sc, WMREG_RXERRC)
   3370 	    + CSR_READ(sc, WMREG_SEC)
   3371 	    + CSR_READ(sc, WMREG_CEXTERR)
   3372 	    + CSR_READ(sc, WMREG_RLEC));
    3373 	/*
    3374 	 * WMREG_RNBC is incremented when there are no available buffers
    3375 	 * in host memory. It does not count dropped packets, because the
    3376 	 * Ethernet controller can still receive packets in that case as
    3377 	 * long as there is space in the PHY's FIFO.
    3378 	 *
    3379 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
    3380 	 * instead of if_iqdrops.
    3381 	 */
   3382 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3383 	IF_STAT_PUTREF(ifp);
   3384 
   3385 	if (sc->sc_flags & WM_F_HAS_MII)
   3386 		mii_tick(&sc->sc_mii);
   3387 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3388 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3389 		wm_serdes_tick(sc);
   3390 	else
   3391 		wm_tbi_tick(sc);
   3392 
   3393 	WM_CORE_UNLOCK(sc);
   3394 
   3395 	wm_watchdog(ifp);
   3396 
   3397 	callout_schedule(&sc->sc_tick_ch, hz);
   3398 }
   3399 
   3400 static int
   3401 wm_ifflags_cb(struct ethercom *ec)
   3402 {
   3403 	struct ifnet *ifp = &ec->ec_if;
   3404 	struct wm_softc *sc = ifp->if_softc;
   3405 	u_short iffchange;
   3406 	int ecchange;
   3407 	bool needreset = false;
   3408 	int rc = 0;
   3409 
   3410 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3411 		device_xname(sc->sc_dev), __func__));
   3412 
   3413 	WM_CORE_LOCK(sc);
   3414 
    3415 	/*
    3416 	 * Check for changes in if_flags.
    3417 	 * The main usage is to prevent a link down when opening bpf.
    3418 	 */
   3419 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3420 	sc->sc_if_flags = ifp->if_flags;
   3421 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3422 		needreset = true;
   3423 		goto ec;
   3424 	}
   3425 
   3426 	/* iff related updates */
   3427 	if ((iffchange & IFF_PROMISC) != 0)
   3428 		wm_set_filter(sc);
   3429 
   3430 	wm_set_vlan(sc);
   3431 
   3432 ec:
   3433 	/* Check for ec_capenable. */
   3434 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3435 	sc->sc_ec_capenable = ec->ec_capenable;
   3436 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3437 		needreset = true;
   3438 		goto out;
   3439 	}
   3440 
   3441 	/* ec related updates */
   3442 	wm_set_eee(sc);
   3443 
   3444 out:
   3445 	if (needreset)
   3446 		rc = ENETRESET;
   3447 	WM_CORE_UNLOCK(sc);
   3448 
   3449 	return rc;
   3450 }
   3451 
   3452 /*
   3453  * wm_ioctl:		[ifnet interface function]
   3454  *
   3455  *	Handle control requests from the operator.
   3456  */
   3457 static int
   3458 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3459 {
   3460 	struct wm_softc *sc = ifp->if_softc;
   3461 	struct ifreq *ifr = (struct ifreq *)data;
   3462 	struct ifaddr *ifa = (struct ifaddr *)data;
   3463 	struct sockaddr_dl *sdl;
   3464 	int s, error;
   3465 
   3466 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3467 		device_xname(sc->sc_dev), __func__));
   3468 
   3469 #ifndef WM_MPSAFE
   3470 	s = splnet();
   3471 #endif
   3472 	switch (cmd) {
   3473 	case SIOCSIFMEDIA:
   3474 		WM_CORE_LOCK(sc);
   3475 		/* Flow control requires full-duplex mode. */
   3476 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3477 		    (ifr->ifr_media & IFM_FDX) == 0)
   3478 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3479 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3480 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3481 				/* We can do both TXPAUSE and RXPAUSE. */
   3482 				ifr->ifr_media |=
   3483 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3484 			}
   3485 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3486 		}
   3487 		WM_CORE_UNLOCK(sc);
   3488 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3489 		break;
   3490 	case SIOCINITIFADDR:
   3491 		WM_CORE_LOCK(sc);
   3492 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3493 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3494 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3495 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3496 			/* Unicast address is the first multicast entry */
   3497 			wm_set_filter(sc);
   3498 			error = 0;
   3499 			WM_CORE_UNLOCK(sc);
   3500 			break;
   3501 		}
   3502 		WM_CORE_UNLOCK(sc);
   3503 		/*FALLTHROUGH*/
   3504 	default:
   3505 #ifdef WM_MPSAFE
   3506 		s = splnet();
   3507 #endif
    3508 		/* ether_ioctl() may call wm_start(), so don't hold the core lock */
   3509 		error = ether_ioctl(ifp, cmd, data);
   3510 #ifdef WM_MPSAFE
   3511 		splx(s);
   3512 #endif
   3513 		if (error != ENETRESET)
   3514 			break;
   3515 
   3516 		error = 0;
   3517 
   3518 		if (cmd == SIOCSIFCAP)
   3519 			error = (*ifp->if_init)(ifp);
   3520 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3521 			;
   3522 		else if (ifp->if_flags & IFF_RUNNING) {
   3523 			/*
   3524 			 * Multicast list has changed; set the hardware filter
   3525 			 * accordingly.
   3526 			 */
   3527 			WM_CORE_LOCK(sc);
   3528 			wm_set_filter(sc);
   3529 			WM_CORE_UNLOCK(sc);
   3530 		}
   3531 		break;
   3532 	}
   3533 
   3534 #ifndef WM_MPSAFE
   3535 	splx(s);
   3536 #endif
   3537 	return error;
   3538 }
   3539 
   3540 /* MAC address related */
   3541 
   3542 /*
    3543  * Get the offset of the MAC address and return it.
    3544  * If an error occurs, use offset 0.
   3545  */
   3546 static uint16_t
   3547 wm_check_alt_mac_addr(struct wm_softc *sc)
   3548 {
   3549 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3550 	uint16_t offset = NVM_OFF_MACADDR;
   3551 
   3552 	/* Try to read alternative MAC address pointer */
   3553 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3554 		return 0;
   3555 
    3556 	/* Check whether the pointer is valid. */
   3557 	if ((offset == 0x0000) || (offset == 0xffff))
   3558 		return 0;
   3559 
   3560 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3561 	/*
    3562 	 * Check whether the alternative MAC address is valid. Some cards
    3563 	 * have a non-0xffff pointer but don't actually use an alternative
    3564 	 * MAC address.
    3565 	 *
    3566 	 * Do this by checking whether the multicast (group) bit is set.
    3567 	 */
   3568 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3569 		if (((myea[0] & 0xff) & 0x01) == 0)
   3570 			return offset; /* Found */
   3571 
   3572 	/* Not found */
   3573 	return 0;
   3574 }
   3575 
   3576 static int
   3577 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3578 {
   3579 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3580 	uint16_t offset = NVM_OFF_MACADDR;
   3581 	int do_invert = 0;
   3582 
   3583 	switch (sc->sc_type) {
   3584 	case WM_T_82580:
   3585 	case WM_T_I350:
   3586 	case WM_T_I354:
   3587 		/* EEPROM Top Level Partitioning */
   3588 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3589 		break;
   3590 	case WM_T_82571:
   3591 	case WM_T_82575:
   3592 	case WM_T_82576:
   3593 	case WM_T_80003:
   3594 	case WM_T_I210:
   3595 	case WM_T_I211:
   3596 		offset = wm_check_alt_mac_addr(sc);
   3597 		if (offset == 0)
   3598 			if ((sc->sc_funcid & 0x01) == 1)
   3599 				do_invert = 1;
   3600 		break;
   3601 	default:
   3602 		if ((sc->sc_funcid & 0x01) == 1)
   3603 			do_invert = 1;
   3604 		break;
   3605 	}
   3606 
   3607 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3608 		goto bad;
   3609 
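	/* Each 16-bit NVM word holds two octets of the address, low byte first. */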
   3610 	enaddr[0] = myea[0] & 0xff;
   3611 	enaddr[1] = myea[0] >> 8;
   3612 	enaddr[2] = myea[1] & 0xff;
   3613 	enaddr[3] = myea[1] >> 8;
   3614 	enaddr[4] = myea[2] & 0xff;
   3615 	enaddr[5] = myea[2] >> 8;
   3616 
   3617 	/*
   3618 	 * Toggle the LSB of the MAC address on the second port
   3619 	 * of some dual port cards.
   3620 	 */
   3621 	if (do_invert != 0)
   3622 		enaddr[5] ^= 1;
   3623 
   3624 	return 0;
   3625 
   3626  bad:
   3627 	return -1;
   3628 }
   3629 
   3630 /*
   3631  * wm_set_ral:
   3632  *
    3633  *	Set an entry in the receive address list.
   3634  */
   3635 static void
   3636 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3637 {
   3638 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3639 	uint32_t wlock_mac;
   3640 	int rv;
   3641 
   3642 	if (enaddr != NULL) {
   3643 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3644 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3645 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3646 		ral_hi |= RAL_AV;
   3647 	} else {
   3648 		ral_lo = 0;
   3649 		ral_hi = 0;
   3650 	}
   3651 
   3652 	switch (sc->sc_type) {
   3653 	case WM_T_82542_2_0:
   3654 	case WM_T_82542_2_1:
   3655 	case WM_T_82543:
   3656 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3657 		CSR_WRITE_FLUSH(sc);
   3658 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3659 		CSR_WRITE_FLUSH(sc);
   3660 		break;
   3661 	case WM_T_PCH2:
   3662 	case WM_T_PCH_LPT:
   3663 	case WM_T_PCH_SPT:
   3664 	case WM_T_PCH_CNP:
   3665 		if (idx == 0) {
   3666 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3667 			CSR_WRITE_FLUSH(sc);
   3668 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3669 			CSR_WRITE_FLUSH(sc);
   3670 			return;
   3671 		}
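		/*
		 * RAR[0] was handled above; other entries go through the
		 * shared receive address (SHRA) registers. On PCH_LPT and
		 * newer, the FWSM WLOCK_MAC field limits how many of them
		 * software may write (0 means no limit).
		 */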
   3672 		if (sc->sc_type != WM_T_PCH2) {
   3673 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3674 			    FWSM_WLOCK_MAC);
   3675 			addrl = WMREG_SHRAL(idx - 1);
   3676 			addrh = WMREG_SHRAH(idx - 1);
   3677 		} else {
   3678 			wlock_mac = 0;
   3679 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3680 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3681 		}
   3682 
   3683 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3684 			rv = wm_get_swflag_ich8lan(sc);
   3685 			if (rv != 0)
   3686 				return;
   3687 			CSR_WRITE(sc, addrl, ral_lo);
   3688 			CSR_WRITE_FLUSH(sc);
   3689 			CSR_WRITE(sc, addrh, ral_hi);
   3690 			CSR_WRITE_FLUSH(sc);
   3691 			wm_put_swflag_ich8lan(sc);
   3692 		}
   3693 
   3694 		break;
   3695 	default:
   3696 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3697 		CSR_WRITE_FLUSH(sc);
   3698 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3699 		CSR_WRITE_FLUSH(sc);
   3700 		break;
   3701 	}
   3702 }
   3703 
   3704 /*
   3705  * wm_mchash:
   3706  *
   3707  *	Compute the hash of the multicast address for the 4096-bit
   3708  *	multicast filter.
   3709  */
   3710 static uint32_t
   3711 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3712 {
   3713 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3714 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3715 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3716 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3717 	uint32_t hash;
   3718 
   3719 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3720 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3721 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3722 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3723 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3724 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3725 		return (hash & 0x3ff);
   3726 	}
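	/*
	 * Other chips use a 12-bit hash. For example, with mchash_type 0
	 * the hash is (enaddr[4] >> 4) | (enaddr[5] << 4), masked to 0xfff.
	 */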
   3727 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3728 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3729 
   3730 	return (hash & 0xfff);
   3731 }
   3732 
    3733 /*
    3734  * wm_rar_count:
    3735  *	Return the number of receive address list (RAL) entries.
    3736  */
   3737 static int
   3738 wm_rar_count(struct wm_softc *sc)
   3739 {
   3740 	int size;
   3741 
   3742 	switch (sc->sc_type) {
   3743 	case WM_T_ICH8:
    3744 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3745 		break;
   3746 	case WM_T_ICH9:
   3747 	case WM_T_ICH10:
   3748 	case WM_T_PCH:
   3749 		size = WM_RAL_TABSIZE_ICH8;
   3750 		break;
   3751 	case WM_T_PCH2:
   3752 		size = WM_RAL_TABSIZE_PCH2;
   3753 		break;
   3754 	case WM_T_PCH_LPT:
   3755 	case WM_T_PCH_SPT:
   3756 	case WM_T_PCH_CNP:
   3757 		size = WM_RAL_TABSIZE_PCH_LPT;
   3758 		break;
   3759 	case WM_T_82575:
   3760 	case WM_T_I210:
   3761 	case WM_T_I211:
   3762 		size = WM_RAL_TABSIZE_82575;
   3763 		break;
   3764 	case WM_T_82576:
   3765 	case WM_T_82580:
   3766 		size = WM_RAL_TABSIZE_82576;
   3767 		break;
   3768 	case WM_T_I350:
   3769 	case WM_T_I354:
   3770 		size = WM_RAL_TABSIZE_I350;
   3771 		break;
   3772 	default:
   3773 		size = WM_RAL_TABSIZE;
   3774 	}
   3775 
   3776 	return size;
   3777 }
   3778 
   3779 /*
   3780  * wm_set_filter:
   3781  *
   3782  *	Set up the receive filter.
   3783  */
   3784 static void
   3785 wm_set_filter(struct wm_softc *sc)
   3786 {
   3787 	struct ethercom *ec = &sc->sc_ethercom;
   3788 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3789 	struct ether_multi *enm;
   3790 	struct ether_multistep step;
   3791 	bus_addr_t mta_reg;
   3792 	uint32_t hash, reg, bit;
   3793 	int i, size, ralmax;
   3794 
   3795 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3796 		device_xname(sc->sc_dev), __func__));
   3797 
   3798 	if (sc->sc_type >= WM_T_82544)
   3799 		mta_reg = WMREG_CORDOVA_MTA;
   3800 	else
   3801 		mta_reg = WMREG_MTA;
   3802 
   3803 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3804 
   3805 	if (ifp->if_flags & IFF_BROADCAST)
   3806 		sc->sc_rctl |= RCTL_BAM;
   3807 	if (ifp->if_flags & IFF_PROMISC) {
   3808 		sc->sc_rctl |= RCTL_UPE;
   3809 		ETHER_LOCK(ec);
   3810 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3811 		ETHER_UNLOCK(ec);
   3812 		goto allmulti;
   3813 	}
   3814 
   3815 	/*
   3816 	 * Set the station address in the first RAL slot, and
   3817 	 * clear the remaining slots.
   3818 	 */
   3819 	size = wm_rar_count(sc);
   3820 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3821 
   3822 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3823 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3824 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3825 		switch (i) {
   3826 		case 0:
   3827 			/* We can use all entries */
   3828 			ralmax = size;
   3829 			break;
   3830 		case 1:
   3831 			/* Only RAR[0] */
   3832 			ralmax = 1;
   3833 			break;
   3834 		default:
   3835 			/* Available SHRA + RAR[0] */
   3836 			ralmax = i + 1;
   3837 		}
   3838 	} else
   3839 		ralmax = size;
   3840 	for (i = 1; i < size; i++) {
   3841 		if (i < ralmax)
   3842 			wm_set_ral(sc, NULL, i);
   3843 	}
   3844 
   3845 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3846 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3847 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3848 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3849 		size = WM_ICH8_MC_TABSIZE;
   3850 	else
   3851 		size = WM_MC_TABSIZE;
   3852 	/* Clear out the multicast table. */
   3853 	for (i = 0; i < size; i++) {
   3854 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3855 		CSR_WRITE_FLUSH(sc);
   3856 	}
   3857 
   3858 	ETHER_LOCK(ec);
   3859 	ETHER_FIRST_MULTI(step, ec, enm);
   3860 	while (enm != NULL) {
   3861 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3862 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3863 			ETHER_UNLOCK(ec);
   3864 			/*
   3865 			 * We must listen to a range of multicast addresses.
   3866 			 * For now, just accept all multicasts, rather than
   3867 			 * trying to set only those filter bits needed to match
   3868 			 * the range.  (At this time, the only use of address
   3869 			 * ranges is for IP multicast routing, for which the
   3870 			 * range is big enough to require all bits set.)
   3871 			 */
   3872 			goto allmulti;
   3873 		}
   3874 
   3875 		hash = wm_mchash(sc, enm->enm_addrlo);
   3876 
   3877 		reg = (hash >> 5);
   3878 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3879 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3880 		    || (sc->sc_type == WM_T_PCH2)
   3881 		    || (sc->sc_type == WM_T_PCH_LPT)
   3882 		    || (sc->sc_type == WM_T_PCH_SPT)
   3883 		    || (sc->sc_type == WM_T_PCH_CNP))
   3884 			reg &= 0x1f;
   3885 		else
   3886 			reg &= 0x7f;
   3887 		bit = hash & 0x1f;
   3888 
   3889 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3890 		hash |= 1U << bit;
   3891 
   3892 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3893 			/*
    3894 			 * 82544 Errata 9: Certain registers cannot be written
   3895 			 * with particular alignments in PCI-X bus operation
   3896 			 * (FCAH, MTA and VFTA).
   3897 			 */
   3898 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3899 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3900 			CSR_WRITE_FLUSH(sc);
   3901 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3902 			CSR_WRITE_FLUSH(sc);
   3903 		} else {
   3904 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3905 			CSR_WRITE_FLUSH(sc);
   3906 		}
   3907 
   3908 		ETHER_NEXT_MULTI(step, enm);
   3909 	}
   3910 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3911 	ETHER_UNLOCK(ec);
   3912 
   3913 	goto setit;
   3914 
   3915  allmulti:
   3916 	sc->sc_rctl |= RCTL_MPE;
   3917 
   3918  setit:
   3919 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3920 }
   3921 
   3922 /* Reset and init related */
   3923 
   3924 static void
   3925 wm_set_vlan(struct wm_softc *sc)
   3926 {
   3927 
   3928 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3929 		device_xname(sc->sc_dev), __func__));
   3930 
   3931 	/* Deal with VLAN enables. */
   3932 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3933 		sc->sc_ctrl |= CTRL_VME;
   3934 	else
   3935 		sc->sc_ctrl &= ~CTRL_VME;
   3936 
   3937 	/* Write the control registers. */
   3938 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3939 }
   3940 
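/*
 * wm_set_pcie_completion_timeout:
 *	If the PCIe completion timeout is still at its default of 0, set it:
 *	through the standard Device Control 2 register if the device reports
 *	capability version 2, otherwise through the GCR 10ms encoding. The
 *	completion timeout resend feature is disabled in either case.
 */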
   3941 static void
   3942 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3943 {
   3944 	uint32_t gcr;
   3945 	pcireg_t ctrl2;
   3946 
   3947 	gcr = CSR_READ(sc, WMREG_GCR);
   3948 
   3949 	/* Only take action if timeout value is defaulted to 0 */
   3950 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3951 		goto out;
   3952 
   3953 	if ((gcr & GCR_CAP_VER2) == 0) {
   3954 		gcr |= GCR_CMPL_TMOUT_10MS;
   3955 		goto out;
   3956 	}
   3957 
   3958 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3959 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3960 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3961 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3962 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3963 
   3964 out:
   3965 	/* Disable completion timeout resend */
   3966 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3967 
   3968 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3969 }
   3970 
   3971 void
   3972 wm_get_auto_rd_done(struct wm_softc *sc)
   3973 {
   3974 	int i;
   3975 
    3976 	/* Wait for eeprom to reload */
   3977 	switch (sc->sc_type) {
   3978 	case WM_T_82571:
   3979 	case WM_T_82572:
   3980 	case WM_T_82573:
   3981 	case WM_T_82574:
   3982 	case WM_T_82583:
   3983 	case WM_T_82575:
   3984 	case WM_T_82576:
   3985 	case WM_T_82580:
   3986 	case WM_T_I350:
   3987 	case WM_T_I354:
   3988 	case WM_T_I210:
   3989 	case WM_T_I211:
   3990 	case WM_T_80003:
   3991 	case WM_T_ICH8:
   3992 	case WM_T_ICH9:
   3993 		for (i = 0; i < 10; i++) {
   3994 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3995 				break;
   3996 			delay(1000);
   3997 		}
   3998 		if (i == 10) {
   3999 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4000 			    "complete\n", device_xname(sc->sc_dev));
   4001 		}
   4002 		break;
   4003 	default:
   4004 		break;
   4005 	}
   4006 }
   4007 
   4008 void
   4009 wm_lan_init_done(struct wm_softc *sc)
   4010 {
   4011 	uint32_t reg = 0;
   4012 	int i;
   4013 
   4014 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4015 		device_xname(sc->sc_dev), __func__));
   4016 
   4017 	/* Wait for eeprom to reload */
   4018 	switch (sc->sc_type) {
   4019 	case WM_T_ICH10:
   4020 	case WM_T_PCH:
   4021 	case WM_T_PCH2:
   4022 	case WM_T_PCH_LPT:
   4023 	case WM_T_PCH_SPT:
   4024 	case WM_T_PCH_CNP:
   4025 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4026 			reg = CSR_READ(sc, WMREG_STATUS);
   4027 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4028 				break;
   4029 			delay(100);
   4030 		}
   4031 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4032 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4033 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4034 		}
   4035 		break;
   4036 	default:
   4037 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4038 		    __func__);
   4039 		break;
   4040 	}
   4041 
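	/* Acknowledge the event by clearing the LAN_INIT_DONE bit. */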
   4042 	reg &= ~STATUS_LAN_INIT_DONE;
   4043 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4044 }
   4045 
   4046 void
   4047 wm_get_cfg_done(struct wm_softc *sc)
   4048 {
   4049 	int mask;
   4050 	uint32_t reg;
   4051 	int i;
   4052 
   4053 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4054 		device_xname(sc->sc_dev), __func__));
   4055 
   4056 	/* Wait for eeprom to reload */
   4057 	switch (sc->sc_type) {
   4058 	case WM_T_82542_2_0:
   4059 	case WM_T_82542_2_1:
   4060 		/* null */
   4061 		break;
   4062 	case WM_T_82543:
   4063 	case WM_T_82544:
   4064 	case WM_T_82540:
   4065 	case WM_T_82545:
   4066 	case WM_T_82545_3:
   4067 	case WM_T_82546:
   4068 	case WM_T_82546_3:
   4069 	case WM_T_82541:
   4070 	case WM_T_82541_2:
   4071 	case WM_T_82547:
   4072 	case WM_T_82547_2:
   4073 	case WM_T_82573:
   4074 	case WM_T_82574:
   4075 	case WM_T_82583:
   4076 		/* generic */
   4077 		delay(10*1000);
   4078 		break;
   4079 	case WM_T_80003:
   4080 	case WM_T_82571:
   4081 	case WM_T_82572:
   4082 	case WM_T_82575:
   4083 	case WM_T_82576:
   4084 	case WM_T_82580:
   4085 	case WM_T_I350:
   4086 	case WM_T_I354:
   4087 	case WM_T_I210:
   4088 	case WM_T_I211:
   4089 		if (sc->sc_type == WM_T_82571) {
    4090 			/* All 82571 ports share the port 0 CFGDONE bit */
   4091 			mask = EEMNGCTL_CFGDONE_0;
   4092 		} else
   4093 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4094 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4095 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4096 				break;
   4097 			delay(1000);
   4098 		}
   4099 		if (i >= WM_PHY_CFG_TIMEOUT)
   4100 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4101 				device_xname(sc->sc_dev), __func__));
   4102 		break;
   4103 	case WM_T_ICH8:
   4104 	case WM_T_ICH9:
   4105 	case WM_T_ICH10:
   4106 	case WM_T_PCH:
   4107 	case WM_T_PCH2:
   4108 	case WM_T_PCH_LPT:
   4109 	case WM_T_PCH_SPT:
   4110 	case WM_T_PCH_CNP:
   4111 		delay(10*1000);
   4112 		if (sc->sc_type >= WM_T_ICH10)
   4113 			wm_lan_init_done(sc);
   4114 		else
   4115 			wm_get_auto_rd_done(sc);
   4116 
   4117 		/* Clear PHY Reset Asserted bit */
   4118 		reg = CSR_READ(sc, WMREG_STATUS);
   4119 		if ((reg & STATUS_PHYRA) != 0)
   4120 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4121 		break;
   4122 	default:
   4123 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4124 		    __func__);
   4125 		break;
   4126 	}
   4127 }
   4128 
   4129 int
   4130 wm_phy_post_reset(struct wm_softc *sc)
   4131 {
   4132 	device_t dev = sc->sc_dev;
   4133 	uint16_t reg;
   4134 	int rv = 0;
   4135 
   4136 	/* This function is only for ICH8 and newer. */
   4137 	if (sc->sc_type < WM_T_ICH8)
   4138 		return 0;
   4139 
   4140 	if (wm_phy_resetisblocked(sc)) {
   4141 		/* XXX */
   4142 		device_printf(dev, "PHY is blocked\n");
   4143 		return -1;
   4144 	}
   4145 
    4146 	/* Allow time for h/w to get to a quiescent state after reset */
   4147 	delay(10*1000);
   4148 
   4149 	/* Perform any necessary post-reset workarounds */
   4150 	if (sc->sc_type == WM_T_PCH)
   4151 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4152 	else if (sc->sc_type == WM_T_PCH2)
   4153 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4154 	if (rv != 0)
   4155 		return rv;
   4156 
   4157 	/* Clear the host wakeup bit after lcd reset */
   4158 	if (sc->sc_type >= WM_T_PCH) {
   4159 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4160 		reg &= ~BM_WUC_HOST_WU_BIT;
   4161 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4162 	}
   4163 
   4164 	/* Configure the LCD with the extended configuration region in NVM */
   4165 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4166 		return rv;
   4167 
   4168 	/* Configure the LCD with the OEM bits in NVM */
   4169 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4170 
   4171 	if (sc->sc_type == WM_T_PCH2) {
   4172 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4173 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4174 			delay(10 * 1000);
   4175 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4176 		}
   4177 		/* Set EEE LPI Update Timer to 200usec */
   4178 		rv = sc->phy.acquire(sc);
   4179 		if (rv)
   4180 			return rv;
   4181 		rv = wm_write_emi_reg_locked(dev,
   4182 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4183 		sc->phy.release(sc);
   4184 	}
   4185 
   4186 	return rv;
   4187 }
   4188 
   4189 /* Only for PCH and newer */
   4190 static int
   4191 wm_write_smbus_addr(struct wm_softc *sc)
   4192 {
   4193 	uint32_t strap, freq;
   4194 	uint16_t phy_data;
   4195 	int rv;
   4196 
   4197 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4198 		device_xname(sc->sc_dev), __func__));
   4199 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4200 
   4201 	strap = CSR_READ(sc, WMREG_STRAP);
   4202 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4203 
   4204 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4205 	if (rv != 0)
   4206 		return -1;
   4207 
   4208 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4209 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4210 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4211 
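	/*
	 * The strap's frequency field is biased by one: zero means the
	 * frequency is unsupported, otherwise bits 0 and 1 of the
	 * decremented value select the low and high frequency bits.
	 */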
   4212 	if (sc->sc_phytype == WMPHY_I217) {
   4213 		/* Restore SMBus frequency */
    4214 		if (freq--) {
   4215 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4216 			    | HV_SMB_ADDR_FREQ_HIGH);
   4217 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4218 			    HV_SMB_ADDR_FREQ_LOW);
   4219 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4220 			    HV_SMB_ADDR_FREQ_HIGH);
   4221 		} else
   4222 			DPRINTF(WM_DEBUG_INIT,
   4223 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4224 				device_xname(sc->sc_dev), __func__));
   4225 	}
   4226 
   4227 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4228 	    phy_data);
   4229 }
   4230 
   4231 static int
   4232 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4233 {
   4234 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4235 	uint16_t phy_page = 0;
   4236 	int rv = 0;
   4237 
   4238 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4239 		device_xname(sc->sc_dev), __func__));
   4240 
   4241 	switch (sc->sc_type) {
   4242 	case WM_T_ICH8:
    4243 		if (sc->sc_phytype != WMPHY_IGP_3)
   4245 			return 0;
   4246 
   4247 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4248 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4249 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4250 			break;
   4251 		}
   4252 		/* FALLTHROUGH */
   4253 	case WM_T_PCH:
   4254 	case WM_T_PCH2:
   4255 	case WM_T_PCH_LPT:
   4256 	case WM_T_PCH_SPT:
   4257 	case WM_T_PCH_CNP:
   4258 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4259 		break;
   4260 	default:
   4261 		return 0;
   4262 	}
   4263 
   4264 	if ((rv = sc->phy.acquire(sc)) != 0)
   4265 		return rv;
   4266 
   4267 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4268 	if ((reg & sw_cfg_mask) == 0)
   4269 		goto release;
   4270 
   4271 	/*
   4272 	 * Make sure HW does not configure LCD from PHY extended configuration
   4273 	 * before SW configuration
   4274 	 */
   4275 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4276 	if ((sc->sc_type < WM_T_PCH2)
   4277 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4278 		goto release;
   4279 
   4280 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4281 		device_xname(sc->sc_dev), __func__));
    4282 	/* The extended config pointer is in DWORDs; convert it to words */
   4283 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4284 
   4285 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4286 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4287 	if (cnf_size == 0)
   4288 		goto release;
   4289 
   4290 	if (((sc->sc_type == WM_T_PCH)
   4291 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4292 	    || (sc->sc_type > WM_T_PCH)) {
   4293 		/*
   4294 		 * HW configures the SMBus address and LEDs when the OEM and
   4295 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4296 		 * are cleared, SW will configure them instead.
   4297 		 */
   4298 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4299 			device_xname(sc->sc_dev), __func__));
   4300 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4301 			goto release;
   4302 
   4303 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4304 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4305 		    (uint16_t)reg);
   4306 		if (rv != 0)
   4307 			goto release;
   4308 	}
   4309 
   4310 	/* Configure LCD from extended configuration region. */
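	/*
	 * Each entry is a pair of NVM words: the register data first,
	 * then the register address. A write to MII_IGPHY_PAGE_SELECT
	 * updates the page applied to subsequent addresses.
	 */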
   4311 	for (i = 0; i < cnf_size; i++) {
   4312 		uint16_t reg_data, reg_addr;
   4313 
   4314 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4315 			goto release;
   4316 
    4317 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4318 			goto release;
   4319 
   4320 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4321 			phy_page = reg_data;
   4322 
   4323 		reg_addr &= IGPHY_MAXREGADDR;
   4324 		reg_addr |= phy_page;
   4325 
   4326 		KASSERT(sc->phy.writereg_locked != NULL);
   4327 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4328 		    reg_data);
   4329 	}
   4330 
   4331 release:
   4332 	sc->phy.release(sc);
   4333 	return rv;
   4334 }
   4335 
   4336 /*
   4337  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4338  *  @sc:       pointer to the HW structure
   4339  *  @d0_state: boolean if entering d0 or d3 device state
   4340  *
   4341  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4342  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4343  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4344  */
   4345 int
   4346 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4347 {
   4348 	uint32_t mac_reg;
   4349 	uint16_t oem_reg;
   4350 	int rv;
   4351 
   4352 	if (sc->sc_type < WM_T_PCH)
   4353 		return 0;
   4354 
   4355 	rv = sc->phy.acquire(sc);
   4356 	if (rv != 0)
   4357 		return rv;
   4358 
   4359 	if (sc->sc_type == WM_T_PCH) {
   4360 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4361 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4362 			goto release;
   4363 	}
   4364 
   4365 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4366 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4367 		goto release;
   4368 
   4369 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4370 
   4371 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4372 	if (rv != 0)
   4373 		goto release;
   4374 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4375 
   4376 	if (d0_state) {
   4377 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4378 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4379 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4380 			oem_reg |= HV_OEM_BITS_LPLU;
   4381 	} else {
   4382 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4383 		    != 0)
   4384 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4385 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4386 		    != 0)
   4387 			oem_reg |= HV_OEM_BITS_LPLU;
   4388 	}
   4389 
   4390 	/* Set Restart auto-neg to activate the bits */
   4391 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4392 	    && (wm_phy_resetisblocked(sc) == false))
   4393 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4394 
   4395 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4396 
   4397 release:
   4398 	sc->phy.release(sc);
   4399 
   4400 	return rv;
   4401 }
   4402 
   4403 /* Init hardware bits */
   4404 void
   4405 wm_initialize_hardware_bits(struct wm_softc *sc)
   4406 {
   4407 	uint32_t tarc0, tarc1, reg;
   4408 
   4409 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4410 		device_xname(sc->sc_dev), __func__));
   4411 
   4412 	/* For 82571 variant, 80003 and ICHs */
   4413 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4414 	    || (sc->sc_type >= WM_T_80003)) {
   4415 
   4416 		/* Transmit Descriptor Control 0 */
   4417 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4418 		reg |= TXDCTL_COUNT_DESC;
   4419 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4420 
   4421 		/* Transmit Descriptor Control 1 */
   4422 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4423 		reg |= TXDCTL_COUNT_DESC;
   4424 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4425 
   4426 		/* TARC0 */
   4427 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4428 		switch (sc->sc_type) {
   4429 		case WM_T_82571:
   4430 		case WM_T_82572:
   4431 		case WM_T_82573:
   4432 		case WM_T_82574:
   4433 		case WM_T_82583:
   4434 		case WM_T_80003:
   4435 			/* Clear bits 30..27 */
   4436 			tarc0 &= ~__BITS(30, 27);
   4437 			break;
   4438 		default:
   4439 			break;
   4440 		}
   4441 
   4442 		switch (sc->sc_type) {
   4443 		case WM_T_82571:
   4444 		case WM_T_82572:
   4445 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4446 
   4447 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4448 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4449 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4450 			/* 8257[12] Errata No.7 */
    4451 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4452 
   4453 			/* TARC1 bit 28 */
   4454 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4455 				tarc1 &= ~__BIT(28);
   4456 			else
   4457 				tarc1 |= __BIT(28);
   4458 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4459 
   4460 			/*
   4461 			 * 8257[12] Errata No.13
    4462 			 * Disable Dynamic Clock Gating.
   4463 			 */
   4464 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4465 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4466 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4467 			break;
   4468 		case WM_T_82573:
   4469 		case WM_T_82574:
   4470 		case WM_T_82583:
   4471 			if ((sc->sc_type == WM_T_82574)
   4472 			    || (sc->sc_type == WM_T_82583))
   4473 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4474 
   4475 			/* Extended Device Control */
   4476 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4477 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4478 			reg |= __BIT(22);	/* Set bit 22 */
   4479 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4480 
   4481 			/* Device Control */
   4482 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4483 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4484 
   4485 			/* PCIe Control Register */
   4486 			/*
   4487 			 * 82573 Errata (unknown).
   4488 			 *
   4489 			 * 82574 Errata 25 and 82583 Errata 12
   4490 			 * "Dropped Rx Packets":
    4491 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4492 			 */
   4493 			reg = CSR_READ(sc, WMREG_GCR);
   4494 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4495 			CSR_WRITE(sc, WMREG_GCR, reg);
   4496 
   4497 			if ((sc->sc_type == WM_T_82574)
   4498 			    || (sc->sc_type == WM_T_82583)) {
   4499 				/*
   4500 				 * Document says this bit must be set for
   4501 				 * proper operation.
   4502 				 */
   4503 				reg = CSR_READ(sc, WMREG_GCR);
   4504 				reg |= __BIT(22);
   4505 				CSR_WRITE(sc, WMREG_GCR, reg);
   4506 
   4507 				/*
    4508 				 * Apply a workaround for a hardware erratum
    4509 				 * documented in the errata docs. It fixes an
    4510 				 * issue where some error-prone or unreliable
    4511 				 * PCIe completions occur, particularly with
    4512 				 * ASPM enabled. Without the fix, the issue
    4513 				 * can cause Tx timeouts.
   4514 				 */
   4515 				reg = CSR_READ(sc, WMREG_GCR2);
   4516 				reg |= __BIT(0);
   4517 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4518 			}
   4519 			break;
   4520 		case WM_T_80003:
   4521 			/* TARC0 */
   4522 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4523 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4524 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4525 
   4526 			/* TARC1 bit 28 */
   4527 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4528 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4529 				tarc1 &= ~__BIT(28);
   4530 			else
   4531 				tarc1 |= __BIT(28);
   4532 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4533 			break;
   4534 		case WM_T_ICH8:
   4535 		case WM_T_ICH9:
   4536 		case WM_T_ICH10:
   4537 		case WM_T_PCH:
   4538 		case WM_T_PCH2:
   4539 		case WM_T_PCH_LPT:
   4540 		case WM_T_PCH_SPT:
   4541 		case WM_T_PCH_CNP:
   4542 			/* TARC0 */
   4543 			if (sc->sc_type == WM_T_ICH8) {
   4544 				/* Set TARC0 bits 29 and 28 */
   4545 				tarc0 |= __BITS(29, 28);
   4546 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4547 				tarc0 |= __BIT(29);
   4548 				/*
    4549 				 * Drop bit 28 (from Linux). See the
    4550 				 * I218/I219 spec update,
   4551 				 * "5. Buffer Overrun While the I219 is
   4552 				 * Processing DMA Transactions"
   4553 				 */
   4554 				tarc0 &= ~__BIT(28);
   4555 			}
   4556 			/* Set TARC0 bits 23,24,26,27 */
   4557 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4558 
   4559 			/* CTRL_EXT */
   4560 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4561 			reg |= __BIT(22);	/* Set bit 22 */
   4562 			/*
   4563 			 * Enable PHY low-power state when MAC is at D3
   4564 			 * w/o WoL
   4565 			 */
   4566 			if (sc->sc_type >= WM_T_PCH)
   4567 				reg |= CTRL_EXT_PHYPDEN;
   4568 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4569 
   4570 			/* TARC1 */
   4571 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4572 			/* bit 28 */
   4573 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4574 				tarc1 &= ~__BIT(28);
   4575 			else
   4576 				tarc1 |= __BIT(28);
   4577 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4578 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4579 
   4580 			/* Device Status */
   4581 			if (sc->sc_type == WM_T_ICH8) {
   4582 				reg = CSR_READ(sc, WMREG_STATUS);
   4583 				reg &= ~__BIT(31);
   4584 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4585 
   4586 			}
   4587 
   4588 			/* IOSFPC */
   4589 			if (sc->sc_type == WM_T_PCH_SPT) {
   4590 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4591 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4592 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4593 			}
   4594 			/*
    4595 			 * To work around a descriptor data corruption issue
    4596 			 * seen with NFS v2 UDP traffic, just disable the NFS
    4597 			 * filtering capability.
   4598 			 */
   4599 			reg = CSR_READ(sc, WMREG_RFCTL);
   4600 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4601 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4602 			break;
   4603 		default:
   4604 			break;
   4605 		}
   4606 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4607 
   4608 		switch (sc->sc_type) {
   4609 		/*
   4610 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4611 		 * Avoid RSS Hash Value bug.
   4612 		 */
   4613 		case WM_T_82571:
   4614 		case WM_T_82572:
   4615 		case WM_T_82573:
   4616 		case WM_T_80003:
   4617 		case WM_T_ICH8:
   4618 			reg = CSR_READ(sc, WMREG_RFCTL);
    4619 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4620 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4621 			break;
   4622 		case WM_T_82574:
    4623 			/* Use extended Rx descriptors. */
   4624 			reg = CSR_READ(sc, WMREG_RFCTL);
   4625 			reg |= WMREG_RFCTL_EXSTEN;
   4626 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4627 			break;
   4628 		default:
   4629 			break;
   4630 		}
   4631 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4632 		/*
   4633 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4634 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4635 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4636 		 * Correctly by the Device"
   4637 		 *
   4638 		 * I354(C2000) Errata AVR53:
   4639 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4640 		 * Hang"
   4641 		 */
   4642 		reg = CSR_READ(sc, WMREG_RFCTL);
   4643 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4644 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4645 	}
   4646 }
   4647 
   4648 static uint32_t
   4649 wm_rxpbs_adjust_82580(uint32_t val)
   4650 {
   4651 	uint32_t rv = 0;
   4652 
   4653 	if (val < __arraycount(wm_82580_rxpbs_table))
   4654 		rv = wm_82580_rxpbs_table[val];
   4655 
   4656 	return rv;
   4657 }
   4658 
   4659 /*
   4660  * wm_reset_phy:
   4661  *
    4662  *	Generic PHY reset function.
   4663  *	Same as e1000_phy_hw_reset_generic()
   4664  */
   4665 static int
   4666 wm_reset_phy(struct wm_softc *sc)
   4667 {
   4668 	uint32_t reg;
   4669 
   4670 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4671 		device_xname(sc->sc_dev), __func__));
   4672 	if (wm_phy_resetisblocked(sc))
   4673 		return -1;
   4674 
   4675 	sc->phy.acquire(sc);
   4676 
   4677 	reg = CSR_READ(sc, WMREG_CTRL);
   4678 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4679 	CSR_WRITE_FLUSH(sc);
   4680 
   4681 	delay(sc->phy.reset_delay_us);
   4682 
   4683 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4684 	CSR_WRITE_FLUSH(sc);
   4685 
   4686 	delay(150);
   4687 
   4688 	sc->phy.release(sc);
   4689 
   4690 	wm_get_cfg_done(sc);
   4691 	wm_phy_post_reset(sc);
   4692 
   4693 	return 0;
   4694 }
   4695 
   4696 /*
    4697  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
    4698  * so it is sufficient to check sc->sc_queue[0].
   4699  */
   4700 static void
   4701 wm_flush_desc_rings(struct wm_softc *sc)
   4702 {
   4703 	pcireg_t preg;
   4704 	uint32_t reg;
   4705 	struct wm_txqueue *txq;
   4706 	wiseman_txdesc_t *txd;
   4707 	int nexttx;
   4708 	uint32_t rctl;
   4709 
   4710 	/* First, disable MULR fix in FEXTNVM11 */
   4711 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4712 	reg |= FEXTNVM11_DIS_MULRFIX;
   4713 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4714 
   4715 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4716 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4717 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4718 		return;
   4719 
   4720 	/* TX */
   4721 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4722 	    preg, reg);
   4723 	reg = CSR_READ(sc, WMREG_TCTL);
   4724 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4725 
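         	/*
         	 * Queue one harmless 512-byte dummy descriptor so that the
         	 * hardware can complete its pending internal flush request.
         	 */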
   4726 	txq = &sc->sc_queue[0].wmq_txq;
   4727 	nexttx = txq->txq_next;
   4728 	txd = &txq->txq_descs[nexttx];
   4729 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4730 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4731 	txd->wtx_fields.wtxu_status = 0;
   4732 	txd->wtx_fields.wtxu_options = 0;
   4733 	txd->wtx_fields.wtxu_vlan = 0;
   4734 
   4735 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4736 	    BUS_SPACE_BARRIER_WRITE);
   4737 
   4738 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4739 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4740 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4741 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4742 	delay(250);
   4743 
   4744 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4745 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4746 		return;
   4747 
   4748 	/* RX */
   4749 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4750 	rctl = CSR_READ(sc, WMREG_RCTL);
   4751 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4752 	CSR_WRITE_FLUSH(sc);
   4753 	delay(150);
   4754 
   4755 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4756 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4757 	reg &= 0xffffc000;
   4758 	/*
    4759 	 * Update thresholds: set the prefetch threshold to 31 and the
    4760 	 * host threshold to 1, and make sure the granularity is
    4761 	 * "descriptors" and not "cache lines".
   4762 	 */
   4763 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
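         	/* That is, PTHRESH = 0x1f in bits 5:0 and HTHRESH = 1 at bit 8. */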
   4764 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4765 
   4766 	/* Momentarily enable the RX ring for the changes to take effect */
   4767 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4768 	CSR_WRITE_FLUSH(sc);
   4769 	delay(150);
   4770 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4771 }
   4772 
   4773 /*
   4774  * wm_reset:
   4775  *
   4776  *	Reset the i82542 chip.
   4777  */
   4778 static void
   4779 wm_reset(struct wm_softc *sc)
   4780 {
   4781 	int phy_reset = 0;
   4782 	int i, error = 0;
   4783 	uint32_t reg;
   4784 	uint16_t kmreg;
   4785 	int rv;
   4786 
   4787 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4788 		device_xname(sc->sc_dev), __func__));
   4789 	KASSERT(sc->sc_type != 0);
   4790 
   4791 	/*
   4792 	 * Allocate on-chip memory according to the MTU size.
   4793 	 * The Packet Buffer Allocation register must be written
   4794 	 * before the chip is reset.
   4795 	 */
   4796 	switch (sc->sc_type) {
   4797 	case WM_T_82547:
   4798 	case WM_T_82547_2:
   4799 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4800 		    PBA_22K : PBA_30K;
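         		/*
         		 * e.g. with a standard MTU this selects PBA_30K: RX gets
         		 * 30K of the 40K packet buffer and the Tx FIFO workaround
         		 * below uses the remaining 10K.
         		 */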
   4801 		for (i = 0; i < sc->sc_nqueues; i++) {
   4802 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4803 			txq->txq_fifo_head = 0;
   4804 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4805 			txq->txq_fifo_size =
   4806 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4807 			txq->txq_fifo_stall = 0;
   4808 		}
   4809 		break;
   4810 	case WM_T_82571:
   4811 	case WM_T_82572:
    4812 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4813 	case WM_T_80003:
   4814 		sc->sc_pba = PBA_32K;
   4815 		break;
   4816 	case WM_T_82573:
   4817 		sc->sc_pba = PBA_12K;
   4818 		break;
   4819 	case WM_T_82574:
   4820 	case WM_T_82583:
   4821 		sc->sc_pba = PBA_20K;
   4822 		break;
   4823 	case WM_T_82576:
   4824 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4825 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4826 		break;
   4827 	case WM_T_82580:
   4828 	case WM_T_I350:
   4829 	case WM_T_I354:
   4830 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4831 		break;
   4832 	case WM_T_I210:
   4833 	case WM_T_I211:
   4834 		sc->sc_pba = PBA_34K;
   4835 		break;
   4836 	case WM_T_ICH8:
   4837 		/* Workaround for a bit corruption issue in FIFO memory */
   4838 		sc->sc_pba = PBA_8K;
   4839 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4840 		break;
   4841 	case WM_T_ICH9:
   4842 	case WM_T_ICH10:
   4843 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4844 		    PBA_14K : PBA_10K;
   4845 		break;
   4846 	case WM_T_PCH:
   4847 	case WM_T_PCH2:	/* XXX 14K? */
   4848 	case WM_T_PCH_LPT:
   4849 	case WM_T_PCH_SPT:
   4850 	case WM_T_PCH_CNP:
   4851 		sc->sc_pba = PBA_26K;
   4852 		break;
   4853 	default:
   4854 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4855 		    PBA_40K : PBA_48K;
   4856 		break;
   4857 	}
   4858 	/*
    4859 	 * Only old or non-multiqueue devices have the PBA register.
   4860 	 * XXX Need special handling for 82575.
   4861 	 */
   4862 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4863 	    || (sc->sc_type == WM_T_82575))
   4864 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4865 
   4866 	/* Prevent the PCI-E bus from sticking */
   4867 	if (sc->sc_flags & WM_F_PCIE) {
   4868 		int timeout = 800;
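         		/* 800 polls x 100us = up to 80ms wait. */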
   4869 
   4870 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4871 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4872 
   4873 		while (timeout--) {
   4874 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4875 			    == 0)
   4876 				break;
   4877 			delay(100);
   4878 		}
   4879 		if (timeout == 0)
   4880 			device_printf(sc->sc_dev,
   4881 			    "failed to disable busmastering\n");
   4882 	}
   4883 
   4884 	/* Set the completion timeout for interface */
   4885 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4886 	    || (sc->sc_type == WM_T_82580)
   4887 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4888 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4889 		wm_set_pcie_completion_timeout(sc);
   4890 
   4891 	/* Clear interrupt */
   4892 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4893 	if (wm_is_using_msix(sc)) {
   4894 		if (sc->sc_type != WM_T_82574) {
   4895 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4896 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4897 		} else
   4898 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4899 	}
   4900 
   4901 	/* Stop the transmit and receive processes. */
   4902 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4903 	sc->sc_rctl &= ~RCTL_EN;
   4904 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4905 	CSR_WRITE_FLUSH(sc);
   4906 
   4907 	/* XXX set_tbi_sbp_82543() */
   4908 
   4909 	delay(10*1000);
   4910 
   4911 	/* Must acquire the MDIO ownership before MAC reset */
   4912 	switch (sc->sc_type) {
   4913 	case WM_T_82573:
   4914 	case WM_T_82574:
   4915 	case WM_T_82583:
   4916 		error = wm_get_hw_semaphore_82573(sc);
   4917 		break;
   4918 	default:
   4919 		break;
   4920 	}
   4921 
   4922 	/*
   4923 	 * 82541 Errata 29? & 82547 Errata 28?
   4924 	 * See also the description about PHY_RST bit in CTRL register
   4925 	 * in 8254x_GBe_SDM.pdf.
   4926 	 */
   4927 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4928 		CSR_WRITE(sc, WMREG_CTRL,
   4929 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4930 		CSR_WRITE_FLUSH(sc);
   4931 		delay(5000);
   4932 	}
   4933 
   4934 	switch (sc->sc_type) {
   4935 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4936 	case WM_T_82541:
   4937 	case WM_T_82541_2:
   4938 	case WM_T_82547:
   4939 	case WM_T_82547_2:
   4940 		/*
   4941 		 * On some chipsets, a reset through a memory-mapped write
   4942 		 * cycle can cause the chip to reset before completing the
    4943 		 * write cycle. This causes a major headache that can be avoided
   4944 		 * by issuing the reset via indirect register writes through
   4945 		 * I/O space.
   4946 		 *
   4947 		 * So, if we successfully mapped the I/O BAR at attach time,
   4948 		 * use that. Otherwise, try our luck with a memory-mapped
   4949 		 * reset.
   4950 		 */
   4951 		if (sc->sc_flags & WM_F_IOH_VALID)
   4952 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4953 		else
   4954 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4955 		break;
   4956 	case WM_T_82545_3:
   4957 	case WM_T_82546_3:
   4958 		/* Use the shadow control register on these chips. */
   4959 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4960 		break;
   4961 	case WM_T_80003:
   4962 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4963 		sc->phy.acquire(sc);
   4964 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4965 		sc->phy.release(sc);
   4966 		break;
   4967 	case WM_T_ICH8:
   4968 	case WM_T_ICH9:
   4969 	case WM_T_ICH10:
   4970 	case WM_T_PCH:
   4971 	case WM_T_PCH2:
   4972 	case WM_T_PCH_LPT:
   4973 	case WM_T_PCH_SPT:
   4974 	case WM_T_PCH_CNP:
   4975 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4976 		if (wm_phy_resetisblocked(sc) == false) {
   4977 			/*
   4978 			 * Gate automatic PHY configuration by hardware on
   4979 			 * non-managed 82579
   4980 			 */
   4981 			if ((sc->sc_type == WM_T_PCH2)
   4982 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4983 				== 0))
   4984 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4985 
   4986 			reg |= CTRL_PHY_RESET;
   4987 			phy_reset = 1;
   4988 		} else
   4989 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4990 		sc->phy.acquire(sc);
   4991 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4992 		/* Don't insert a completion barrier during reset */
   4993 		delay(20*1000);
   4994 		mutex_exit(sc->sc_ich_phymtx);
   4995 		break;
   4996 	case WM_T_82580:
   4997 	case WM_T_I350:
   4998 	case WM_T_I354:
   4999 	case WM_T_I210:
   5000 	case WM_T_I211:
   5001 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5002 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5003 			CSR_WRITE_FLUSH(sc);
   5004 		delay(5000);
   5005 		break;
   5006 	case WM_T_82542_2_0:
   5007 	case WM_T_82542_2_1:
   5008 	case WM_T_82543:
   5009 	case WM_T_82540:
   5010 	case WM_T_82545:
   5011 	case WM_T_82546:
   5012 	case WM_T_82571:
   5013 	case WM_T_82572:
   5014 	case WM_T_82573:
   5015 	case WM_T_82574:
   5016 	case WM_T_82575:
   5017 	case WM_T_82576:
   5018 	case WM_T_82583:
   5019 	default:
   5020 		/* Everything else can safely use the documented method. */
   5021 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5022 		break;
   5023 	}
   5024 
   5025 	/* Must release the MDIO ownership after MAC reset */
   5026 	switch (sc->sc_type) {
   5027 	case WM_T_82573:
   5028 	case WM_T_82574:
   5029 	case WM_T_82583:
   5030 		if (error == 0)
   5031 			wm_put_hw_semaphore_82573(sc);
   5032 		break;
   5033 	default:
   5034 		break;
   5035 	}
   5036 
   5037 	/* Set Phy Config Counter to 50msec */
   5038 	if (sc->sc_type == WM_T_PCH2) {
   5039 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5040 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5041 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5042 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5043 	}
   5044 
   5045 	if (phy_reset != 0)
   5046 		wm_get_cfg_done(sc);
   5047 
   5048 	/* Reload EEPROM */
   5049 	switch (sc->sc_type) {
   5050 	case WM_T_82542_2_0:
   5051 	case WM_T_82542_2_1:
   5052 	case WM_T_82543:
   5053 	case WM_T_82544:
   5054 		delay(10);
   5055 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5056 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5057 		CSR_WRITE_FLUSH(sc);
   5058 		delay(2000);
   5059 		break;
   5060 	case WM_T_82540:
   5061 	case WM_T_82545:
   5062 	case WM_T_82545_3:
   5063 	case WM_T_82546:
   5064 	case WM_T_82546_3:
   5065 		delay(5*1000);
   5066 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5067 		break;
   5068 	case WM_T_82541:
   5069 	case WM_T_82541_2:
   5070 	case WM_T_82547:
   5071 	case WM_T_82547_2:
   5072 		delay(20000);
   5073 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5074 		break;
   5075 	case WM_T_82571:
   5076 	case WM_T_82572:
   5077 	case WM_T_82573:
   5078 	case WM_T_82574:
   5079 	case WM_T_82583:
   5080 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5081 			delay(10);
   5082 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5083 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5084 			CSR_WRITE_FLUSH(sc);
   5085 		}
   5086 		/* check EECD_EE_AUTORD */
   5087 		wm_get_auto_rd_done(sc);
   5088 		/*
    5089 		 * PHY configuration from NVM starts just after EECD_AUTO_RD
   5090 		 * is set.
   5091 		 */
   5092 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5093 		    || (sc->sc_type == WM_T_82583))
   5094 			delay(25*1000);
   5095 		break;
   5096 	case WM_T_82575:
   5097 	case WM_T_82576:
   5098 	case WM_T_82580:
   5099 	case WM_T_I350:
   5100 	case WM_T_I354:
   5101 	case WM_T_I210:
   5102 	case WM_T_I211:
   5103 	case WM_T_80003:
   5104 		/* check EECD_EE_AUTORD */
   5105 		wm_get_auto_rd_done(sc);
   5106 		break;
   5107 	case WM_T_ICH8:
   5108 	case WM_T_ICH9:
   5109 	case WM_T_ICH10:
   5110 	case WM_T_PCH:
   5111 	case WM_T_PCH2:
   5112 	case WM_T_PCH_LPT:
   5113 	case WM_T_PCH_SPT:
   5114 	case WM_T_PCH_CNP:
   5115 		break;
   5116 	default:
   5117 		panic("%s: unknown type\n", __func__);
   5118 	}
   5119 
   5120 	/* Check whether EEPROM is present or not */
   5121 	switch (sc->sc_type) {
   5122 	case WM_T_82575:
   5123 	case WM_T_82576:
   5124 	case WM_T_82580:
   5125 	case WM_T_I350:
   5126 	case WM_T_I354:
   5127 	case WM_T_ICH8:
   5128 	case WM_T_ICH9:
   5129 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5130 			/* Not found */
   5131 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5132 			if (sc->sc_type == WM_T_82575)
   5133 				wm_reset_init_script_82575(sc);
   5134 		}
   5135 		break;
   5136 	default:
   5137 		break;
   5138 	}
   5139 
   5140 	if (phy_reset != 0)
   5141 		wm_phy_post_reset(sc);
   5142 
   5143 	if ((sc->sc_type == WM_T_82580)
   5144 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5145 		/* Clear global device reset status bit */
   5146 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5147 	}
   5148 
   5149 	/* Clear any pending interrupt events. */
   5150 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5151 	reg = CSR_READ(sc, WMREG_ICR);
   5152 	if (wm_is_using_msix(sc)) {
   5153 		if (sc->sc_type != WM_T_82574) {
   5154 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5155 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5156 		} else
   5157 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5158 	}
   5159 
   5160 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5161 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5162 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5163 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5164 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5165 		reg |= KABGTXD_BGSQLBIAS;
   5166 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5167 	}
   5168 
   5169 	/* Reload sc_ctrl */
   5170 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5171 
   5172 	wm_set_eee(sc);
   5173 
   5174 	/*
   5175 	 * For PCH, this write will make sure that any noise will be detected
   5176 	 * as a CRC error and be dropped rather than show up as a bad packet
   5177 	 * to the DMA engine
   5178 	 */
   5179 	if (sc->sc_type == WM_T_PCH)
   5180 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5181 
   5182 	if (sc->sc_type >= WM_T_82544)
   5183 		CSR_WRITE(sc, WMREG_WUC, 0);
   5184 
   5185 	if (sc->sc_type < WM_T_82575)
   5186 		wm_disable_aspm(sc); /* Workaround for some chips */
   5187 
   5188 	wm_reset_mdicnfg_82580(sc);
   5189 
   5190 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5191 		wm_pll_workaround_i210(sc);
   5192 
   5193 	if (sc->sc_type == WM_T_80003) {
   5194 		/* Default to TRUE to enable the MDIC W/A */
   5195 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5196 
   5197 		rv = wm_kmrn_readreg(sc,
   5198 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5199 		if (rv == 0) {
   5200 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5201 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5202 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5203 			else
   5204 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5205 		}
   5206 	}
   5207 }
   5208 
   5209 /*
   5210  * wm_add_rxbuf:
   5211  *
    5212  *	Add a receive buffer to the indicated descriptor.
   5213  */
   5214 static int
   5215 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5216 {
   5217 	struct wm_softc *sc = rxq->rxq_sc;
   5218 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5219 	struct mbuf *m;
   5220 	int error;
   5221 
   5222 	KASSERT(mutex_owned(rxq->rxq_lock));
   5223 
   5224 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5225 	if (m == NULL)
   5226 		return ENOBUFS;
   5227 
   5228 	MCLGET(m, M_DONTWAIT);
   5229 	if ((m->m_flags & M_EXT) == 0) {
   5230 		m_freem(m);
   5231 		return ENOBUFS;
   5232 	}
   5233 
   5234 	if (rxs->rxs_mbuf != NULL)
   5235 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5236 
   5237 	rxs->rxs_mbuf = m;
   5238 
   5239 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5240 	/*
   5241 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5242 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5243 	 */
   5244 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5245 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5246 	if (error) {
   5247 		/* XXX XXX XXX */
   5248 		aprint_error_dev(sc->sc_dev,
   5249 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5250 		panic("wm_add_rxbuf");
   5251 	}
   5252 
   5253 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5254 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5255 
   5256 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5257 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5258 			wm_init_rxdesc(rxq, idx);
   5259 	} else
   5260 		wm_init_rxdesc(rxq, idx);
   5261 
   5262 	return 0;
   5263 }
   5264 
   5265 /*
   5266  * wm_rxdrain:
   5267  *
   5268  *	Drain the receive queue.
   5269  */
   5270 static void
   5271 wm_rxdrain(struct wm_rxqueue *rxq)
   5272 {
   5273 	struct wm_softc *sc = rxq->rxq_sc;
   5274 	struct wm_rxsoft *rxs;
   5275 	int i;
   5276 
   5277 	KASSERT(mutex_owned(rxq->rxq_lock));
   5278 
   5279 	for (i = 0; i < WM_NRXDESC; i++) {
   5280 		rxs = &rxq->rxq_soft[i];
   5281 		if (rxs->rxs_mbuf != NULL) {
   5282 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5283 			m_freem(rxs->rxs_mbuf);
   5284 			rxs->rxs_mbuf = NULL;
   5285 		}
   5286 	}
   5287 }
   5288 
   5289 /*
    5290  * Set up the registers for RSS.
    5291  *
    5292  * XXX VMDq is not yet supported.
   5293  */
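         /*
          * The redirection table below simply round-robins hash buckets over
          * the active queues: with sc_nqueues = 4, RETA entry i maps to queue
          * i % 4.
          */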
   5294 static void
   5295 wm_init_rss(struct wm_softc *sc)
   5296 {
   5297 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5298 	int i;
   5299 
   5300 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5301 
   5302 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5303 		unsigned int qid, reta_ent;
   5304 
   5305 		qid  = i % sc->sc_nqueues;
   5306 		switch (sc->sc_type) {
   5307 		case WM_T_82574:
   5308 			reta_ent = __SHIFTIN(qid,
   5309 			    RETA_ENT_QINDEX_MASK_82574);
   5310 			break;
   5311 		case WM_T_82575:
   5312 			reta_ent = __SHIFTIN(qid,
   5313 			    RETA_ENT_QINDEX1_MASK_82575);
   5314 			break;
   5315 		default:
   5316 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5317 			break;
   5318 		}
   5319 
   5320 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5321 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5322 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5323 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5324 	}
   5325 
   5326 	rss_getkey((uint8_t *)rss_key);
   5327 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5328 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5329 
   5330 	if (sc->sc_type == WM_T_82574)
   5331 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5332 	else
   5333 		mrqc = MRQC_ENABLE_RSS_MQ;
   5334 
   5335 	/*
   5336 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5337 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5338 	 */
   5339 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5340 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5341 #if 0
   5342 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5343 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5344 #endif
   5345 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5346 
   5347 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5348 }
   5349 
   5350 /*
    5351  * Adjust the TX and RX queue numbers which the system actually uses.
    5352  *
    5353  * The numbers are affected by the following parameters:
    5354  *     - The number of hardware queues
   5355  *     - The number of MSI-X vectors (= "nvectors" argument)
   5356  *     - ncpu
   5357  */
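         /*
          * For example, an 82576 (16 hardware queues) attached with 5 MSI-X
          * vectors on an 8-CPU system ends up with sc_nqueues = 4: one vector
          * is reserved for the link interrupt and the rest serve the queues.
          */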
   5358 static void
   5359 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5360 {
   5361 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5362 
   5363 	if (nvectors < 2) {
   5364 		sc->sc_nqueues = 1;
   5365 		return;
   5366 	}
   5367 
   5368 	switch (sc->sc_type) {
   5369 	case WM_T_82572:
   5370 		hw_ntxqueues = 2;
   5371 		hw_nrxqueues = 2;
   5372 		break;
   5373 	case WM_T_82574:
   5374 		hw_ntxqueues = 2;
   5375 		hw_nrxqueues = 2;
   5376 		break;
   5377 	case WM_T_82575:
   5378 		hw_ntxqueues = 4;
   5379 		hw_nrxqueues = 4;
   5380 		break;
   5381 	case WM_T_82576:
   5382 		hw_ntxqueues = 16;
   5383 		hw_nrxqueues = 16;
   5384 		break;
   5385 	case WM_T_82580:
   5386 	case WM_T_I350:
   5387 	case WM_T_I354:
   5388 		hw_ntxqueues = 8;
   5389 		hw_nrxqueues = 8;
   5390 		break;
   5391 	case WM_T_I210:
   5392 		hw_ntxqueues = 4;
   5393 		hw_nrxqueues = 4;
   5394 		break;
   5395 	case WM_T_I211:
   5396 		hw_ntxqueues = 2;
   5397 		hw_nrxqueues = 2;
   5398 		break;
   5399 		/*
    5400 		 * As the following Ethernet controllers do not support
    5401 		 * MSI-X, this driver does not use multiqueue on them:
   5402 		 *     - WM_T_80003
   5403 		 *     - WM_T_ICH8
   5404 		 *     - WM_T_ICH9
   5405 		 *     - WM_T_ICH10
   5406 		 *     - WM_T_PCH
   5407 		 *     - WM_T_PCH2
   5408 		 *     - WM_T_PCH_LPT
   5409 		 */
   5410 	default:
   5411 		hw_ntxqueues = 1;
   5412 		hw_nrxqueues = 1;
   5413 		break;
   5414 	}
   5415 
   5416 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5417 
   5418 	/*
    5419 	 * As using more queues than MSI-X vectors cannot improve scaling,
    5420 	 * we limit the number of queues actually used.
   5421 	 */
   5422 	if (nvectors < hw_nqueues + 1)
   5423 		sc->sc_nqueues = nvectors - 1;
   5424 	else
   5425 		sc->sc_nqueues = hw_nqueues;
   5426 
   5427 	/*
    5428 	 * As using more queues than CPUs cannot improve scaling, we limit
    5429 	 * the number of queues actually used.
   5430 	 */
   5431 	if (ncpu < sc->sc_nqueues)
   5432 		sc->sc_nqueues = ncpu;
   5433 }
   5434 
   5435 static inline bool
   5436 wm_is_using_msix(struct wm_softc *sc)
   5437 {
   5438 
   5439 	return (sc->sc_nintrs > 1);
   5440 }
   5441 
   5442 static inline bool
   5443 wm_is_using_multiqueue(struct wm_softc *sc)
   5444 {
   5445 
   5446 	return (sc->sc_nqueues > 1);
   5447 }
   5448 
   5449 static int
   5450 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5451 {
   5452 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5453 
   5454 	wmq->wmq_id = qidx;
   5455 	wmq->wmq_intr_idx = intr_idx;
   5456 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5457 	    wm_handle_queue, wmq);
   5458 	if (wmq->wmq_si != NULL)
   5459 		return 0;
   5460 
   5461 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5462 	    wmq->wmq_id);
   5463 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5464 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5465 	return ENOMEM;
   5466 }
   5467 
   5468 /*
    5469  * Both single-interrupt MSI and INTx can use this function.
   5470  */
   5471 static int
   5472 wm_setup_legacy(struct wm_softc *sc)
   5473 {
   5474 	pci_chipset_tag_t pc = sc->sc_pc;
   5475 	const char *intrstr = NULL;
   5476 	char intrbuf[PCI_INTRSTR_LEN];
   5477 	int error;
   5478 
   5479 	error = wm_alloc_txrx_queues(sc);
   5480 	if (error) {
   5481 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5482 		    error);
   5483 		return ENOMEM;
   5484 	}
   5485 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5486 	    sizeof(intrbuf));
   5487 #ifdef WM_MPSAFE
   5488 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5489 #endif
   5490 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5491 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5492 	if (sc->sc_ihs[0] == NULL) {
   5493 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5494 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5495 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5496 		return ENOMEM;
   5497 	}
   5498 
   5499 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5500 	sc->sc_nintrs = 1;
   5501 
   5502 	return wm_softint_establish_queue(sc, 0, 0);
   5503 }
   5504 
   5505 static int
   5506 wm_setup_msix(struct wm_softc *sc)
   5507 {
   5508 	void *vih;
   5509 	kcpuset_t *affinity;
   5510 	int qidx, error, intr_idx, txrx_established;
   5511 	pci_chipset_tag_t pc = sc->sc_pc;
   5512 	const char *intrstr = NULL;
   5513 	char intrbuf[PCI_INTRSTR_LEN];
   5514 	char intr_xname[INTRDEVNAMEBUF];
   5515 
   5516 	if (sc->sc_nqueues < ncpu) {
   5517 		/*
    5518 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5519 		 * interrupts starts from CPU#1.
   5520 		 */
   5521 		sc->sc_affinity_offset = 1;
   5522 	} else {
   5523 		/*
    5524 		 * In this case, this device uses all CPUs, so we unify the
    5525 		 * affinity cpu_index with the MSI-X vector number for clarity.
   5526 		 */
   5527 		sc->sc_affinity_offset = 0;
   5528 	}
   5529 
   5530 	error = wm_alloc_txrx_queues(sc);
   5531 	if (error) {
   5532 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5533 		    error);
   5534 		return ENOMEM;
   5535 	}
   5536 
   5537 	kcpuset_create(&affinity, false);
   5538 	intr_idx = 0;
   5539 
   5540 	/*
   5541 	 * TX and RX
   5542 	 */
   5543 	txrx_established = 0;
   5544 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5545 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5546 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5547 
   5548 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5549 		    sizeof(intrbuf));
   5550 #ifdef WM_MPSAFE
   5551 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5552 		    PCI_INTR_MPSAFE, true);
   5553 #endif
   5554 		memset(intr_xname, 0, sizeof(intr_xname));
   5555 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5556 		    device_xname(sc->sc_dev), qidx);
   5557 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5558 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5559 		if (vih == NULL) {
   5560 			aprint_error_dev(sc->sc_dev,
   5561 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5562 			    intrstr ? " at " : "",
   5563 			    intrstr ? intrstr : "");
   5564 
   5565 			goto fail;
   5566 		}
   5567 		kcpuset_zero(affinity);
   5568 		/* Round-robin affinity */
   5569 		kcpuset_set(affinity, affinity_to);
   5570 		error = interrupt_distribute(vih, affinity, NULL);
   5571 		if (error == 0) {
   5572 			aprint_normal_dev(sc->sc_dev,
   5573 			    "for TX and RX interrupting at %s affinity to %u\n",
   5574 			    intrstr, affinity_to);
   5575 		} else {
   5576 			aprint_normal_dev(sc->sc_dev,
   5577 			    "for TX and RX interrupting at %s\n", intrstr);
   5578 		}
   5579 		sc->sc_ihs[intr_idx] = vih;
   5580 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5581 			goto fail;
   5582 		txrx_established++;
   5583 		intr_idx++;
   5584 	}
   5585 
   5586 	/* LINK */
   5587 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5588 	    sizeof(intrbuf));
   5589 #ifdef WM_MPSAFE
   5590 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5591 #endif
   5592 	memset(intr_xname, 0, sizeof(intr_xname));
   5593 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5594 	    device_xname(sc->sc_dev));
   5595 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5596 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5597 	if (vih == NULL) {
   5598 		aprint_error_dev(sc->sc_dev,
   5599 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5600 		    intrstr ? " at " : "",
   5601 		    intrstr ? intrstr : "");
   5602 
   5603 		goto fail;
   5604 	}
   5605 	/* Keep default affinity to LINK interrupt */
   5606 	aprint_normal_dev(sc->sc_dev,
   5607 	    "for LINK interrupting at %s\n", intrstr);
   5608 	sc->sc_ihs[intr_idx] = vih;
   5609 	sc->sc_link_intr_idx = intr_idx;
   5610 
   5611 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5612 	kcpuset_destroy(affinity);
   5613 	return 0;
   5614 
   5615  fail:
   5616 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5617 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5618 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5619 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5620 	}
   5621 
   5622 	kcpuset_destroy(affinity);
   5623 	return ENOMEM;
   5624 }
   5625 
   5626 static void
   5627 wm_unset_stopping_flags(struct wm_softc *sc)
   5628 {
   5629 	int i;
   5630 
   5631 	KASSERT(WM_CORE_LOCKED(sc));
   5632 
   5633 	/* Must unset stopping flags in ascending order. */
   5634 	for (i = 0; i < sc->sc_nqueues; i++) {
   5635 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5636 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5637 
   5638 		mutex_enter(txq->txq_lock);
   5639 		txq->txq_stopping = false;
   5640 		mutex_exit(txq->txq_lock);
   5641 
   5642 		mutex_enter(rxq->rxq_lock);
   5643 		rxq->rxq_stopping = false;
   5644 		mutex_exit(rxq->rxq_lock);
   5645 	}
   5646 
   5647 	sc->sc_core_stopping = false;
   5648 }
   5649 
   5650 static void
   5651 wm_set_stopping_flags(struct wm_softc *sc)
   5652 {
   5653 	int i;
   5654 
   5655 	KASSERT(WM_CORE_LOCKED(sc));
   5656 
   5657 	sc->sc_core_stopping = true;
   5658 
   5659 	/* Must set stopping flags in ascending order. */
   5660 	for (i = 0; i < sc->sc_nqueues; i++) {
   5661 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5662 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5663 
   5664 		mutex_enter(rxq->rxq_lock);
   5665 		rxq->rxq_stopping = true;
   5666 		mutex_exit(rxq->rxq_lock);
   5667 
   5668 		mutex_enter(txq->txq_lock);
   5669 		txq->txq_stopping = true;
   5670 		mutex_exit(txq->txq_lock);
   5671 	}
   5672 }
   5673 
   5674 /*
   5675  * Write interrupt interval value to ITR or EITR
   5676  */
   5677 static void
   5678 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5679 {
   5680 
   5681 	if (!wmq->wmq_set_itr)
   5682 		return;
   5683 
   5684 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5685 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5686 
   5687 		/*
    5688 		 * The 82575 doesn't have the CNT_INGR field,
    5689 		 * so overwrite the counter field in software.
   5690 		 */
   5691 		if (sc->sc_type == WM_T_82575)
   5692 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5693 		else
   5694 			eitr |= EITR_CNT_INGR;
   5695 
   5696 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5697 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5698 		/*
    5699 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5700 		 * the multiqueue function with MSI-X.
   5701 		 */
   5702 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5703 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5704 	} else {
   5705 		KASSERT(wmq->wmq_id == 0);
   5706 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5707 	}
   5708 
   5709 	wmq->wmq_set_itr = false;
   5710 }
   5711 
   5712 /*
   5713  * TODO
    5714  * The dynamic calculation of ITR below is almost the same as Linux igb;
    5715  * however, it does not fit wm(4) well. So, AIM is kept disabled
    5716  * until we find an appropriate ITR calculation for wm(4).
   5717  */
   5718 /*
    5719  * Calculate the interrupt interval value to be written to the register
    5720  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5721  */
   5722 static void
   5723 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5724 {
   5725 #ifdef NOTYET
   5726 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5727 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5728 	uint32_t avg_size = 0;
   5729 	uint32_t new_itr;
   5730 
   5731 	if (rxq->rxq_packets)
    5732 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   5733 	if (txq->txq_packets)
   5734 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5735 
   5736 	if (avg_size == 0) {
   5737 		new_itr = 450; /* restore default value */
   5738 		goto out;
   5739 	}
   5740 
   5741 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5742 	avg_size += 24;
   5743 
   5744 	/* Don't starve jumbo frames */
   5745 	avg_size = uimin(avg_size, 3000);
   5746 
   5747 	/* Give a little boost to mid-size frames */
   5748 	if ((avg_size > 300) && (avg_size < 1200))
   5749 		new_itr = avg_size / 3;
   5750 	else
   5751 		new_itr = avg_size / 2;
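         	/*
         	 * e.g. full-sized 1500-byte frames give avg_size = 1524, so
         	 * new_itr = 1524 / 2 = 762 before the unit scaling below.
         	 */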
   5752 
   5753 out:
   5754 	/*
    5755 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5756 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5757 	 */
   5758 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5759 		new_itr *= 4;
   5760 
   5761 	if (new_itr != wmq->wmq_itr) {
   5762 		wmq->wmq_itr = new_itr;
   5763 		wmq->wmq_set_itr = true;
   5764 	} else
   5765 		wmq->wmq_set_itr = false;
   5766 
   5767 	rxq->rxq_packets = 0;
   5768 	rxq->rxq_bytes = 0;
   5769 	txq->txq_packets = 0;
   5770 	txq->txq_bytes = 0;
   5771 #endif
   5772 }
   5773 
   5774 static void
   5775 wm_init_sysctls(struct wm_softc *sc)
   5776 {
   5777 	struct sysctllog **log;
   5778 	const struct sysctlnode *rnode, *cnode;
   5779 	int rv;
   5780 	const char *dvname;
   5781 
   5782 	log = &sc->sc_sysctllog;
   5783 	dvname = device_xname(sc->sc_dev);
   5784 
   5785 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5786 	    0, CTLTYPE_NODE, dvname,
   5787 	    SYSCTL_DESCR("wm information and settings"),
   5788 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5789 	if (rv != 0)
   5790 		goto err;
   5791 
   5792 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5793 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5794 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5795 	if (rv != 0)
   5796 		goto teardown;
   5797 
   5798 	return;
   5799 
   5800 teardown:
   5801 	sysctl_teardown(log);
   5802 err:
   5803 	sc->sc_sysctllog = NULL;
   5804 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5805 	    __func__, rv);
   5806 }
   5807 
   5808 /*
   5809  * wm_init:		[ifnet interface function]
   5810  *
   5811  *	Initialize the interface.
   5812  */
   5813 static int
   5814 wm_init(struct ifnet *ifp)
   5815 {
   5816 	struct wm_softc *sc = ifp->if_softc;
   5817 	int ret;
   5818 
   5819 	WM_CORE_LOCK(sc);
   5820 	ret = wm_init_locked(ifp);
   5821 	WM_CORE_UNLOCK(sc);
   5822 
   5823 	return ret;
   5824 }
   5825 
   5826 static int
   5827 wm_init_locked(struct ifnet *ifp)
   5828 {
   5829 	struct wm_softc *sc = ifp->if_softc;
   5830 	struct ethercom *ec = &sc->sc_ethercom;
   5831 	int i, j, trynum, error = 0;
   5832 	uint32_t reg, sfp_mask = 0;
   5833 
   5834 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5835 		device_xname(sc->sc_dev), __func__));
   5836 	KASSERT(WM_CORE_LOCKED(sc));
   5837 
   5838 	/*
    5839 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5840 	 * There is a small but measurable benefit to avoiding the adjustment
    5841 	 * of the descriptor so that the headers are aligned, for normal MTU,
   5842 	 * on such platforms.  One possibility is that the DMA itself is
   5843 	 * slightly more efficient if the front of the entire packet (instead
   5844 	 * of the front of the headers) is aligned.
   5845 	 *
   5846 	 * Note we must always set align_tweak to 0 if we are using
   5847 	 * jumbo frames.
   5848 	 */
   5849 #ifdef __NO_STRICT_ALIGNMENT
   5850 	sc->sc_align_tweak = 0;
   5851 #else
   5852 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5853 		sc->sc_align_tweak = 0;
   5854 	else
   5855 		sc->sc_align_tweak = 2;
   5856 #endif /* __NO_STRICT_ALIGNMENT */
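         	/*
         	 * The 2-byte tweak makes the 14-byte Ethernet header end on a
         	 * 4-byte boundary, so the IP header that follows is aligned on
         	 * strict-alignment platforms.
         	 */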
   5857 
   5858 	/* Cancel any pending I/O. */
   5859 	wm_stop_locked(ifp, false, false);
   5860 
   5861 	/* Update statistics before reset */
   5862 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   5863 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   5864 
   5865 	/* PCH_SPT hardware workaround */
   5866 	if (sc->sc_type == WM_T_PCH_SPT)
   5867 		wm_flush_desc_rings(sc);
   5868 
   5869 	/* Reset the chip to a known state. */
   5870 	wm_reset(sc);
   5871 
   5872 	/*
    5873 	 * AMT-based hardware can now take control from firmware.
   5874 	 * Do this after reset.
   5875 	 */
   5876 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5877 		wm_get_hw_control(sc);
   5878 
   5879 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5880 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5881 		wm_legacy_irq_quirk_spt(sc);
   5882 
   5883 	/* Init hardware bits */
   5884 	wm_initialize_hardware_bits(sc);
   5885 
   5886 	/* Reset the PHY. */
   5887 	if (sc->sc_flags & WM_F_HAS_MII)
   5888 		wm_gmii_reset(sc);
   5889 
   5890 	if (sc->sc_type >= WM_T_ICH8) {
   5891 		reg = CSR_READ(sc, WMREG_GCR);
   5892 		/*
   5893 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5894 		 * default after reset.
   5895 		 */
   5896 		if (sc->sc_type == WM_T_ICH8)
   5897 			reg |= GCR_NO_SNOOP_ALL;
   5898 		else
   5899 			reg &= ~GCR_NO_SNOOP_ALL;
   5900 		CSR_WRITE(sc, WMREG_GCR, reg);
   5901 	}
   5902 
   5903 	if ((sc->sc_type >= WM_T_ICH8)
   5904 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5905 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5906 
   5907 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5908 		reg |= CTRL_EXT_RO_DIS;
   5909 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5910 	}
   5911 
   5912 	/* Calculate (E)ITR value */
   5913 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5914 		/*
   5915 		 * For NEWQUEUE's EITR (except for 82575).
    5916 		 * The 82575's EITR should be set to the same throttling value
    5917 		 * as other old controllers' ITR because the interrupt/sec
    5918 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5919 		 *
    5920 		 * The 82574's EITR should be set to the same value as ITR.
    5921 		 *
    5922 		 * For N interrupts/sec, set this value to:
    5923 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5924 		 */
   5925 		sc->sc_itr_init = 450;
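         		/* i.e. roughly 1,000,000 / 450 ~= 2222 interrupts/sec. */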
   5926 	} else if (sc->sc_type >= WM_T_82543) {
   5927 		/*
   5928 		 * Set up the interrupt throttling register (units of 256ns)
   5929 		 * Note that a footnote in Intel's documentation says this
   5930 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5931 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5932 		 * that that is also true for the 1024ns units of the other
   5933 		 * interrupt-related timer registers -- so, really, we ought
   5934 		 * to divide this value by 4 when the link speed is low.
   5935 		 *
   5936 		 * XXX implement this division at link speed change!
   5937 		 */
   5938 
   5939 		/*
   5940 		 * For N interrupts/sec, set this value to:
   5941 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5942 		 * absolute and packet timer values to this value
   5943 		 * divided by 4 to get "simple timer" behavior.
   5944 		 */
   5945 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5946 	}
   5947 
   5948 	error = wm_init_txrx_queues(sc);
   5949 	if (error)
   5950 		goto out;
   5951 
   5952 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   5953 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   5954 	    (sc->sc_type >= WM_T_82575))
   5955 		wm_serdes_power_up_link_82575(sc);
   5956 
   5957 	/* Clear out the VLAN table -- we don't use it (yet). */
   5958 	CSR_WRITE(sc, WMREG_VET, 0);
   5959 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5960 		trynum = 10; /* Due to hw errata */
   5961 	else
   5962 		trynum = 1;
   5963 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5964 		for (j = 0; j < trynum; j++)
   5965 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5966 
   5967 	/*
   5968 	 * Set up flow-control parameters.
   5969 	 *
   5970 	 * XXX Values could probably stand some tuning.
   5971 	 */
   5972 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5973 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5974 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5975 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5976 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5977 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5978 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5979 	}
   5980 
   5981 	sc->sc_fcrtl = FCRTL_DFLT;
   5982 	if (sc->sc_type < WM_T_82543) {
   5983 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5984 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5985 	} else {
   5986 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5987 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5988 	}
   5989 
   5990 	if (sc->sc_type == WM_T_80003)
   5991 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5992 	else
   5993 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5994 
   5995 	/* Writes the control register. */
   5996 	wm_set_vlan(sc);
   5997 
   5998 	if (sc->sc_flags & WM_F_HAS_MII) {
   5999 		uint16_t kmreg;
   6000 
   6001 		switch (sc->sc_type) {
   6002 		case WM_T_80003:
   6003 		case WM_T_ICH8:
   6004 		case WM_T_ICH9:
   6005 		case WM_T_ICH10:
   6006 		case WM_T_PCH:
   6007 		case WM_T_PCH2:
   6008 		case WM_T_PCH_LPT:
   6009 		case WM_T_PCH_SPT:
   6010 		case WM_T_PCH_CNP:
   6011 			/*
   6012 			 * Set the mac to wait the maximum time between each
   6013 			 * iteration and increase the max iterations when
   6014 			 * polling the phy; this fixes erroneous timeouts at
   6015 			 * 10Mbps.
   6016 			 */
   6017 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6018 			    0xFFFF);
   6019 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6020 			    &kmreg);
   6021 			kmreg |= 0x3F;
   6022 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6023 			    kmreg);
   6024 			break;
   6025 		default:
   6026 			break;
   6027 		}
   6028 
   6029 		if (sc->sc_type == WM_T_80003) {
   6030 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6031 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6032 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6033 
    6034 			/* Bypass the RX and TX FIFOs */
   6035 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6036 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6037 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6038 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6039 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6040 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6041 		}
   6042 	}
   6043 #if 0
   6044 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6045 #endif
   6046 
   6047 	/* Set up checksum offload parameters. */
   6048 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6049 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6050 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6051 		reg |= RXCSUM_IPOFL;
   6052 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6053 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6054 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6055 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6056 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6057 
    6058 	/* Set up the MSI-X registers */
   6059 	if (wm_is_using_msix(sc)) {
   6060 		uint32_t ivar, qintr_idx;
   6061 		struct wm_queue *wmq;
   6062 		unsigned int qid;
   6063 
   6064 		if (sc->sc_type == WM_T_82575) {
   6065 			/* Interrupt control */
   6066 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6067 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6068 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6069 
   6070 			/* TX and RX */
   6071 			for (i = 0; i < sc->sc_nqueues; i++) {
   6072 				wmq = &sc->sc_queue[i];
   6073 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6074 				    EITR_TX_QUEUE(wmq->wmq_id)
   6075 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6076 			}
   6077 			/* Link status */
   6078 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6079 			    EITR_OTHER);
   6080 		} else if (sc->sc_type == WM_T_82574) {
   6081 			/* Interrupt control */
   6082 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6083 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6084 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6085 
   6086 			/*
    6087 			 * Work around an issue with spurious interrupts
    6088 			 * in MSI-X mode.
    6089 			 * At wm_initialize_hardware_bits(), sc_nintrs is not
    6090 			 * yet initialized, so re-initialize WMREG_RFCTL here.
   6091 			 */
   6092 			reg = CSR_READ(sc, WMREG_RFCTL);
   6093 			reg |= WMREG_RFCTL_ACKDIS;
   6094 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6095 
   6096 			ivar = 0;
   6097 			/* TX and RX */
   6098 			for (i = 0; i < sc->sc_nqueues; i++) {
   6099 				wmq = &sc->sc_queue[i];
   6100 				qid = wmq->wmq_id;
   6101 				qintr_idx = wmq->wmq_intr_idx;
   6102 
   6103 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6104 				    IVAR_TX_MASK_Q_82574(qid));
   6105 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6106 				    IVAR_RX_MASK_Q_82574(qid));
   6107 			}
   6108 			/* Link status */
   6109 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6110 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6111 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6112 		} else {
   6113 			/* Interrupt control */
   6114 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6115 			    | GPIE_EIAME | GPIE_PBA);
   6116 
   6117 			switch (sc->sc_type) {
   6118 			case WM_T_82580:
   6119 			case WM_T_I350:
   6120 			case WM_T_I354:
   6121 			case WM_T_I210:
   6122 			case WM_T_I211:
   6123 				/* TX and RX */
   6124 				for (i = 0; i < sc->sc_nqueues; i++) {
   6125 					wmq = &sc->sc_queue[i];
   6126 					qid = wmq->wmq_id;
   6127 					qintr_idx = wmq->wmq_intr_idx;
   6128 
   6129 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6130 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6131 					ivar |= __SHIFTIN((qintr_idx
   6132 						| IVAR_VALID),
   6133 					    IVAR_TX_MASK_Q(qid));
   6134 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6135 					ivar |= __SHIFTIN((qintr_idx
   6136 						| IVAR_VALID),
   6137 					    IVAR_RX_MASK_Q(qid));
   6138 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6139 				}
   6140 				break;
   6141 			case WM_T_82576:
   6142 				/* TX and RX */
   6143 				for (i = 0; i < sc->sc_nqueues; i++) {
   6144 					wmq = &sc->sc_queue[i];
   6145 					qid = wmq->wmq_id;
   6146 					qintr_idx = wmq->wmq_intr_idx;
   6147 
   6148 					ivar = CSR_READ(sc,
   6149 					    WMREG_IVAR_Q_82576(qid));
   6150 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6151 					ivar |= __SHIFTIN((qintr_idx
   6152 						| IVAR_VALID),
   6153 					    IVAR_TX_MASK_Q_82576(qid));
   6154 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6155 					ivar |= __SHIFTIN((qintr_idx
   6156 						| IVAR_VALID),
   6157 					    IVAR_RX_MASK_Q_82576(qid));
   6158 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6159 					    ivar);
   6160 				}
   6161 				break;
   6162 			default:
   6163 				break;
   6164 			}
   6165 
   6166 			/* Link status */
   6167 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6168 			    IVAR_MISC_OTHER);
   6169 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6170 		}
   6171 
   6172 		if (wm_is_using_multiqueue(sc)) {
   6173 			wm_init_rss(sc);
   6174 
   6175 			/*
    6176 			 * NOTE: Receive Full-Packet Checksum Offload
    6177 			 * is mutually exclusive with Multiqueue. However,
    6178 			 * this is not the same as TCP/IP checksum offload,
    6179 			 * which still works.
    6180 			 */
   6181 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6182 			reg |= RXCSUM_PCSD;
   6183 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6184 		}
   6185 	}
   6186 
   6187 	/* Set up the interrupt registers. */
   6188 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6189 
   6190 	/* Enable SFP module insertion interrupt if it's required */
   6191 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6192 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6193 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6194 		sfp_mask = ICR_GPI(0);
   6195 	}
   6196 
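         	/*
         	 * In MSI-X mode the EIAC/EIAM/EIMS registers manage the
         	 * per-queue vectors (auto-clear, auto-mask and enable,
         	 * respectively), while IMS gates the remaining causes such
         	 * as link status changes (LSC) and the SFP GPI bit set up
         	 * above. The 82574 uses its own EIAC variant and is
         	 * handled separately below.
         	 */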
   6197 	if (wm_is_using_msix(sc)) {
   6198 		uint32_t mask;
   6199 		struct wm_queue *wmq;
   6200 
   6201 		switch (sc->sc_type) {
   6202 		case WM_T_82574:
   6203 			mask = 0;
   6204 			for (i = 0; i < sc->sc_nqueues; i++) {
   6205 				wmq = &sc->sc_queue[i];
   6206 				mask |= ICR_TXQ(wmq->wmq_id);
   6207 				mask |= ICR_RXQ(wmq->wmq_id);
   6208 			}
   6209 			mask |= ICR_OTHER;
   6210 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6211 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6212 			break;
   6213 		default:
   6214 			if (sc->sc_type == WM_T_82575) {
   6215 				mask = 0;
   6216 				for (i = 0; i < sc->sc_nqueues; i++) {
   6217 					wmq = &sc->sc_queue[i];
   6218 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6219 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6220 				}
   6221 				mask |= EITR_OTHER;
   6222 			} else {
   6223 				mask = 0;
   6224 				for (i = 0; i < sc->sc_nqueues; i++) {
   6225 					wmq = &sc->sc_queue[i];
   6226 					mask |= 1 << wmq->wmq_intr_idx;
   6227 				}
   6228 				mask |= 1 << sc->sc_link_intr_idx;
   6229 			}
   6230 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6231 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6232 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6233 
   6234 			/* For other interrupts */
   6235 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6236 			break;
   6237 		}
   6238 	} else {
   6239 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6240 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6241 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6242 	}
   6243 
   6244 	/* Set up the inter-packet gap. */
   6245 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6246 
   6247 	if (sc->sc_type >= WM_T_82543) {
   6248 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6249 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6250 			wm_itrs_writereg(sc, wmq);
   6251 		}
   6252 		/*
    6253 		 * Link interrupts occur much less frequently than TX
    6254 		 * and RX interrupts, so we don't tune the
    6255 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6256 		 * if_igb does.
   6257 		 */
   6258 	}
   6259 
   6260 	/* Set the VLAN ethernetype. */
   6261 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6262 
   6263 	/*
   6264 	 * Set up the transmit control register; we start out with
    6265 	 * a collision distance suitable for FDX, but update it when
   6266 	 * we resolve the media type.
   6267 	 */
   6268 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6269 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6270 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6271 	if (sc->sc_type >= WM_T_82571)
   6272 		sc->sc_tctl |= TCTL_MULR;
   6273 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6274 
   6275 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6276 		/* Write TDT after TCTL.EN is set. See the datasheet. */
   6277 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6278 	}
   6279 
   6280 	if (sc->sc_type == WM_T_80003) {
   6281 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6282 		reg &= ~TCTL_EXT_GCEX_MASK;
   6283 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6284 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6285 	}
   6286 
   6287 	/* Set the media. */
   6288 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6289 		goto out;
   6290 
   6291 	/* Configure for OS presence */
   6292 	wm_init_manageability(sc);
   6293 
   6294 	/*
   6295 	 * Set up the receive control register; we actually program the
   6296 	 * register when we set the receive filter. Use multicast address
   6297 	 * offset type 0.
   6298 	 *
   6299 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6300 	 * don't enable that feature.
   6301 	 */
   6302 	sc->sc_mchash_type = 0;
   6303 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6304 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6305 
    6306 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   6307 	if (sc->sc_type == WM_T_82574)
   6308 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6309 
   6310 	/*
   6311 	 * The I350 has a bug where it always strips the CRC whether
    6312 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   6313 	 */
   6314 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6315 	    || (sc->sc_type == WM_T_I210))
   6316 		sc->sc_rctl |= RCTL_SECRC;
   6317 
   6318 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6319 	    && (ifp->if_mtu > ETHERMTU)) {
   6320 		sc->sc_rctl |= RCTL_LPE;
   6321 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6322 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6323 	}
   6324 
   6325 	if (MCLBYTES == 2048)
   6326 		sc->sc_rctl |= RCTL_2k;
   6327 	else {
   6328 		if (sc->sc_type >= WM_T_82543) {
   6329 			switch (MCLBYTES) {
   6330 			case 4096:
   6331 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6332 				break;
   6333 			case 8192:
   6334 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6335 				break;
   6336 			case 16384:
   6337 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6338 				break;
   6339 			default:
   6340 				panic("wm_init: MCLBYTES %d unsupported",
   6341 				    MCLBYTES);
   6342 				break;
   6343 			}
   6344 		} else
   6345 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6346 	}
   6347 
   6348 	/* Enable ECC */
   6349 	switch (sc->sc_type) {
   6350 	case WM_T_82571:
   6351 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6352 		reg |= PBA_ECC_CORR_EN;
   6353 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6354 		break;
   6355 	case WM_T_PCH_LPT:
   6356 	case WM_T_PCH_SPT:
   6357 	case WM_T_PCH_CNP:
   6358 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6359 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6360 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6361 
   6362 		sc->sc_ctrl |= CTRL_MEHE;
   6363 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6364 		break;
   6365 	default:
   6366 		break;
   6367 	}
   6368 
   6369 	/*
   6370 	 * Set the receive filter.
   6371 	 *
   6372 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6373 	 * the setting of RCTL.EN in wm_set_filter()
   6374 	 */
   6375 	wm_set_filter(sc);
   6376 
    6377 	/* On 82575 and later, set RDT only if RX is enabled. */
   6378 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6379 		int qidx;
   6380 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6381 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6382 			for (i = 0; i < WM_NRXDESC; i++) {
   6383 				mutex_enter(rxq->rxq_lock);
   6384 				wm_init_rxdesc(rxq, i);
   6385 				mutex_exit(rxq->rxq_lock);
   6386 
   6387 			}
   6388 		}
   6389 	}
   6390 
   6391 	wm_unset_stopping_flags(sc);
   6392 
   6393 	/* Start the one second link check clock. */
   6394 	callout_schedule(&sc->sc_tick_ch, hz);
   6395 
   6396 	/* ...all done! */
   6397 	ifp->if_flags |= IFF_RUNNING;
   6398 
   6399  out:
   6400 	/* Save last flags for the callback */
   6401 	sc->sc_if_flags = ifp->if_flags;
   6402 	sc->sc_ec_capenable = ec->ec_capenable;
   6403 	if (error)
   6404 		log(LOG_ERR, "%s: interface not running\n",
   6405 		    device_xname(sc->sc_dev));
   6406 	return error;
   6407 }
   6408 
   6409 /*
   6410  * wm_stop:		[ifnet interface function]
   6411  *
   6412  *	Stop transmission on the interface.
   6413  */
   6414 static void
   6415 wm_stop(struct ifnet *ifp, int disable)
   6416 {
   6417 	struct wm_softc *sc = ifp->if_softc;
   6418 
   6419 	ASSERT_SLEEPABLE();
   6420 
   6421 	WM_CORE_LOCK(sc);
   6422 	wm_stop_locked(ifp, disable ? true : false, true);
   6423 	WM_CORE_UNLOCK(sc);
   6424 
   6425 	/*
    6426 	 * After wm_set_stopping_flags(), it is guaranteed that
    6427 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    6428 	 * However, workqueue_wait() cannot be called from
    6429 	 * wm_stop_locked() because it can sleep,
    6430 	 * so call workqueue_wait() here.
   6431 	 */
   6432 	for (int i = 0; i < sc->sc_nqueues; i++)
   6433 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6434 }
   6435 
   6436 static void
   6437 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6438 {
   6439 	struct wm_softc *sc = ifp->if_softc;
   6440 	struct wm_txsoft *txs;
   6441 	int i, qidx;
   6442 
   6443 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6444 		device_xname(sc->sc_dev), __func__));
   6445 	KASSERT(WM_CORE_LOCKED(sc));
   6446 
   6447 	wm_set_stopping_flags(sc);
   6448 
   6449 	if (sc->sc_flags & WM_F_HAS_MII) {
   6450 		/* Down the MII. */
   6451 		mii_down(&sc->sc_mii);
   6452 	} else {
   6453 #if 0
   6454 		/* Should we clear PHY's status properly? */
   6455 		wm_reset(sc);
   6456 #endif
   6457 	}
   6458 
   6459 	/* Stop the transmit and receive processes. */
   6460 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6461 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6462 	sc->sc_rctl &= ~RCTL_EN;
   6463 
   6464 	/*
   6465 	 * Clear the interrupt mask to ensure the device cannot assert its
   6466 	 * interrupt line.
   6467 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6468 	 * service any currently pending or shared interrupt.
   6469 	 */
   6470 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6471 	sc->sc_icr = 0;
   6472 	if (wm_is_using_msix(sc)) {
   6473 		if (sc->sc_type != WM_T_82574) {
   6474 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6475 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6476 		} else
   6477 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6478 	}
   6479 
   6480 	/*
   6481 	 * Stop callouts after interrupts are disabled; if we have
   6482 	 * to wait for them, we will be releasing the CORE_LOCK
   6483 	 * briefly, which will unblock interrupts on the current CPU.
   6484 	 */
   6485 
   6486 	/* Stop the one second clock. */
   6487 	if (wait)
   6488 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6489 	else
   6490 		callout_stop(&sc->sc_tick_ch);
   6491 
   6492 	/* Stop the 82547 Tx FIFO stall check timer. */
   6493 	if (sc->sc_type == WM_T_82547) {
   6494 		if (wait)
   6495 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6496 		else
   6497 			callout_stop(&sc->sc_txfifo_ch);
   6498 	}
   6499 
   6500 	/* Release any queued transmit buffers. */
   6501 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6502 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6503 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6504 		mutex_enter(txq->txq_lock);
   6505 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6506 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6507 			txs = &txq->txq_soft[i];
   6508 			if (txs->txs_mbuf != NULL) {
    6509 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6510 				m_freem(txs->txs_mbuf);
   6511 				txs->txs_mbuf = NULL;
   6512 			}
   6513 		}
   6514 		mutex_exit(txq->txq_lock);
   6515 	}
   6516 
   6517 	/* Mark the interface as down and cancel the watchdog timer. */
   6518 	ifp->if_flags &= ~IFF_RUNNING;
   6519 
   6520 	if (disable) {
   6521 		for (i = 0; i < sc->sc_nqueues; i++) {
   6522 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6523 			mutex_enter(rxq->rxq_lock);
   6524 			wm_rxdrain(rxq);
   6525 			mutex_exit(rxq->rxq_lock);
   6526 		}
   6527 	}
   6528 
   6529 #if 0 /* notyet */
   6530 	if (sc->sc_type >= WM_T_82544)
   6531 		CSR_WRITE(sc, WMREG_WUC, 0);
   6532 #endif
   6533 }
   6534 
   6535 static void
   6536 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6537 {
   6538 	struct mbuf *m;
   6539 	int i;
   6540 
   6541 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6542 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6543 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6544 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6545 		    m->m_data, m->m_len, m->m_flags);
   6546 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6547 	    i, i == 1 ? "" : "s");
   6548 }
   6549 
   6550 /*
   6551  * wm_82547_txfifo_stall:
   6552  *
   6553  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6554  *	reset the FIFO pointers, and restart packet transmission.
   6555  */
   6556 static void
   6557 wm_82547_txfifo_stall(void *arg)
   6558 {
   6559 	struct wm_softc *sc = arg;
   6560 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6561 
   6562 	mutex_enter(txq->txq_lock);
   6563 
   6564 	if (txq->txq_stopping)
   6565 		goto out;
   6566 
   6567 	if (txq->txq_fifo_stall) {
   6568 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6569 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6570 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6571 			/*
   6572 			 * Packets have drained.  Stop transmitter, reset
   6573 			 * FIFO pointers, restart transmitter, and kick
   6574 			 * the packet queue.
   6575 			 */
   6576 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6577 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6578 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6579 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6580 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6581 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6582 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6583 			CSR_WRITE_FLUSH(sc);
   6584 
   6585 			txq->txq_fifo_head = 0;
   6586 			txq->txq_fifo_stall = 0;
   6587 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6588 		} else {
   6589 			/*
   6590 			 * Still waiting for packets to drain; try again in
   6591 			 * another tick.
   6592 			 */
   6593 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6594 		}
   6595 	}
   6596 
   6597 out:
   6598 	mutex_exit(txq->txq_lock);
   6599 }
   6600 
   6601 /*
   6602  * wm_82547_txfifo_bugchk:
   6603  *
   6604  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6605  *	prevent enqueueing a packet that would wrap around the end
    6606  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6607  *
   6608  *	We do this by checking the amount of space before the end
   6609  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6610  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6611  *	the internal FIFO pointers to the beginning, and restart
   6612  *	transmission on the interface.
   6613  */
   6614 #define	WM_FIFO_HDR		0x10
   6615 #define	WM_82547_PAD_LEN	0x3e0
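         /*
          * Each packet occupies its length plus a WM_FIFO_HDR header in the
          * FIFO, rounded up to a multiple of WM_FIFO_HDR; e.g. a 60-byte
          * runt frame accounts for roundup(60 + 0x10, 0x10) = 0x50 bytes
          * of FIFO space.
          */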
   6616 static int
   6617 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6618 {
   6619 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6620 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6621 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6622 
   6623 	/* Just return if already stalled. */
   6624 	if (txq->txq_fifo_stall)
   6625 		return 1;
   6626 
   6627 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6628 		/* Stall only occurs in half-duplex mode. */
   6629 		goto send_packet;
   6630 	}
   6631 
   6632 	if (len >= WM_82547_PAD_LEN + space) {
   6633 		txq->txq_fifo_stall = 1;
   6634 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6635 		return 1;
   6636 	}
   6637 
   6638  send_packet:
   6639 	txq->txq_fifo_head += len;
   6640 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6641 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6642 
   6643 	return 0;
   6644 }
   6645 
   6646 static int
   6647 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6648 {
   6649 	int error;
   6650 
   6651 	/*
   6652 	 * Allocate the control data structures, and create and load the
   6653 	 * DMA map for it.
   6654 	 *
   6655 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6656 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6657 	 * both sets within the same 4G segment.
   6658 	 */
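         	/*
         	 * The 4 GB constraint is enforced below by the boundary
         	 * argument (0x100000000) passed to bus_dmamem_alloc().
         	 */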
   6659 	if (sc->sc_type < WM_T_82544)
   6660 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6661 	else
   6662 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6663 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6664 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6665 	else
   6666 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6667 
   6668 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6669 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6670 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6671 		aprint_error_dev(sc->sc_dev,
   6672 		    "unable to allocate TX control data, error = %d\n",
   6673 		    error);
   6674 		goto fail_0;
   6675 	}
   6676 
   6677 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6678 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6679 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6680 		aprint_error_dev(sc->sc_dev,
   6681 		    "unable to map TX control data, error = %d\n", error);
   6682 		goto fail_1;
   6683 	}
   6684 
   6685 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6686 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6687 		aprint_error_dev(sc->sc_dev,
   6688 		    "unable to create TX control data DMA map, error = %d\n",
   6689 		    error);
   6690 		goto fail_2;
   6691 	}
   6692 
   6693 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6694 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6695 		aprint_error_dev(sc->sc_dev,
   6696 		    "unable to load TX control data DMA map, error = %d\n",
   6697 		    error);
   6698 		goto fail_3;
   6699 	}
   6700 
   6701 	return 0;
   6702 
   6703  fail_3:
   6704 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6705  fail_2:
   6706 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6707 	    WM_TXDESCS_SIZE(txq));
   6708  fail_1:
   6709 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6710  fail_0:
   6711 	return error;
   6712 }
   6713 
   6714 static void
   6715 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6716 {
   6717 
   6718 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6719 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6720 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6721 	    WM_TXDESCS_SIZE(txq));
   6722 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6723 }
   6724 
   6725 static int
   6726 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6727 {
   6728 	int error;
   6729 	size_t rxq_descs_size;
   6730 
   6731 	/*
   6732 	 * Allocate the control data structures, and create and load the
   6733 	 * DMA map for it.
   6734 	 *
   6735 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6736 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6737 	 * both sets within the same 4G segment.
   6738 	 */
   6739 	rxq->rxq_ndesc = WM_NRXDESC;
   6740 	if (sc->sc_type == WM_T_82574)
   6741 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6742 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6743 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6744 	else
   6745 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6746 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6747 
   6748 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6749 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6750 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6751 		aprint_error_dev(sc->sc_dev,
   6752 		    "unable to allocate RX control data, error = %d\n",
   6753 		    error);
   6754 		goto fail_0;
   6755 	}
   6756 
   6757 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6758 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6759 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6760 		aprint_error_dev(sc->sc_dev,
   6761 		    "unable to map RX control data, error = %d\n", error);
   6762 		goto fail_1;
   6763 	}
   6764 
   6765 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6766 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6767 		aprint_error_dev(sc->sc_dev,
   6768 		    "unable to create RX control data DMA map, error = %d\n",
   6769 		    error);
   6770 		goto fail_2;
   6771 	}
   6772 
   6773 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6774 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6775 		aprint_error_dev(sc->sc_dev,
   6776 		    "unable to load RX control data DMA map, error = %d\n",
   6777 		    error);
   6778 		goto fail_3;
   6779 	}
   6780 
   6781 	return 0;
   6782 
   6783  fail_3:
   6784 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6785  fail_2:
   6786 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6787 	    rxq_descs_size);
   6788  fail_1:
   6789 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6790  fail_0:
   6791 	return error;
   6792 }
   6793 
   6794 static void
   6795 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6796 {
   6797 
   6798 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6799 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6800 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6801 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6802 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6803 }
   6804 
   6805 
   6806 static int
   6807 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6808 {
   6809 	int i, error;
   6810 
   6811 	/* Create the transmit buffer DMA maps. */
   6812 	WM_TXQUEUELEN(txq) =
   6813 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6814 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6815 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6816 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6817 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6818 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6819 			aprint_error_dev(sc->sc_dev,
   6820 			    "unable to create Tx DMA map %d, error = %d\n",
   6821 			    i, error);
   6822 			goto fail;
   6823 		}
   6824 	}
   6825 
   6826 	return 0;
   6827 
   6828  fail:
   6829 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6830 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6831 			bus_dmamap_destroy(sc->sc_dmat,
   6832 			    txq->txq_soft[i].txs_dmamap);
   6833 	}
   6834 	return error;
   6835 }
   6836 
   6837 static void
   6838 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6839 {
   6840 	int i;
   6841 
   6842 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6843 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6844 			bus_dmamap_destroy(sc->sc_dmat,
   6845 			    txq->txq_soft[i].txs_dmamap);
   6846 	}
   6847 }
   6848 
   6849 static int
   6850 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6851 {
   6852 	int i, error;
   6853 
   6854 	/* Create the receive buffer DMA maps. */
   6855 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6856 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6857 			    MCLBYTES, 0, 0,
   6858 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6859 			aprint_error_dev(sc->sc_dev,
    6860 			    "unable to create Rx DMA map %d, error = %d\n",
   6861 			    i, error);
   6862 			goto fail;
   6863 		}
   6864 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6865 	}
   6866 
   6867 	return 0;
   6868 
   6869  fail:
   6870 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6871 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6872 			bus_dmamap_destroy(sc->sc_dmat,
   6873 			    rxq->rxq_soft[i].rxs_dmamap);
   6874 	}
   6875 	return error;
   6876 }
   6877 
   6878 static void
   6879 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6880 {
   6881 	int i;
   6882 
   6883 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6884 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6885 			bus_dmamap_destroy(sc->sc_dmat,
   6886 			    rxq->rxq_soft[i].rxs_dmamap);
   6887 	}
   6888 }
   6889 
   6890 /*
    6891  * wm_alloc_txrx_queues:
   6892  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6893  */
   6894 static int
   6895 wm_alloc_txrx_queues(struct wm_softc *sc)
   6896 {
   6897 	int i, error, tx_done, rx_done;
   6898 
   6899 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6900 	    KM_SLEEP);
   6901 	if (sc->sc_queue == NULL) {
    6902 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6903 		error = ENOMEM;
   6904 		goto fail_0;
   6905 	}
   6906 
   6907 	/* For transmission */
   6908 	error = 0;
   6909 	tx_done = 0;
   6910 	for (i = 0; i < sc->sc_nqueues; i++) {
   6911 #ifdef WM_EVENT_COUNTERS
   6912 		int j;
   6913 		const char *xname;
   6914 #endif
   6915 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6916 		txq->txq_sc = sc;
   6917 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6918 
   6919 		error = wm_alloc_tx_descs(sc, txq);
   6920 		if (error)
   6921 			break;
   6922 		error = wm_alloc_tx_buffer(sc, txq);
   6923 		if (error) {
   6924 			wm_free_tx_descs(sc, txq);
   6925 			break;
   6926 		}
   6927 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6928 		if (txq->txq_interq == NULL) {
   6929 			wm_free_tx_descs(sc, txq);
   6930 			wm_free_tx_buffer(sc, txq);
   6931 			error = ENOMEM;
   6932 			break;
   6933 		}
   6934 
   6935 #ifdef WM_EVENT_COUNTERS
   6936 		xname = device_xname(sc->sc_dev);
   6937 
   6938 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6939 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6940 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6941 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6942 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6943 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6944 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6945 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6946 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6947 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6948 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6949 
   6950 		for (j = 0; j < WM_NTXSEGS; j++) {
   6951 			snprintf(txq->txq_txseg_evcnt_names[j],
    6952 			    sizeof(txq->txq_txseg_evcnt_names[j]),
         			    "txq%02dtxseg%d", i, j);
    6953 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
         			    EVCNT_TYPE_MISC, NULL, xname,
         			    txq->txq_txseg_evcnt_names[j]);
   6955 		}
   6956 
   6957 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6958 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6959 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6960 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6961 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6962 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   6963 #endif /* WM_EVENT_COUNTERS */
   6964 
   6965 		tx_done++;
   6966 	}
   6967 	if (error)
   6968 		goto fail_1;
   6969 
   6970 	/* For receive */
   6971 	error = 0;
   6972 	rx_done = 0;
   6973 	for (i = 0; i < sc->sc_nqueues; i++) {
   6974 #ifdef WM_EVENT_COUNTERS
   6975 		const char *xname;
   6976 #endif
   6977 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6978 		rxq->rxq_sc = sc;
   6979 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6980 
   6981 		error = wm_alloc_rx_descs(sc, rxq);
   6982 		if (error)
   6983 			break;
   6984 
   6985 		error = wm_alloc_rx_buffer(sc, rxq);
   6986 		if (error) {
   6987 			wm_free_rx_descs(sc, rxq);
   6988 			break;
   6989 		}
   6990 
   6991 #ifdef WM_EVENT_COUNTERS
   6992 		xname = device_xname(sc->sc_dev);
   6993 
   6994 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6995 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6996 
   6997 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6998 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6999 #endif /* WM_EVENT_COUNTERS */
   7000 
   7001 		rx_done++;
   7002 	}
   7003 	if (error)
   7004 		goto fail_2;
   7005 
   7006 	return 0;
   7007 
   7008  fail_2:
   7009 	for (i = 0; i < rx_done; i++) {
   7010 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7011 		wm_free_rx_buffer(sc, rxq);
   7012 		wm_free_rx_descs(sc, rxq);
   7013 		if (rxq->rxq_lock)
   7014 			mutex_obj_free(rxq->rxq_lock);
   7015 	}
   7016  fail_1:
   7017 	for (i = 0; i < tx_done; i++) {
   7018 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7019 		pcq_destroy(txq->txq_interq);
   7020 		wm_free_tx_buffer(sc, txq);
   7021 		wm_free_tx_descs(sc, txq);
   7022 		if (txq->txq_lock)
   7023 			mutex_obj_free(txq->txq_lock);
   7024 	}
   7025 
   7026 	kmem_free(sc->sc_queue,
   7027 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7028  fail_0:
   7029 	return error;
   7030 }
   7031 
   7032 /*
    7033  * wm_free_txrx_queues:
   7034  *	Free {tx,rx}descs and {tx,rx} buffers
   7035  */
   7036 static void
   7037 wm_free_txrx_queues(struct wm_softc *sc)
   7038 {
   7039 	int i;
   7040 
   7041 	for (i = 0; i < sc->sc_nqueues; i++) {
   7042 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7043 
   7044 #ifdef WM_EVENT_COUNTERS
   7045 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7046 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7047 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7048 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7049 #endif /* WM_EVENT_COUNTERS */
   7050 
   7051 		wm_free_rx_buffer(sc, rxq);
   7052 		wm_free_rx_descs(sc, rxq);
   7053 		if (rxq->rxq_lock)
   7054 			mutex_obj_free(rxq->rxq_lock);
   7055 	}
   7056 
   7057 	for (i = 0; i < sc->sc_nqueues; i++) {
   7058 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7059 		struct mbuf *m;
   7060 #ifdef WM_EVENT_COUNTERS
   7061 		int j;
   7062 
   7063 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7064 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7065 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7066 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7067 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7068 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7069 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7070 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7071 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7072 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7073 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7074 
   7075 		for (j = 0; j < WM_NTXSEGS; j++)
   7076 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7077 
   7078 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7079 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7080 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7081 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7082 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7083 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7084 #endif /* WM_EVENT_COUNTERS */
   7085 
   7086 		/* Drain txq_interq */
   7087 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7088 			m_freem(m);
   7089 		pcq_destroy(txq->txq_interq);
   7090 
   7091 		wm_free_tx_buffer(sc, txq);
   7092 		wm_free_tx_descs(sc, txq);
   7093 		if (txq->txq_lock)
   7094 			mutex_obj_free(txq->txq_lock);
   7095 	}
   7096 
   7097 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7098 }
   7099 
   7100 static void
   7101 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7102 {
   7103 
   7104 	KASSERT(mutex_owned(txq->txq_lock));
   7105 
   7106 	/* Initialize the transmit descriptor ring. */
   7107 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7108 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7109 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7110 	txq->txq_free = WM_NTXDESC(txq);
   7111 	txq->txq_next = 0;
   7112 }
   7113 
   7114 static void
   7115 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7116     struct wm_txqueue *txq)
   7117 {
   7118 
   7119 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7120 		device_xname(sc->sc_dev), __func__));
   7121 	KASSERT(mutex_owned(txq->txq_lock));
   7122 
   7123 	if (sc->sc_type < WM_T_82543) {
   7124 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7125 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7126 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7127 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7128 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7129 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7130 	} else {
   7131 		int qid = wmq->wmq_id;
   7132 
   7133 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7134 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7135 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7136 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7137 
   7138 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7139 			/*
   7140 			 * Don't write TDT before TCTL.EN is set.
    7141 			 * See the datasheet.
   7142 			 */
   7143 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7144 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7145 			    | TXDCTL_WTHRESH(0));
   7146 		else {
   7147 			/* XXX should update with AIM? */
   7148 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7149 			if (sc->sc_type >= WM_T_82540) {
    7150 				/* Should be the same as TIDV */
   7151 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7152 			}
   7153 
   7154 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7155 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7156 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7157 		}
   7158 	}
   7159 }
   7160 
   7161 static void
   7162 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7163 {
   7164 	int i;
   7165 
   7166 	KASSERT(mutex_owned(txq->txq_lock));
   7167 
   7168 	/* Initialize the transmit job descriptors. */
   7169 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7170 		txq->txq_soft[i].txs_mbuf = NULL;
   7171 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7172 	txq->txq_snext = 0;
   7173 	txq->txq_sdirty = 0;
   7174 }
   7175 
   7176 static void
   7177 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7178     struct wm_txqueue *txq)
   7179 {
   7180 
   7181 	KASSERT(mutex_owned(txq->txq_lock));
   7182 
   7183 	/*
   7184 	 * Set up some register offsets that are different between
   7185 	 * the i82542 and the i82543 and later chips.
   7186 	 */
   7187 	if (sc->sc_type < WM_T_82543)
   7188 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7189 	else
   7190 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7191 
   7192 	wm_init_tx_descs(sc, txq);
   7193 	wm_init_tx_regs(sc, wmq, txq);
   7194 	wm_init_tx_buffer(sc, txq);
   7195 
   7196 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7197 	txq->txq_sending = false;
   7198 }
   7199 
   7200 static void
   7201 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7202     struct wm_rxqueue *rxq)
   7203 {
   7204 
   7205 	KASSERT(mutex_owned(rxq->rxq_lock));
   7206 
   7207 	/*
   7208 	 * Initialize the receive descriptor and receive job
   7209 	 * descriptor rings.
   7210 	 */
   7211 	if (sc->sc_type < WM_T_82543) {
   7212 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7213 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7214 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7215 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7216 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7217 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7218 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7219 
   7220 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7221 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7222 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7223 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7224 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7225 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7226 	} else {
   7227 		int qid = wmq->wmq_id;
   7228 
   7229 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7230 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7231 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7232 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7233 
   7234 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7235 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7236 				panic("%s: MCLBYTES %d unsupported for 82575 "
         				    "or higher\n", __func__, MCLBYTES);
    7237 
    7238 			/*
         			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
         			 * supported.
         			 */
    7239 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
         			    SRRCTL_DESCTYPE_ADV_ONEBUF
    7240 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
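         			/*
         			 * SRRCTL.BSIZEPKT is in units of
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes (1 KB),
         			 * hence the granularity check above; e.g. the
         			 * usual MCLBYTES of 2048 programs a value of 2.
         			 */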
   7241 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7242 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7243 			    | RXDCTL_WTHRESH(1));
   7244 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7245 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7246 		} else {
   7247 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7248 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7249 			/* XXX should update with AIM? */
   7250 			CSR_WRITE(sc, WMREG_RDTR,
   7251 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7252 			/* MUST be same */
   7253 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7254 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7255 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7256 		}
   7257 	}
   7258 }
   7259 
   7260 static int
   7261 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7262 {
   7263 	struct wm_rxsoft *rxs;
   7264 	int error, i;
   7265 
   7266 	KASSERT(mutex_owned(rxq->rxq_lock));
   7267 
   7268 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7269 		rxs = &rxq->rxq_soft[i];
   7270 		if (rxs->rxs_mbuf == NULL) {
   7271 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7272 				log(LOG_ERR, "%s: unable to allocate or map "
   7273 				    "rx buffer %d, error = %d\n",
   7274 				    device_xname(sc->sc_dev), i, error);
   7275 				/*
   7276 				 * XXX Should attempt to run with fewer receive
   7277 				 * XXX buffers instead of just failing.
   7278 				 */
   7279 				wm_rxdrain(rxq);
   7280 				return ENOMEM;
   7281 			}
   7282 		} else {
   7283 			/*
   7284 			 * For 82575 and 82576, the RX descriptors must be
   7285 			 * initialized after the setting of RCTL.EN in
   7286 			 * wm_set_filter()
   7287 			 */
   7288 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7289 				wm_init_rxdesc(rxq, i);
   7290 		}
   7291 	}
   7292 	rxq->rxq_ptr = 0;
   7293 	rxq->rxq_discard = 0;
   7294 	WM_RXCHAIN_RESET(rxq);
   7295 
   7296 	return 0;
   7297 }
   7298 
   7299 static int
   7300 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7301     struct wm_rxqueue *rxq)
   7302 {
   7303 
   7304 	KASSERT(mutex_owned(rxq->rxq_lock));
   7305 
   7306 	/*
   7307 	 * Set up some register offsets that are different between
   7308 	 * the i82542 and the i82543 and later chips.
   7309 	 */
   7310 	if (sc->sc_type < WM_T_82543)
   7311 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7312 	else
   7313 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7314 
   7315 	wm_init_rx_regs(sc, wmq, rxq);
   7316 	return wm_init_rx_buffer(sc, rxq);
   7317 }
   7318 
   7319 /*
    7320  * wm_init_txrx_queues:
   7321  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7322  */
   7323 static int
   7324 wm_init_txrx_queues(struct wm_softc *sc)
   7325 {
   7326 	int i, error = 0;
   7327 
   7328 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7329 		device_xname(sc->sc_dev), __func__));
   7330 
   7331 	for (i = 0; i < sc->sc_nqueues; i++) {
   7332 		struct wm_queue *wmq = &sc->sc_queue[i];
   7333 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7334 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7335 
   7336 		/*
   7337 		 * TODO
    7338 		 * Currently, use a constant value instead of AIM (adaptive
    7339 		 * interrupt moderation). Furthermore, the interrupt interval
    7340 		 * of multiqueue (which uses polling mode) is less than the
    7341 		 * default value. More tuning and AIM are required.
   7342 		 */
   7343 		if (wm_is_using_multiqueue(sc))
   7344 			wmq->wmq_itr = 50;
   7345 		else
   7346 			wmq->wmq_itr = sc->sc_itr_init;
   7347 		wmq->wmq_set_itr = true;
   7348 
   7349 		mutex_enter(txq->txq_lock);
   7350 		wm_init_tx_queue(sc, wmq, txq);
   7351 		mutex_exit(txq->txq_lock);
   7352 
   7353 		mutex_enter(rxq->rxq_lock);
   7354 		error = wm_init_rx_queue(sc, wmq, rxq);
   7355 		mutex_exit(rxq->rxq_lock);
   7356 		if (error)
   7357 			break;
   7358 	}
   7359 
   7360 	return error;
   7361 }
   7362 
   7363 /*
   7364  * wm_tx_offload:
   7365  *
   7366  *	Set up TCP/IP checksumming parameters for the
   7367  *	specified packet.
   7368  */
   7369 static void
   7370 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7371     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7372 {
   7373 	struct mbuf *m0 = txs->txs_mbuf;
   7374 	struct livengood_tcpip_ctxdesc *t;
   7375 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7376 	uint32_t ipcse;
   7377 	struct ether_header *eh;
   7378 	int offset, iphl;
   7379 	uint8_t fields;
   7380 
   7381 	/*
   7382 	 * XXX It would be nice if the mbuf pkthdr had offset
   7383 	 * fields for the protocol headers.
   7384 	 */
   7385 
   7386 	eh = mtod(m0, struct ether_header *);
   7387 	switch (htons(eh->ether_type)) {
   7388 	case ETHERTYPE_IP:
   7389 	case ETHERTYPE_IPV6:
   7390 		offset = ETHER_HDR_LEN;
   7391 		break;
   7392 
   7393 	case ETHERTYPE_VLAN:
   7394 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7395 		break;
   7396 
   7397 	default:
   7398 		/* Don't support this protocol or encapsulation. */
    7399 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
    7400 		txq->txq_last_hw_ipcs = 0;
    7401 		txq->txq_last_hw_tucs = 0;
   7402 		*fieldsp = 0;
   7403 		*cmdp = 0;
   7404 		return;
   7405 	}
   7406 
   7407 	if ((m0->m_pkthdr.csum_flags &
   7408 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7409 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7410 	} else
   7411 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7412 
   7413 	ipcse = offset + iphl - 1;
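         	/*
         	 * E.g. for plain IPv4 over Ethernet with no IP options
         	 * (offset = 14, iphl = 20), ipcse = 33: the offset of the
         	 * last byte of the IP header.
         	 */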
   7414 
   7415 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7416 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7417 	seg = 0;
   7418 	fields = 0;
   7419 
   7420 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7421 		int hlen = offset + iphl;
   7422 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7423 
   7424 		if (__predict_false(m0->m_len <
   7425 				    (hlen + sizeof(struct tcphdr)))) {
   7426 			/*
   7427 			 * TCP/IP headers are not in the first mbuf; we need
   7428 			 * to do this the slow and painful way. Let's just
   7429 			 * hope this doesn't happen very often.
   7430 			 */
   7431 			struct tcphdr th;
   7432 
   7433 			WM_Q_EVCNT_INCR(txq, tsopain);
   7434 
   7435 			m_copydata(m0, hlen, sizeof(th), &th);
   7436 			if (v4) {
   7437 				struct ip ip;
   7438 
   7439 				m_copydata(m0, offset, sizeof(ip), &ip);
   7440 				ip.ip_len = 0;
   7441 				m_copyback(m0,
   7442 				    offset + offsetof(struct ip, ip_len),
   7443 				    sizeof(ip.ip_len), &ip.ip_len);
   7444 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7445 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7446 			} else {
   7447 				struct ip6_hdr ip6;
   7448 
   7449 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7450 				ip6.ip6_plen = 0;
   7451 				m_copyback(m0,
   7452 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7453 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7454 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7455 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7456 			}
   7457 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7458 			    sizeof(th.th_sum), &th.th_sum);
   7459 
   7460 			hlen += th.th_off << 2;
   7461 		} else {
   7462 			/*
   7463 			 * TCP/IP headers are in the first mbuf; we can do
   7464 			 * this the easy way.
   7465 			 */
   7466 			struct tcphdr *th;
   7467 
   7468 			if (v4) {
   7469 				struct ip *ip =
   7470 				    (void *)(mtod(m0, char *) + offset);
   7471 				th = (void *)(mtod(m0, char *) + hlen);
   7472 
   7473 				ip->ip_len = 0;
   7474 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7475 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7476 			} else {
   7477 				struct ip6_hdr *ip6 =
   7478 				    (void *)(mtod(m0, char *) + offset);
   7479 				th = (void *)(mtod(m0, char *) + hlen);
   7480 
   7481 				ip6->ip6_plen = 0;
   7482 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7483 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7484 			}
   7485 			hlen += th->th_off << 2;
   7486 		}
   7487 
   7488 		if (v4) {
   7489 			WM_Q_EVCNT_INCR(txq, tso);
   7490 			cmdlen |= WTX_TCPIP_CMD_IP;
   7491 		} else {
   7492 			WM_Q_EVCNT_INCR(txq, tso6);
   7493 			ipcse = 0;
   7494 		}
   7495 		cmd |= WTX_TCPIP_CMD_TSE;
   7496 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7497 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7498 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7499 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7500 	}
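         	/*
         	 * E.g. for a TSO send of a TCP/IPv4 frame with no IP or TCP
         	 * options, hlen = 14 + 20 + 20 = 54, so seg carries
         	 * WTX_TCPIP_SEG_HDRLEN(54) plus the MSS, and cmdlen carries
         	 * the TCP payload length m0->m_pkthdr.len - 54.
         	 */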
   7501 
   7502 	/*
   7503 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7504 	 * offload feature, if we load the context descriptor, we
   7505 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7506 	 */
   7507 
   7508 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7509 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7510 	    WTX_TCPIP_IPCSE(ipcse);
   7511 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7512 		WM_Q_EVCNT_INCR(txq, ipsum);
   7513 		fields |= WTX_IXSM;
   7514 	}
   7515 
   7516 	offset += iphl;
   7517 
   7518 	if (m0->m_pkthdr.csum_flags &
   7519 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7520 		WM_Q_EVCNT_INCR(txq, tusum);
   7521 		fields |= WTX_TXSM;
   7522 		tucs = WTX_TCPIP_TUCSS(offset) |
   7523 		    WTX_TCPIP_TUCSO(offset +
   7524 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7525 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7526 	} else if ((m0->m_pkthdr.csum_flags &
   7527 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7528 		WM_Q_EVCNT_INCR(txq, tusum6);
   7529 		fields |= WTX_TXSM;
   7530 		tucs = WTX_TCPIP_TUCSS(offset) |
   7531 		    WTX_TCPIP_TUCSO(offset +
   7532 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7533 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7534 	} else {
   7535 		/* Just initialize it to a valid TCP context. */
   7536 		tucs = WTX_TCPIP_TUCSS(offset) |
   7537 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7538 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7539 	}
   7540 
   7541 	*cmdp = cmd;
   7542 	*fieldsp = fields;
   7543 
   7544 	/*
    7545 	 * We don't have to write a context descriptor for every packet,
    7546 	 * except on the 82574. For the 82574, we must write a context
    7547 	 * descriptor for every packet when we use two descriptor queues.
   7548 	 *
   7549 	 * The 82574L can only remember the *last* context used
    7550 	 * regardless of the queue it was used for.  We cannot reuse
   7551 	 * contexts on this hardware platform and must generate a new
   7552 	 * context every time.  82574L hardware spec, section 7.2.6,
   7553 	 * second note.
   7554 	 */
   7555 	if (sc->sc_nqueues < 2) {
   7556 		/*
    7557 		 * Setting up a new checksum offload context for every
    7558 		 * frame takes a lot of processing time in hardware.
    7559 		 * This also reduces performance a lot for small sized
    7560 		 * frames, so avoid it if the driver can reuse a previously
    7561 		 * configured checksum offload context.
    7562 		 * For TSO, in theory we could reuse the same TSO context
    7563 		 * only if the frame is the same type (IP/TCP) and has the
    7564 		 * same MSS. However, checking whether a frame has the same
    7565 		 * IP/TCP structure is hard, so just ignore that and always
    7566 		 * establish a new TSO context.
    7567 		 */
   7569 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7570 		    == 0) {
   7571 			if (txq->txq_last_hw_cmd == cmd &&
   7572 			    txq->txq_last_hw_fields == fields &&
   7573 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7574 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7575 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7576 				return;
   7577 			}
   7578 		}
   7579 
    7580 		txq->txq_last_hw_cmd = cmd;
    7581 		txq->txq_last_hw_fields = fields;
    7582 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7583 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7584 	}
   7585 
   7586 	/* Fill in the context descriptor. */
   7587 	t = (struct livengood_tcpip_ctxdesc *)
   7588 	    &txq->txq_descs[txq->txq_next];
   7589 	t->tcpip_ipcs = htole32(ipcs);
   7590 	t->tcpip_tucs = htole32(tucs);
   7591 	t->tcpip_cmdlen = htole32(cmdlen);
   7592 	t->tcpip_seg = htole32(seg);
   7593 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7594 
   7595 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7596 	txs->txs_ndesc++;
   7597 }
   7598 
   7599 static inline int
   7600 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7601 {
   7602 	struct wm_softc *sc = ifp->if_softc;
   7603 	u_int cpuid = cpu_index(curcpu());
   7604 
   7605 	/*
    7606 	 * Currently, a simple distribution strategy.
    7607 	 * TODO:
    7608 	 * Distribute by flowid (RSS hash value).
   7609 	 */
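         	/*
         	 * E.g. with ncpu = 8, sc_affinity_offset = 2 and
         	 * sc_nqueues = 4, a packet sent from CPU 1 maps to
         	 * ((1 + 8 - 2) % 8) % 4 = queue 3.
         	 */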
    7610 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu)
         	    % sc->sc_nqueues;
   7611 }
   7612 
   7613 /*
   7614  * wm_start:		[ifnet interface function]
   7615  *
   7616  *	Start packet transmission on the interface.
   7617  */
   7618 static void
   7619 wm_start(struct ifnet *ifp)
   7620 {
   7621 	struct wm_softc *sc = ifp->if_softc;
   7622 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7623 
   7624 #ifdef WM_MPSAFE
   7625 	KASSERT(if_is_mpsafe(ifp));
   7626 #endif
   7627 	/*
   7628 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7629 	 */
   7630 
   7631 	mutex_enter(txq->txq_lock);
   7632 	if (!txq->txq_stopping)
   7633 		wm_start_locked(ifp);
   7634 	mutex_exit(txq->txq_lock);
   7635 }
   7636 
   7637 static void
   7638 wm_start_locked(struct ifnet *ifp)
   7639 {
   7640 	struct wm_softc *sc = ifp->if_softc;
   7641 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7642 
   7643 	wm_send_common_locked(ifp, txq, false);
   7644 }
   7645 
   7646 static int
   7647 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7648 {
   7649 	int qid;
   7650 	struct wm_softc *sc = ifp->if_softc;
   7651 	struct wm_txqueue *txq;
   7652 
   7653 	qid = wm_select_txqueue(ifp, m);
   7654 	txq = &sc->sc_queue[qid].wmq_txq;
   7655 
   7656 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7657 		m_freem(m);
   7658 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7659 		return ENOBUFS;
   7660 	}
   7661 
   7662 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7663 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7664 	if (m->m_flags & M_MCAST)
   7665 		if_statinc_ref(nsr, if_omcasts);
   7666 	IF_STAT_PUTREF(ifp);
   7667 
   7668 	if (mutex_tryenter(txq->txq_lock)) {
   7669 		if (!txq->txq_stopping)
   7670 			wm_transmit_locked(ifp, txq);
   7671 		mutex_exit(txq->txq_lock);
   7672 	}
   7673 
   7674 	return 0;
   7675 }
   7676 
   7677 static void
   7678 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7679 {
   7680 
   7681 	wm_send_common_locked(ifp, txq, true);
   7682 }
   7683 
   7684 static void
   7685 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7686     bool is_transmit)
   7687 {
   7688 	struct wm_softc *sc = ifp->if_softc;
   7689 	struct mbuf *m0;
   7690 	struct wm_txsoft *txs;
   7691 	bus_dmamap_t dmamap;
   7692 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7693 	bus_addr_t curaddr;
   7694 	bus_size_t seglen, curlen;
   7695 	uint32_t cksumcmd;
   7696 	uint8_t cksumfields;
   7697 	bool remap = true;
   7698 
   7699 	KASSERT(mutex_owned(txq->txq_lock));
   7700 
   7701 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7702 		return;
   7703 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7704 		return;
   7705 
   7706 	/* Remember the previous number of free descriptors. */
   7707 	ofree = txq->txq_free;
   7708 
   7709 	/*
   7710 	 * Loop through the send queue, setting up transmit descriptors
   7711 	 * until we drain the queue, or use up all available transmit
   7712 	 * descriptors.
   7713 	 */
   7714 	for (;;) {
   7715 		m0 = NULL;
   7716 
   7717 		/* Get a work queue entry. */
   7718 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7719 			wm_txeof(txq, UINT_MAX);
   7720 			if (txq->txq_sfree == 0) {
   7721 				DPRINTF(WM_DEBUG_TX,
   7722 				    ("%s: TX: no free job descriptors\n",
   7723 					device_xname(sc->sc_dev)));
   7724 				WM_Q_EVCNT_INCR(txq, txsstall);
   7725 				break;
   7726 			}
   7727 		}
   7728 
   7729 		/* Grab a packet off the queue. */
   7730 		if (is_transmit)
   7731 			m0 = pcq_get(txq->txq_interq);
   7732 		else
   7733 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7734 		if (m0 == NULL)
   7735 			break;
   7736 
   7737 		DPRINTF(WM_DEBUG_TX,
   7738 		    ("%s: TX: have packet to transmit: %p\n",
   7739 			device_xname(sc->sc_dev), m0));
   7740 
   7741 		txs = &txq->txq_soft[txq->txq_snext];
   7742 		dmamap = txs->txs_dmamap;
   7743 
   7744 		use_tso = (m0->m_pkthdr.csum_flags &
   7745 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7746 
   7747 		/*
   7748 		 * So says the Linux driver:
   7749 		 * The controller does a simple calculation to make sure
   7750 		 * there is enough room in the FIFO before initiating the
   7751 		 * DMA for each buffer. The calc is:
   7752 		 *	4 = ceil(buffer len / MSS)
   7753 		 * To make sure we don't overrun the FIFO, adjust the max
   7754 		 * buffer len if the MSS drops.
   7755 		 */
   7756 		dmamap->dm_maxsegsz =
   7757 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7758 		    ? m0->m_pkthdr.segsz << 2
   7759 		    : WTX_MAX_LEN;
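         		/*
         		 * E.g. with an MSS of 1460, each DMA segment is capped
         		 * at 4 * 1460 = 5840 bytes (unless WTX_MAX_LEN is
         		 * smaller).
         		 */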
   7760 
   7761 		/*
   7762 		 * Load the DMA map.  If this fails, the packet either
   7763 		 * didn't fit in the allotted number of segments, or we
   7764 		 * were short on resources.  For the too-many-segments
   7765 		 * case, we simply report an error and drop the packet,
   7766 		 * since we can't sanely copy a jumbo packet to a single
   7767 		 * buffer.
   7768 		 */
   7769 retry:
   7770 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7771 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7772 		if (__predict_false(error)) {
   7773 			if (error == EFBIG) {
   7774 				if (remap == true) {
   7775 					struct mbuf *m;
   7776 
   7777 					remap = false;
   7778 					m = m_defrag(m0, M_NOWAIT);
   7779 					if (m != NULL) {
   7780 						WM_Q_EVCNT_INCR(txq, defrag);
   7781 						m0 = m;
   7782 						goto retry;
   7783 					}
   7784 				}
   7785 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7786 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7787 				    "DMA segments, dropping...\n",
   7788 				    device_xname(sc->sc_dev));
   7789 				wm_dump_mbuf_chain(sc, m0);
   7790 				m_freem(m0);
   7791 				continue;
   7792 			}
   7793 			/* Short on resources, just stop for now. */
   7794 			DPRINTF(WM_DEBUG_TX,
   7795 			    ("%s: TX: dmamap load failed: %d\n",
   7796 				device_xname(sc->sc_dev), error));
   7797 			break;
   7798 		}
   7799 
   7800 		segs_needed = dmamap->dm_nsegs;
   7801 		if (use_tso) {
   7802 			/* For sentinel descriptor; see below. */
   7803 			segs_needed++;
   7804 		}
   7805 
   7806 		/*
   7807 		 * Ensure we have enough descriptors free to describe
   7808 		 * the packet. Note, we always reserve one descriptor
   7809 		 * at the end of the ring due to the semantics of the
   7810 		 * TDT register, plus one more in the event we need
   7811 		 * to load offload context.
   7812 		 */
   7813 		if (segs_needed > txq->txq_free - 2) {
   7814 			/*
   7815 			 * Not enough free descriptors to transmit this
   7816 			 * packet.  We haven't committed anything yet,
   7817 			 * so just unload the DMA map, put the packet
    7818 			 * back on the queue, and punt. Notify the upper
   7819 			 * layer that there are no more slots left.
   7820 			 */
   7821 			DPRINTF(WM_DEBUG_TX,
   7822 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7823 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7824 				segs_needed, txq->txq_free - 1));
   7825 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7826 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7827 			WM_Q_EVCNT_INCR(txq, txdstall);
   7828 			break;
   7829 		}
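         		/*
         		 * Worked example of the "- 2" above (assumed numbers):
         		 * with txq_free == 10, a packet with segs_needed == 9
         		 * is still refused, since one descriptor is reserved
         		 * for the TDT full/empty distinction and one for a
         		 * possible checksum-context descriptor.
         		 */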
   7830 
   7831 		/*
   7832 		 * Check for 82547 Tx FIFO bug. We need to do this
   7833 		 * once we know we can transmit the packet, since we
   7834 		 * do some internal FIFO space accounting here.
   7835 		 */
   7836 		if (sc->sc_type == WM_T_82547 &&
   7837 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7838 			DPRINTF(WM_DEBUG_TX,
   7839 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7840 				device_xname(sc->sc_dev)));
   7841 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7842 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7843 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7844 			break;
   7845 		}
   7846 
   7847 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7848 
   7849 		DPRINTF(WM_DEBUG_TX,
   7850 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7851 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7852 
   7853 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7854 
   7855 		/*
   7856 		 * Store a pointer to the packet so that we can free it
   7857 		 * later.
   7858 		 *
   7859 		 * Initially, we consider the number of descriptors the
   7860 		 * packet uses the number of DMA segments.  This may be
   7861 		 * incremented by 1 if we do checksum offload (a descriptor
   7862 		 * is used to set the checksum context).
   7863 		 */
   7864 		txs->txs_mbuf = m0;
   7865 		txs->txs_firstdesc = txq->txq_next;
   7866 		txs->txs_ndesc = segs_needed;
   7867 
   7868 		/* Set up offload parameters for this packet. */
   7869 		if (m0->m_pkthdr.csum_flags &
   7870 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7871 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7872 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7873 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   7874 		} else {
    7875 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
    7876 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   7877 			cksumcmd = 0;
   7878 			cksumfields = 0;
   7879 		}
   7880 
   7881 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7882 
   7883 		/* Sync the DMA map. */
   7884 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7885 		    BUS_DMASYNC_PREWRITE);
   7886 
   7887 		/* Initialize the transmit descriptor. */
   7888 		for (nexttx = txq->txq_next, seg = 0;
   7889 		     seg < dmamap->dm_nsegs; seg++) {
   7890 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7891 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7892 			     seglen != 0;
   7893 			     curaddr += curlen, seglen -= curlen,
   7894 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7895 				curlen = seglen;
   7896 
   7897 				/*
   7898 				 * So says the Linux driver:
   7899 				 * Work around for premature descriptor
   7900 				 * write-backs in TSO mode.  Append a
   7901 				 * 4-byte sentinel descriptor.
   7902 				 */
   7903 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7904 				    curlen > 8)
   7905 					curlen -= 4;
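         				/*
         				 * Illustration (assumed lengths): if
         				 * the final segment is 100 bytes, curlen
         				 * becomes 96 here; the loop's
         				 * "seglen -= curlen" then leaves 4 bytes
         				 * to go out as one extra sentinel
         				 * descriptor on the next pass, which is
         				 * why segs_needed was incremented for
         				 * TSO above.
         				 */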
   7906 
   7907 				wm_set_dma_addr(
   7908 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7909 				txq->txq_descs[nexttx].wtx_cmdlen
   7910 				    = htole32(cksumcmd | curlen);
   7911 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7912 				    = 0;
   7913 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7914 				    = cksumfields;
    7915 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7916 				lasttx = nexttx;
   7917 
   7918 				DPRINTF(WM_DEBUG_TX,
   7919 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7920 					"len %#04zx\n",
   7921 					device_xname(sc->sc_dev), nexttx,
   7922 					(uint64_t)curaddr, curlen));
   7923 			}
   7924 		}
   7925 
   7926 		KASSERT(lasttx != -1);
   7927 
   7928 		/*
   7929 		 * Set up the command byte on the last descriptor of
   7930 		 * the packet. If we're in the interrupt delay window,
   7931 		 * delay the interrupt.
   7932 		 */
   7933 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7934 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7935 
   7936 		/*
   7937 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7938 		 * up the descriptor to encapsulate the packet for us.
   7939 		 *
   7940 		 * This is only valid on the last descriptor of the packet.
   7941 		 */
   7942 		if (vlan_has_tag(m0)) {
   7943 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7944 			    htole32(WTX_CMD_VLE);
   7945 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7946 			    = htole16(vlan_get_tag(m0));
   7947 		}
   7948 
   7949 		txs->txs_lastdesc = lasttx;
   7950 
   7951 		DPRINTF(WM_DEBUG_TX,
   7952 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7953 			device_xname(sc->sc_dev),
   7954 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7955 
   7956 		/* Sync the descriptors we're using. */
   7957 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7958 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7959 
   7960 		/* Give the packet to the chip. */
   7961 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7962 
   7963 		DPRINTF(WM_DEBUG_TX,
   7964 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7965 
   7966 		DPRINTF(WM_DEBUG_TX,
   7967 		    ("%s: TX: finished transmitting packet, job %d\n",
   7968 			device_xname(sc->sc_dev), txq->txq_snext));
   7969 
   7970 		/* Advance the tx pointer. */
   7971 		txq->txq_free -= txs->txs_ndesc;
   7972 		txq->txq_next = nexttx;
   7973 
   7974 		txq->txq_sfree--;
   7975 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7976 
   7977 		/* Pass the packet to any BPF listeners. */
   7978 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7979 	}
   7980 
   7981 	if (m0 != NULL) {
   7982 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7983 		WM_Q_EVCNT_INCR(txq, descdrop);
   7984 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7985 			__func__));
   7986 		m_freem(m0);
   7987 	}
   7988 
   7989 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7990 		/* No more slots; notify upper layer. */
   7991 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7992 	}
   7993 
   7994 	if (txq->txq_free != ofree) {
   7995 		/* Set a watchdog timer in case the chip flakes out. */
   7996 		txq->txq_lastsent = time_uptime;
   7997 		txq->txq_sending = true;
   7998 	}
   7999 }
   8000 
   8001 /*
   8002  * wm_nq_tx_offload:
   8003  *
   8004  *	Set up TCP/IP checksumming parameters for the
   8005  *	specified packet, for NEWQUEUE devices
   8006  */
   8007 static void
   8008 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8009     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8010 {
   8011 	struct mbuf *m0 = txs->txs_mbuf;
   8012 	uint32_t vl_len, mssidx, cmdc;
   8013 	struct ether_header *eh;
   8014 	int offset, iphl;
   8015 
   8016 	/*
   8017 	 * XXX It would be nice if the mbuf pkthdr had offset
   8018 	 * fields for the protocol headers.
   8019 	 */
   8020 	*cmdlenp = 0;
   8021 	*fieldsp = 0;
   8022 
   8023 	eh = mtod(m0, struct ether_header *);
   8024 	switch (htons(eh->ether_type)) {
   8025 	case ETHERTYPE_IP:
   8026 	case ETHERTYPE_IPV6:
   8027 		offset = ETHER_HDR_LEN;
   8028 		break;
   8029 
   8030 	case ETHERTYPE_VLAN:
   8031 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8032 		break;
   8033 
   8034 	default:
   8035 		/* Don't support this protocol or encapsulation. */
   8036 		*do_csum = false;
   8037 		return;
   8038 	}
   8039 	*do_csum = true;
   8040 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8041 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8042 
   8043 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8044 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8045 
   8046 	if ((m0->m_pkthdr.csum_flags &
   8047 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8048 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8049 	} else {
   8050 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8051 	}
   8052 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8053 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8054 
   8055 	if (vlan_has_tag(m0)) {
   8056 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8057 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8058 		*cmdlenp |= NQTX_CMD_VLE;
   8059 	}
   8060 
   8061 	mssidx = 0;
   8062 
   8063 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8064 		int hlen = offset + iphl;
   8065 		int tcp_hlen;
   8066 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8067 
   8068 		if (__predict_false(m0->m_len <
   8069 				    (hlen + sizeof(struct tcphdr)))) {
   8070 			/*
   8071 			 * TCP/IP headers are not in the first mbuf; we need
   8072 			 * to do this the slow and painful way. Let's just
   8073 			 * hope this doesn't happen very often.
   8074 			 */
   8075 			struct tcphdr th;
   8076 
   8077 			WM_Q_EVCNT_INCR(txq, tsopain);
   8078 
   8079 			m_copydata(m0, hlen, sizeof(th), &th);
   8080 			if (v4) {
   8081 				struct ip ip;
   8082 
   8083 				m_copydata(m0, offset, sizeof(ip), &ip);
   8084 				ip.ip_len = 0;
   8085 				m_copyback(m0,
   8086 				    offset + offsetof(struct ip, ip_len),
   8087 				    sizeof(ip.ip_len), &ip.ip_len);
   8088 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8089 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8090 			} else {
   8091 				struct ip6_hdr ip6;
   8092 
   8093 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8094 				ip6.ip6_plen = 0;
   8095 				m_copyback(m0,
   8096 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8097 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8098 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8099 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8100 			}
   8101 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8102 			    sizeof(th.th_sum), &th.th_sum);
   8103 
   8104 			tcp_hlen = th.th_off << 2;
   8105 		} else {
   8106 			/*
   8107 			 * TCP/IP headers are in the first mbuf; we can do
   8108 			 * this the easy way.
   8109 			 */
   8110 			struct tcphdr *th;
   8111 
   8112 			if (v4) {
   8113 				struct ip *ip =
   8114 				    (void *)(mtod(m0, char *) + offset);
   8115 				th = (void *)(mtod(m0, char *) + hlen);
   8116 
   8117 				ip->ip_len = 0;
   8118 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8119 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8120 			} else {
   8121 				struct ip6_hdr *ip6 =
   8122 				    (void *)(mtod(m0, char *) + offset);
   8123 				th = (void *)(mtod(m0, char *) + hlen);
   8124 
   8125 				ip6->ip6_plen = 0;
   8126 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8127 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8128 			}
   8129 			tcp_hlen = th->th_off << 2;
   8130 		}
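         		/*
         		 * In both paths above, ip_len/ip6_plen is zeroed and
         		 * th_sum is seeded with a pseudo-header checksum
         		 * computed over the addresses and protocol only. A
         		 * sketch of the idea: the controller patches the
         		 * per-segment length into each generated TSO segment
         		 * and folds it into this partial sum, so the seed must
         		 * not include any length itself.
         		 */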
   8131 		hlen += tcp_hlen;
   8132 		*cmdlenp |= NQTX_CMD_TSE;
   8133 
   8134 		if (v4) {
   8135 			WM_Q_EVCNT_INCR(txq, tso);
   8136 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8137 		} else {
   8138 			WM_Q_EVCNT_INCR(txq, tso6);
   8139 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8140 		}
   8141 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8142 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8143 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8144 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8145 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8146 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
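         		/*
         		 * Example of the mssidx packing (assumed values): for
         		 * an MSS of 1460 and a 20 byte TCP header, mssidx holds
         		 * (1460 << NQTXC_MSSIDX_MSS_SHIFT) |
         		 * (20 << NQTXC_MSSIDX_L4LEN_SHIFT); the KASSERTs check
         		 * that neither value overflows its field.
         		 */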
   8147 	} else {
   8148 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8149 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8150 	}
   8151 
   8152 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8153 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8154 		cmdc |= NQTXC_CMD_IP4;
   8155 	}
   8156 
   8157 	if (m0->m_pkthdr.csum_flags &
   8158 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8159 		WM_Q_EVCNT_INCR(txq, tusum);
   8160 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8161 			cmdc |= NQTXC_CMD_TCP;
   8162 		else
   8163 			cmdc |= NQTXC_CMD_UDP;
   8164 
   8165 		cmdc |= NQTXC_CMD_IP4;
   8166 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8167 	}
   8168 	if (m0->m_pkthdr.csum_flags &
   8169 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8170 		WM_Q_EVCNT_INCR(txq, tusum6);
   8171 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8172 			cmdc |= NQTXC_CMD_TCP;
   8173 		else
   8174 			cmdc |= NQTXC_CMD_UDP;
   8175 
   8176 		cmdc |= NQTXC_CMD_IP6;
   8177 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8178 	}
   8179 
   8180 	/*
    8181 	 * We don't have to write a context descriptor for every packet
    8182 	 * on NEWQUEUE controllers, that is, the 82575, 82576, 82580,
    8183 	 * I350, I354, I210 and I211; writing one per Tx queue for these
    8184 	 * controllers is enough.
    8185 	 * Writing a context descriptor for every packet adds overhead,
    8186 	 * but it does not cause problems.
   8187 	 */
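         	/*
         	 * A possible optimization (a sketch only, not implemented
         	 * here): cache the last vl_len/cmdc/mssidx per queue and skip
         	 * rewriting the context descriptor when they are unchanged,
         	 * much as the legacy wm_tx_offload() path tracks
         	 * txq_last_hw_cmd and friends.
         	 */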
   8188 	/* Fill in the context descriptor. */
   8189 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8190 	    htole32(vl_len);
   8191 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8192 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8193 	    htole32(cmdc);
   8194 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8195 	    htole32(mssidx);
   8196 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8197 	DPRINTF(WM_DEBUG_TX,
   8198 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8199 		txq->txq_next, 0, vl_len));
   8200 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8201 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8202 	txs->txs_ndesc++;
   8203 }
   8204 
   8205 /*
   8206  * wm_nq_start:		[ifnet interface function]
   8207  *
   8208  *	Start packet transmission on the interface for NEWQUEUE devices
   8209  */
   8210 static void
   8211 wm_nq_start(struct ifnet *ifp)
   8212 {
   8213 	struct wm_softc *sc = ifp->if_softc;
   8214 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8215 
   8216 #ifdef WM_MPSAFE
   8217 	KASSERT(if_is_mpsafe(ifp));
   8218 #endif
   8219 	/*
   8220 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8221 	 */
   8222 
   8223 	mutex_enter(txq->txq_lock);
   8224 	if (!txq->txq_stopping)
   8225 		wm_nq_start_locked(ifp);
   8226 	mutex_exit(txq->txq_lock);
   8227 }
   8228 
   8229 static void
   8230 wm_nq_start_locked(struct ifnet *ifp)
   8231 {
   8232 	struct wm_softc *sc = ifp->if_softc;
   8233 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8234 
   8235 	wm_nq_send_common_locked(ifp, txq, false);
   8236 }
   8237 
   8238 static int
   8239 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8240 {
   8241 	int qid;
   8242 	struct wm_softc *sc = ifp->if_softc;
   8243 	struct wm_txqueue *txq;
   8244 
   8245 	qid = wm_select_txqueue(ifp, m);
   8246 	txq = &sc->sc_queue[qid].wmq_txq;
   8247 
   8248 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8249 		m_freem(m);
   8250 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8251 		return ENOBUFS;
   8252 	}
   8253 
   8254 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8255 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8256 	if (m->m_flags & M_MCAST)
   8257 		if_statinc_ref(nsr, if_omcasts);
   8258 	IF_STAT_PUTREF(ifp);
   8259 
   8260 	/*
    8261 	 * There are two situations in which this mutex_tryenter() can fail
    8262 	 * at run time:
    8263 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8264 	 *     (2) contention with the deferred if_start softint
    8265 	 *         (wm_handle_queue())
    8266 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8267 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8268 	 * In case (2), the last packet enqueued to txq->txq_interq is also
    8269 	 * dequeued there, so it does not get stuck either.
   8270 	 */
   8271 	if (mutex_tryenter(txq->txq_lock)) {
   8272 		if (!txq->txq_stopping)
   8273 			wm_nq_transmit_locked(ifp, txq);
   8274 		mutex_exit(txq->txq_lock);
   8275 	}
   8276 
   8277 	return 0;
   8278 }
   8279 
   8280 static void
   8281 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8282 {
   8283 
   8284 	wm_nq_send_common_locked(ifp, txq, true);
   8285 }
   8286 
   8287 static void
   8288 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8289     bool is_transmit)
   8290 {
   8291 	struct wm_softc *sc = ifp->if_softc;
   8292 	struct mbuf *m0;
   8293 	struct wm_txsoft *txs;
   8294 	bus_dmamap_t dmamap;
   8295 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8296 	bool do_csum, sent;
   8297 	bool remap = true;
   8298 
   8299 	KASSERT(mutex_owned(txq->txq_lock));
   8300 
   8301 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8302 		return;
   8303 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8304 		return;
   8305 
   8306 	sent = false;
   8307 
   8308 	/*
   8309 	 * Loop through the send queue, setting up transmit descriptors
   8310 	 * until we drain the queue, or use up all available transmit
   8311 	 * descriptors.
   8312 	 */
   8313 	for (;;) {
   8314 		m0 = NULL;
   8315 
   8316 		/* Get a work queue entry. */
   8317 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8318 			wm_txeof(txq, UINT_MAX);
   8319 			if (txq->txq_sfree == 0) {
   8320 				DPRINTF(WM_DEBUG_TX,
   8321 				    ("%s: TX: no free job descriptors\n",
   8322 					device_xname(sc->sc_dev)));
   8323 				WM_Q_EVCNT_INCR(txq, txsstall);
   8324 				break;
   8325 			}
   8326 		}
   8327 
   8328 		/* Grab a packet off the queue. */
   8329 		if (is_transmit)
   8330 			m0 = pcq_get(txq->txq_interq);
   8331 		else
   8332 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8333 		if (m0 == NULL)
   8334 			break;
   8335 
   8336 		DPRINTF(WM_DEBUG_TX,
   8337 		    ("%s: TX: have packet to transmit: %p\n",
   8338 		    device_xname(sc->sc_dev), m0));
   8339 
   8340 		txs = &txq->txq_soft[txq->txq_snext];
   8341 		dmamap = txs->txs_dmamap;
   8342 
   8343 		/*
   8344 		 * Load the DMA map.  If this fails, the packet either
   8345 		 * didn't fit in the allotted number of segments, or we
   8346 		 * were short on resources.  For the too-many-segments
   8347 		 * case, we simply report an error and drop the packet,
   8348 		 * since we can't sanely copy a jumbo packet to a single
   8349 		 * buffer.
   8350 		 */
   8351 retry:
   8352 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8353 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8354 		if (__predict_false(error)) {
   8355 			if (error == EFBIG) {
   8356 				if (remap == true) {
   8357 					struct mbuf *m;
   8358 
   8359 					remap = false;
   8360 					m = m_defrag(m0, M_NOWAIT);
   8361 					if (m != NULL) {
   8362 						WM_Q_EVCNT_INCR(txq, defrag);
   8363 						m0 = m;
   8364 						goto retry;
   8365 					}
   8366 				}
   8367 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8368 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8369 				    "DMA segments, dropping...\n",
   8370 				    device_xname(sc->sc_dev));
   8371 				wm_dump_mbuf_chain(sc, m0);
   8372 				m_freem(m0);
   8373 				continue;
   8374 			}
   8375 			/* Short on resources, just stop for now. */
   8376 			DPRINTF(WM_DEBUG_TX,
   8377 			    ("%s: TX: dmamap load failed: %d\n",
   8378 				device_xname(sc->sc_dev), error));
   8379 			break;
   8380 		}
   8381 
   8382 		segs_needed = dmamap->dm_nsegs;
   8383 
   8384 		/*
   8385 		 * Ensure we have enough descriptors free to describe
   8386 		 * the packet. Note, we always reserve one descriptor
   8387 		 * at the end of the ring due to the semantics of the
   8388 		 * TDT register, plus one more in the event we need
   8389 		 * to load offload context.
   8390 		 */
   8391 		if (segs_needed > txq->txq_free - 2) {
   8392 			/*
   8393 			 * Not enough free descriptors to transmit this
   8394 			 * packet.  We haven't committed anything yet,
   8395 			 * so just unload the DMA map, put the packet
    8396 			 * back on the queue, and punt. Notify the upper
   8397 			 * layer that there are no more slots left.
   8398 			 */
   8399 			DPRINTF(WM_DEBUG_TX,
   8400 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8401 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8402 				segs_needed, txq->txq_free - 1));
   8403 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8404 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8405 			WM_Q_EVCNT_INCR(txq, txdstall);
   8406 			break;
   8407 		}
   8408 
   8409 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8410 
   8411 		DPRINTF(WM_DEBUG_TX,
   8412 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8413 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8414 
   8415 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8416 
   8417 		/*
   8418 		 * Store a pointer to the packet so that we can free it
   8419 		 * later.
   8420 		 *
   8421 		 * Initially, we consider the number of descriptors the
   8422 		 * packet uses the number of DMA segments.  This may be
   8423 		 * incremented by 1 if we do checksum offload (a descriptor
   8424 		 * is used to set the checksum context).
   8425 		 */
   8426 		txs->txs_mbuf = m0;
   8427 		txs->txs_firstdesc = txq->txq_next;
   8428 		txs->txs_ndesc = segs_needed;
   8429 
   8430 		/* Set up offload parameters for this packet. */
   8431 		uint32_t cmdlen, fields, dcmdlen;
   8432 		if (m0->m_pkthdr.csum_flags &
   8433 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8434 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8435 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8436 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8437 			    &do_csum);
   8438 		} else {
   8439 			do_csum = false;
   8440 			cmdlen = 0;
   8441 			fields = 0;
   8442 		}
   8443 
   8444 		/* Sync the DMA map. */
   8445 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8446 		    BUS_DMASYNC_PREWRITE);
   8447 
   8448 		/* Initialize the first transmit descriptor. */
   8449 		nexttx = txq->txq_next;
   8450 		if (!do_csum) {
   8451 			/* Setup a legacy descriptor */
   8452 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8453 			    dmamap->dm_segs[0].ds_addr);
   8454 			txq->txq_descs[nexttx].wtx_cmdlen =
   8455 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8456 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8457 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8458 			if (vlan_has_tag(m0)) {
   8459 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8460 				    htole32(WTX_CMD_VLE);
   8461 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8462 				    htole16(vlan_get_tag(m0));
   8463 			} else
    8464 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8465 
   8466 			dcmdlen = 0;
   8467 		} else {
   8468 			/* Setup an advanced data descriptor */
   8469 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8470 			    htole64(dmamap->dm_segs[0].ds_addr);
   8471 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8472 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8473 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8474 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8475 			    htole32(fields);
   8476 			DPRINTF(WM_DEBUG_TX,
   8477 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8478 				device_xname(sc->sc_dev), nexttx,
   8479 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8480 			DPRINTF(WM_DEBUG_TX,
   8481 			    ("\t 0x%08x%08x\n", fields,
   8482 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8483 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8484 		}
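         		/*
         		 * Sketch of the encoding used above (assuming the
         		 * usual advanced-descriptor layout): the low bits of
         		 * nqtxd_cmdlen carry the buffer length and the high
         		 * bits carry NQTX_DTYP_D, NQTX_CMD_DEXT and friends,
         		 * which is why the KASSERT checks that ds_len and
         		 * cmdlen occupy disjoint bits before they are OR'd
         		 * together.
         		 */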
   8485 
   8486 		lasttx = nexttx;
   8487 		nexttx = WM_NEXTTX(txq, nexttx);
   8488 		/*
    8489 		 * Fill in the next descriptors. The legacy and advanced
    8490 		 * formats are the same from here on.
   8491 		 */
   8492 		for (seg = 1; seg < dmamap->dm_nsegs;
   8493 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8494 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8495 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8496 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8497 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8498 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8499 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8500 			lasttx = nexttx;
   8501 
   8502 			DPRINTF(WM_DEBUG_TX,
   8503 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8504 				device_xname(sc->sc_dev), nexttx,
   8505 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8506 				dmamap->dm_segs[seg].ds_len));
   8507 		}
   8508 
   8509 		KASSERT(lasttx != -1);
   8510 
   8511 		/*
   8512 		 * Set up the command byte on the last descriptor of
   8513 		 * the packet. If we're in the interrupt delay window,
   8514 		 * delay the interrupt.
   8515 		 */
   8516 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8517 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8518 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8519 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8520 
   8521 		txs->txs_lastdesc = lasttx;
   8522 
   8523 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8524 		    device_xname(sc->sc_dev),
   8525 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8526 
   8527 		/* Sync the descriptors we're using. */
   8528 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8529 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8530 
   8531 		/* Give the packet to the chip. */
   8532 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8533 		sent = true;
   8534 
   8535 		DPRINTF(WM_DEBUG_TX,
   8536 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8537 
   8538 		DPRINTF(WM_DEBUG_TX,
   8539 		    ("%s: TX: finished transmitting packet, job %d\n",
   8540 			device_xname(sc->sc_dev), txq->txq_snext));
   8541 
   8542 		/* Advance the tx pointer. */
   8543 		txq->txq_free -= txs->txs_ndesc;
   8544 		txq->txq_next = nexttx;
   8545 
   8546 		txq->txq_sfree--;
   8547 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8548 
   8549 		/* Pass the packet to any BPF listeners. */
   8550 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8551 	}
   8552 
   8553 	if (m0 != NULL) {
   8554 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8555 		WM_Q_EVCNT_INCR(txq, descdrop);
   8556 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8557 			__func__));
   8558 		m_freem(m0);
   8559 	}
   8560 
   8561 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8562 		/* No more slots; notify upper layer. */
   8563 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8564 	}
   8565 
   8566 	if (sent) {
   8567 		/* Set a watchdog timer in case the chip flakes out. */
   8568 		txq->txq_lastsent = time_uptime;
   8569 		txq->txq_sending = true;
   8570 	}
   8571 }
   8572 
   8573 static void
   8574 wm_deferred_start_locked(struct wm_txqueue *txq)
   8575 {
   8576 	struct wm_softc *sc = txq->txq_sc;
   8577 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8578 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8579 	int qid = wmq->wmq_id;
   8580 
   8581 	KASSERT(mutex_owned(txq->txq_lock));
   8582 
   8583 	if (txq->txq_stopping) {
   8584 		mutex_exit(txq->txq_lock);
   8585 		return;
   8586 	}
   8587 
   8588 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8589 		/* XXX needed for ALTQ or single-CPU systems */
   8590 		if (qid == 0)
   8591 			wm_nq_start_locked(ifp);
   8592 		wm_nq_transmit_locked(ifp, txq);
   8593 	} else {
    8594 		/* XXX needed for ALTQ or single-CPU systems */
   8595 		if (qid == 0)
   8596 			wm_start_locked(ifp);
   8597 		wm_transmit_locked(ifp, txq);
   8598 	}
   8599 }
   8600 
   8601 /* Interrupt */
   8602 
   8603 /*
   8604  * wm_txeof:
   8605  *
   8606  *	Helper; handle transmit interrupts.
   8607  */
   8608 static bool
   8609 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8610 {
   8611 	struct wm_softc *sc = txq->txq_sc;
   8612 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8613 	struct wm_txsoft *txs;
   8614 	int count = 0;
   8615 	int i;
   8616 	uint8_t status;
   8617 	bool more = false;
   8618 
   8619 	KASSERT(mutex_owned(txq->txq_lock));
   8620 
   8621 	if (txq->txq_stopping)
   8622 		return false;
   8623 
   8624 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8625 
   8626 	/*
   8627 	 * Go through the Tx list and free mbufs for those
   8628 	 * frames which have been transmitted.
   8629 	 */
   8630 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8631 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8632 		if (limit-- == 0) {
   8633 			more = true;
   8634 			DPRINTF(WM_DEBUG_TX,
   8635 			    ("%s: TX: loop limited, job %d is not processed\n",
   8636 				device_xname(sc->sc_dev), i));
   8637 			break;
   8638 		}
   8639 
   8640 		txs = &txq->txq_soft[i];
   8641 
   8642 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8643 			device_xname(sc->sc_dev), i));
   8644 
   8645 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8646 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8647 
   8648 		status =
   8649 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8650 		if ((status & WTX_ST_DD) == 0) {
   8651 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8652 			    BUS_DMASYNC_PREREAD);
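			/*
			 * This job is not done yet. The PREREAD sync above
			 * hands the descriptor back to the device so a later
			 * poll rereads fresh status from DMA memory, and we
			 * stop scanning here.
			 */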
   8653 			break;
   8654 		}
   8655 
   8656 		count++;
   8657 		DPRINTF(WM_DEBUG_TX,
   8658 		    ("%s: TX: job %d done: descs %d..%d\n",
   8659 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8660 		    txs->txs_lastdesc));
   8661 
   8662 		/*
   8663 		 * XXX We should probably be using the statistics
   8664 		 * XXX registers, but I don't know if they exist
   8665 		 * XXX on chips before the i82544.
   8666 		 */
   8667 
   8668 #ifdef WM_EVENT_COUNTERS
   8669 		if (status & WTX_ST_TU)
   8670 			WM_Q_EVCNT_INCR(txq, underrun);
   8671 #endif /* WM_EVENT_COUNTERS */
   8672 
   8673 		/*
    8674 		 * The datasheets for the 82574 and newer say the status
    8675 		 * field has neither an EC (Excessive Collision) bit nor an
    8676 		 * LC (Late Collision) bit; both are reserved. Refer to the
    8677 		 * "PCIe GbE Controller Open Source Software Developer's
    8678 		 * Manual", the 82574 datasheet and newer ones.
    8679 		 * XXX The LC bit has been observed set on an I218 even
    8680 		 * though the media was full duplex, so the bit might have
    8681 		 * some other meaning there (no documentation found).
   8682 		 */
   8683 
   8684 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8685 		    && ((sc->sc_type < WM_T_82574)
   8686 			|| (sc->sc_type == WM_T_80003))) {
   8687 			if_statinc(ifp, if_oerrors);
   8688 			if (status & WTX_ST_LC)
   8689 				log(LOG_WARNING, "%s: late collision\n",
   8690 				    device_xname(sc->sc_dev));
   8691 			else if (status & WTX_ST_EC) {
   8692 				if_statadd(ifp, if_collisions,
   8693 				    TX_COLLISION_THRESHOLD + 1);
   8694 				log(LOG_WARNING, "%s: excessive collisions\n",
   8695 				    device_xname(sc->sc_dev));
   8696 			}
   8697 		} else
   8698 			if_statinc(ifp, if_opackets);
   8699 
   8700 		txq->txq_packets++;
   8701 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8702 
   8703 		txq->txq_free += txs->txs_ndesc;
   8704 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8705 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8706 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8707 		m_freem(txs->txs_mbuf);
   8708 		txs->txs_mbuf = NULL;
   8709 	}
   8710 
   8711 	/* Update the dirty transmit buffer pointer. */
   8712 	txq->txq_sdirty = i;
   8713 	DPRINTF(WM_DEBUG_TX,
   8714 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8715 
   8716 	if (count != 0)
   8717 		rnd_add_uint32(&sc->rnd_source, count);
   8718 
   8719 	/*
   8720 	 * If there are no more pending transmissions, cancel the watchdog
   8721 	 * timer.
   8722 	 */
   8723 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8724 		txq->txq_sending = false;
   8725 
   8726 	return more;
   8727 }
   8728 
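         /*
          * The wm_rxdesc_get_*() inline accessors below hide the three Rx
          * descriptor layouts from wm_rxeof(): the 82574 uses extended
          * descriptors, NEWQUEUE (82575 and newer) chips use advanced
          * descriptors, and everything else uses the legacy format.
          */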
   8729 static inline uint32_t
   8730 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8731 {
   8732 	struct wm_softc *sc = rxq->rxq_sc;
   8733 
   8734 	if (sc->sc_type == WM_T_82574)
   8735 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8736 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8737 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8738 	else
   8739 		return rxq->rxq_descs[idx].wrx_status;
   8740 }
   8741 
   8742 static inline uint32_t
   8743 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8744 {
   8745 	struct wm_softc *sc = rxq->rxq_sc;
   8746 
   8747 	if (sc->sc_type == WM_T_82574)
   8748 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8749 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8750 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8751 	else
   8752 		return rxq->rxq_descs[idx].wrx_errors;
   8753 }
   8754 
   8755 static inline uint16_t
   8756 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8757 {
   8758 	struct wm_softc *sc = rxq->rxq_sc;
   8759 
   8760 	if (sc->sc_type == WM_T_82574)
   8761 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8762 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8763 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8764 	else
   8765 		return rxq->rxq_descs[idx].wrx_special;
   8766 }
   8767 
   8768 static inline int
   8769 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8770 {
   8771 	struct wm_softc *sc = rxq->rxq_sc;
   8772 
   8773 	if (sc->sc_type == WM_T_82574)
   8774 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8775 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8776 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8777 	else
   8778 		return rxq->rxq_descs[idx].wrx_len;
   8779 }
   8780 
   8781 #ifdef WM_DEBUG
   8782 static inline uint32_t
   8783 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8784 {
   8785 	struct wm_softc *sc = rxq->rxq_sc;
   8786 
   8787 	if (sc->sc_type == WM_T_82574)
   8788 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8789 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8790 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8791 	else
   8792 		return 0;
   8793 }
   8794 
   8795 static inline uint8_t
   8796 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8797 {
   8798 	struct wm_softc *sc = rxq->rxq_sc;
   8799 
   8800 	if (sc->sc_type == WM_T_82574)
   8801 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8802 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8803 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8804 	else
   8805 		return 0;
   8806 }
   8807 #endif /* WM_DEBUG */
   8808 
   8809 static inline bool
   8810 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8811     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8812 {
   8813 
   8814 	if (sc->sc_type == WM_T_82574)
   8815 		return (status & ext_bit) != 0;
   8816 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8817 		return (status & nq_bit) != 0;
   8818 	else
   8819 		return (status & legacy_bit) != 0;
   8820 }
   8821 
   8822 static inline bool
   8823 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8824     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8825 {
   8826 
   8827 	if (sc->sc_type == WM_T_82574)
   8828 		return (error & ext_bit) != 0;
   8829 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8830 		return (error & nq_bit) != 0;
   8831 	else
   8832 		return (error & legacy_bit) != 0;
   8833 }
   8834 
   8835 static inline bool
   8836 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8837 {
   8838 
   8839 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8840 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8841 		return true;
   8842 	else
   8843 		return false;
   8844 }
   8845 
   8846 static inline bool
   8847 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8848 {
   8849 	struct wm_softc *sc = rxq->rxq_sc;
   8850 
   8851 	/* XXX missing error bit for newqueue? */
   8852 	if (wm_rxdesc_is_set_error(sc, errors,
   8853 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8854 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8855 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8856 		NQRXC_ERROR_RXE)) {
   8857 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8858 		    EXTRXC_ERROR_SE, 0))
   8859 			log(LOG_WARNING, "%s: symbol error\n",
   8860 			    device_xname(sc->sc_dev));
   8861 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8862 		    EXTRXC_ERROR_SEQ, 0))
   8863 			log(LOG_WARNING, "%s: receive sequence error\n",
   8864 			    device_xname(sc->sc_dev));
   8865 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8866 		    EXTRXC_ERROR_CE, 0))
   8867 			log(LOG_WARNING, "%s: CRC error\n",
   8868 			    device_xname(sc->sc_dev));
   8869 		return true;
   8870 	}
   8871 
   8872 	return false;
   8873 }
   8874 
   8875 static inline bool
   8876 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8877 {
   8878 	struct wm_softc *sc = rxq->rxq_sc;
   8879 
   8880 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8881 		NQRXC_STATUS_DD)) {
   8882 		/* We have processed all of the receive descriptors. */
   8883 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8884 		return false;
   8885 	}
   8886 
   8887 	return true;
   8888 }
   8889 
   8890 static inline bool
   8891 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8892     uint16_t vlantag, struct mbuf *m)
   8893 {
   8894 
   8895 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8896 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8897 		vlan_set_tag(m, le16toh(vlantag));
   8898 	}
   8899 
   8900 	return true;
   8901 }
   8902 
   8903 static inline void
   8904 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8905     uint32_t errors, struct mbuf *m)
   8906 {
   8907 	struct wm_softc *sc = rxq->rxq_sc;
   8908 
   8909 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8910 		if (wm_rxdesc_is_set_status(sc, status,
   8911 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8912 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8913 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8914 			if (wm_rxdesc_is_set_error(sc, errors,
   8915 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8916 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8917 		}
   8918 		if (wm_rxdesc_is_set_status(sc, status,
   8919 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8920 			/*
   8921 			 * Note: we don't know if this was TCP or UDP,
   8922 			 * so we just set both bits, and expect the
   8923 			 * upper layers to deal.
   8924 			 */
   8925 			WM_Q_EVCNT_INCR(rxq, tusum);
   8926 			m->m_pkthdr.csum_flags |=
   8927 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8928 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8929 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8930 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8931 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8932 		}
   8933 	}
   8934 }
   8935 
   8936 /*
   8937  * wm_rxeof:
   8938  *
   8939  *	Helper; handle receive interrupts.
   8940  */
   8941 static bool
   8942 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8943 {
   8944 	struct wm_softc *sc = rxq->rxq_sc;
   8945 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8946 	struct wm_rxsoft *rxs;
   8947 	struct mbuf *m;
   8948 	int i, len;
   8949 	int count = 0;
   8950 	uint32_t status, errors;
   8951 	uint16_t vlantag;
   8952 	bool more = false;
   8953 
   8954 	KASSERT(mutex_owned(rxq->rxq_lock));
   8955 
   8956 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8957 		if (limit-- == 0) {
   8958 			rxq->rxq_ptr = i;
   8959 			more = true;
   8960 			DPRINTF(WM_DEBUG_RX,
   8961 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8962 				device_xname(sc->sc_dev), i));
   8963 			break;
   8964 		}
   8965 
   8966 		rxs = &rxq->rxq_soft[i];
   8967 
   8968 		DPRINTF(WM_DEBUG_RX,
   8969 		    ("%s: RX: checking descriptor %d\n",
   8970 			device_xname(sc->sc_dev), i));
   8971 		wm_cdrxsync(rxq, i,
   8972 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8973 
   8974 		status = wm_rxdesc_get_status(rxq, i);
   8975 		errors = wm_rxdesc_get_errors(rxq, i);
   8976 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8977 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8978 #ifdef WM_DEBUG
   8979 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8980 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8981 #endif
   8982 
   8983 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8984 			/*
    8985 			 * Update the receive pointer while holding
    8986 			 * rxq_lock, keeping it consistent with the counters.
   8987 			 */
   8988 			rxq->rxq_ptr = i;
   8989 			break;
   8990 		}
   8991 
   8992 		count++;
   8993 		if (__predict_false(rxq->rxq_discard)) {
   8994 			DPRINTF(WM_DEBUG_RX,
   8995 			    ("%s: RX: discarding contents of descriptor %d\n",
   8996 				device_xname(sc->sc_dev), i));
   8997 			wm_init_rxdesc(rxq, i);
   8998 			if (wm_rxdesc_is_eop(rxq, status)) {
   8999 				/* Reset our state. */
   9000 				DPRINTF(WM_DEBUG_RX,
   9001 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9002 					device_xname(sc->sc_dev)));
   9003 				rxq->rxq_discard = 0;
   9004 			}
   9005 			continue;
   9006 		}
   9007 
   9008 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9009 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9010 
   9011 		m = rxs->rxs_mbuf;
   9012 
   9013 		/*
   9014 		 * Add a new receive buffer to the ring, unless of
   9015 		 * course the length is zero. Treat the latter as a
   9016 		 * failed mapping.
   9017 		 */
   9018 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9019 			/*
   9020 			 * Failed, throw away what we've done so
   9021 			 * far, and discard the rest of the packet.
   9022 			 */
   9023 			if_statinc(ifp, if_ierrors);
   9024 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9025 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9026 			wm_init_rxdesc(rxq, i);
   9027 			if (!wm_rxdesc_is_eop(rxq, status))
   9028 				rxq->rxq_discard = 1;
   9029 			if (rxq->rxq_head != NULL)
   9030 				m_freem(rxq->rxq_head);
   9031 			WM_RXCHAIN_RESET(rxq);
   9032 			DPRINTF(WM_DEBUG_RX,
   9033 			    ("%s: RX: Rx buffer allocation failed, "
   9034 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9035 				rxq->rxq_discard ? " (discard)" : ""));
   9036 			continue;
   9037 		}
   9038 
   9039 		m->m_len = len;
   9040 		rxq->rxq_len += len;
   9041 		DPRINTF(WM_DEBUG_RX,
   9042 		    ("%s: RX: buffer at %p len %d\n",
   9043 			device_xname(sc->sc_dev), m->m_data, len));
   9044 
   9045 		/* If this is not the end of the packet, keep looking. */
   9046 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9047 			WM_RXCHAIN_LINK(rxq, m);
   9048 			DPRINTF(WM_DEBUG_RX,
   9049 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9050 				device_xname(sc->sc_dev), rxq->rxq_len));
   9051 			continue;
   9052 		}
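         		/*
         		 * Illustration (assumed sizes): a 9000 byte jumbo
         		 * frame received into 2048 byte buffers arrives as five
         		 * descriptors; the first four take the not-yet-EOP path
         		 * above and are linked onto rxq_head via
         		 * WM_RXCHAIN_LINK(), and only the fifth, with EOP set,
         		 * falls through to hand the whole chain up.
         		 */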
   9053 
   9054 		/*
    9055 		 * Okay, we have the entire packet now. The chip is
    9056 		 * configured to include the FCS except on the I350, I354
    9057 		 * and I21[01] (not all chips can be configured to strip
    9058 		 * it), so we need to trim it. We may need to adjust the
    9059 		 * length of the previous mbuf in the chain if the current
    9060 		 * mbuf is too short.
    9061 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    9062 		 * register is always set on the I350, so we don't trim there.
   9063 		 */
   9064 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   9065 		    && (sc->sc_type != WM_T_I210)
   9066 		    && (sc->sc_type != WM_T_I211)) {
   9067 			if (m->m_len < ETHER_CRC_LEN) {
   9068 				rxq->rxq_tail->m_len
   9069 				    -= (ETHER_CRC_LEN - m->m_len);
   9070 				m->m_len = 0;
   9071 			} else
   9072 				m->m_len -= ETHER_CRC_LEN;
   9073 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9074 		} else
   9075 			len = rxq->rxq_len;
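         		/*
         		 * Example of the adjustment above (assumed lengths): if
         		 * the 4 byte FCS straddles descriptors so the final
         		 * mbuf holds only 2 of its bytes, those 2 bytes plus
         		 * the last 2 bytes of the previous mbuf are CRC, so the
         		 * tail mbuf is trimmed as well.
         		 */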
   9076 
   9077 		WM_RXCHAIN_LINK(rxq, m);
   9078 
   9079 		*rxq->rxq_tailp = NULL;
   9080 		m = rxq->rxq_head;
   9081 
   9082 		WM_RXCHAIN_RESET(rxq);
   9083 
   9084 		DPRINTF(WM_DEBUG_RX,
   9085 		    ("%s: RX: have entire packet, len -> %d\n",
   9086 			device_xname(sc->sc_dev), len));
   9087 
   9088 		/* If an error occurred, update stats and drop the packet. */
   9089 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9090 			m_freem(m);
   9091 			continue;
   9092 		}
   9093 
   9094 		/* No errors.  Receive the packet. */
   9095 		m_set_rcvif(m, ifp);
   9096 		m->m_pkthdr.len = len;
   9097 		/*
    9098 		 * TODO
    9099 		 * We should save the rsshash and rsstype in this mbuf.
   9100 		 */
   9101 		DPRINTF(WM_DEBUG_RX,
   9102 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9103 			device_xname(sc->sc_dev), rsstype, rsshash));
   9104 
   9105 		/*
   9106 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9107 		 * for us.  Associate the tag with the packet.
   9108 		 */
   9109 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9110 			continue;
   9111 
   9112 		/* Set up checksum info for this packet. */
   9113 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9114 		/*
    9115 		 * Update the receive pointer while holding rxq_lock,
    9116 		 * keeping it consistent with the counters.
   9117 		 */
   9118 		rxq->rxq_ptr = i;
   9119 		rxq->rxq_packets++;
   9120 		rxq->rxq_bytes += len;
   9121 		mutex_exit(rxq->rxq_lock);
   9122 
   9123 		/* Pass it on. */
   9124 		if_percpuq_enqueue(sc->sc_ipq, m);
   9125 
   9126 		mutex_enter(rxq->rxq_lock);
   9127 
   9128 		if (rxq->rxq_stopping)
   9129 			break;
   9130 	}
   9131 
   9132 	if (count != 0)
   9133 		rnd_add_uint32(&sc->rnd_source, count);
   9134 
   9135 	DPRINTF(WM_DEBUG_RX,
   9136 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9137 
   9138 	return more;
   9139 }
   9140 
   9141 /*
   9142  * wm_linkintr_gmii:
   9143  *
   9144  *	Helper; handle link interrupts for GMII.
   9145  */
   9146 static void
   9147 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9148 {
   9149 	device_t dev = sc->sc_dev;
   9150 	uint32_t status, reg;
   9151 	bool link;
   9152 	int rv;
   9153 
   9154 	KASSERT(WM_CORE_LOCKED(sc));
   9155 
   9156 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9157 		__func__));
   9158 
   9159 	if ((icr & ICR_LSC) == 0) {
   9160 		if (icr & ICR_RXSEQ)
   9161 			DPRINTF(WM_DEBUG_LINK,
   9162 			    ("%s: LINK Receive sequence error\n",
   9163 				device_xname(dev)));
   9164 		return;
   9165 	}
   9166 
   9167 	/* Link status changed */
   9168 	status = CSR_READ(sc, WMREG_STATUS);
   9169 	link = status & STATUS_LU;
   9170 	if (link) {
   9171 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9172 			device_xname(dev),
   9173 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9174 	} else {
   9175 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9176 			device_xname(dev)));
   9177 	}
   9178 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9179 		wm_gig_downshift_workaround_ich8lan(sc);
   9180 
   9181 	if ((sc->sc_type == WM_T_ICH8)
   9182 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9183 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9184 	}
   9185 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9186 		device_xname(dev)));
   9187 	mii_pollstat(&sc->sc_mii);
   9188 	if (sc->sc_type == WM_T_82543) {
   9189 		int miistatus, active;
   9190 
   9191 		/*
    9192 		 * With the 82543, we need to force the MAC's
    9193 		 * speed and duplex settings to match the PHY's
    9194 		 * configuration.
   9195 		 */
   9196 		miistatus = sc->sc_mii.mii_media_status;
   9197 
   9198 		if (miistatus & IFM_ACTIVE) {
   9199 			active = sc->sc_mii.mii_media_active;
   9200 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9201 			switch (IFM_SUBTYPE(active)) {
   9202 			case IFM_10_T:
   9203 				sc->sc_ctrl |= CTRL_SPEED_10;
   9204 				break;
   9205 			case IFM_100_TX:
   9206 				sc->sc_ctrl |= CTRL_SPEED_100;
   9207 				break;
   9208 			case IFM_1000_T:
   9209 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9210 				break;
   9211 			default:
   9212 				/*
   9213 				 * Fiber?
    9214 				 * Should not enter here.
   9215 				 */
   9216 				device_printf(dev, "unknown media (%x)\n",
   9217 				    active);
   9218 				break;
   9219 			}
   9220 			if (active & IFM_FDX)
   9221 				sc->sc_ctrl |= CTRL_FD;
   9222 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9223 		}
   9224 	} else if (sc->sc_type == WM_T_PCH) {
   9225 		wm_k1_gig_workaround_hv(sc,
   9226 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9227 	}
   9228 
   9229 	/*
   9230 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9231 	 * aggressive resulting in many collisions. To avoid this, increase
   9232 	 * the IPG and reduce Rx latency in the PHY.
   9233 	 */
   9234 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9235 	    && link) {
   9236 		uint32_t tipg_reg;
   9237 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9238 		bool fdx;
   9239 		uint16_t emi_addr, emi_val;
   9240 
   9241 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9242 		tipg_reg &= ~TIPG_IPGT_MASK;
   9243 		fdx = status & STATUS_FD;
   9244 
   9245 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9246 			tipg_reg |= 0xff;
   9247 			/* Reduce Rx latency in analog PHY */
   9248 			emi_val = 0;
   9249 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9250 		    fdx && speed != STATUS_SPEED_1000) {
   9251 			tipg_reg |= 0xc;
   9252 			emi_val = 1;
   9253 		} else {
    9254 			/* Fall back to the default values */
   9255 			tipg_reg |= 0x08;
   9256 			emi_val = 1;
   9257 		}
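         		/*
         		 * Sketch of the values chosen above (assuming the
         		 * usual TIPG semantics): TIPG_IPGT_MASK covers the
         		 * back-to-back transmit IPG, so 0xff stretches the gap
         		 * as far as it goes for the collision-prone 10Mbps
         		 * half-duplex case, while 0x08 is the ordinary default
         		 * and 0xc a milder stretch.
         		 */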
   9258 
   9259 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9260 
   9261 		rv = sc->phy.acquire(sc);
   9262 		if (rv)
   9263 			return;
   9264 
   9265 		if (sc->sc_type == WM_T_PCH2)
   9266 			emi_addr = I82579_RX_CONFIG;
   9267 		else
   9268 			emi_addr = I217_RX_CONFIG;
   9269 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9270 
   9271 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9272 			uint16_t phy_reg;
   9273 
   9274 			sc->phy.readreg_locked(dev, 2,
   9275 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9276 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9277 			if (speed == STATUS_SPEED_100
   9278 			    || speed == STATUS_SPEED_10)
   9279 				phy_reg |= 0x3e8;
   9280 			else
   9281 				phy_reg |= 0xfa;
   9282 			sc->phy.writereg_locked(dev, 2,
   9283 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9284 
   9285 			if (speed == STATUS_SPEED_1000) {
   9286 				sc->phy.readreg_locked(dev, 2,
   9287 				    HV_PM_CTRL, &phy_reg);
   9288 
   9289 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9290 
   9291 				sc->phy.writereg_locked(dev, 2,
   9292 				    HV_PM_CTRL, phy_reg);
   9293 			}
   9294 		}
   9295 		sc->phy.release(sc);
   9296 
   9297 		if (rv)
   9298 			return;
   9299 
   9300 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9301 			uint16_t data, ptr_gap;
   9302 
   9303 			if (speed == STATUS_SPEED_1000) {
   9304 				rv = sc->phy.acquire(sc);
   9305 				if (rv)
   9306 					return;
   9307 
   9308 				rv = sc->phy.readreg_locked(dev, 2,
   9309 				    I219_UNKNOWN1, &data);
   9310 				if (rv) {
   9311 					sc->phy.release(sc);
   9312 					return;
   9313 				}
   9314 
   9315 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9316 				if (ptr_gap < 0x18) {
   9317 					data &= ~(0x3ff << 2);
   9318 					data |= (0x18 << 2);
   9319 					rv = sc->phy.writereg_locked(dev,
   9320 					    2, I219_UNKNOWN1, data);
   9321 				}
   9322 				sc->phy.release(sc);
   9323 				if (rv)
   9324 					return;
   9325 			} else {
   9326 				rv = sc->phy.acquire(sc);
   9327 				if (rv)
   9328 					return;
   9329 
   9330 				rv = sc->phy.writereg_locked(dev, 2,
   9331 				    I219_UNKNOWN1, 0xc023);
   9332 				sc->phy.release(sc);
   9333 				if (rv)
   9334 					return;
   9335 
   9336 			}
   9337 		}
   9338 	}
   9339 
   9340 	/*
    9341 	 * Work around the I217 packet loss issue:
    9342 	 * ensure that the FEXTNVM4 Beacon Duration is set correctly
    9343 	 * on power-up.
    9344 	 * Set the Beacon Duration for the I217 to 8 usec.
   9345 	 */
   9346 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9347 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9348 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9349 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9350 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9351 	}
   9352 
    9353 	/* Work around the I218 hang issue */
   9354 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9355 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9356 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9357 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9358 		wm_k1_workaround_lpt_lp(sc, link);
   9359 
   9360 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9361 		/*
   9362 		 * Set platform power management values for Latency
   9363 		 * Tolerance Reporting (LTR)
   9364 		 */
   9365 		wm_platform_pm_pch_lpt(sc,
   9366 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9367 	}
   9368 
   9369 	/* Clear link partner's EEE ability */
   9370 	sc->eee_lp_ability = 0;
   9371 
   9372 	/* FEXTNVM6 K1-off workaround */
   9373 	if (sc->sc_type == WM_T_PCH_SPT) {
   9374 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9375 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9376 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9377 		else
   9378 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9379 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9380 	}
   9381 
   9382 	if (!link)
   9383 		return;
   9384 
   9385 	switch (sc->sc_type) {
   9386 	case WM_T_PCH2:
   9387 		wm_k1_workaround_lv(sc);
   9388 		/* FALLTHROUGH */
   9389 	case WM_T_PCH:
   9390 		if (sc->sc_phytype == WMPHY_82578)
   9391 			wm_link_stall_workaround_hv(sc);
   9392 		break;
   9393 	default:
   9394 		break;
   9395 	}
   9396 
   9397 	/* Enable/Disable EEE after link up */
   9398 	if (sc->sc_phytype > WMPHY_82579)
   9399 		wm_set_eee_pchlan(sc);
   9400 }
   9401 
   9402 /*
   9403  * wm_linkintr_tbi:
   9404  *
   9405  *	Helper; handle link interrupts for TBI mode.
   9406  */
   9407 static void
   9408 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9409 {
   9410 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9411 	uint32_t status;
   9412 
   9413 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9414 		__func__));
   9415 
   9416 	status = CSR_READ(sc, WMREG_STATUS);
   9417 	if (icr & ICR_LSC) {
   9418 		wm_check_for_link(sc);
   9419 		if (status & STATUS_LU) {
   9420 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9421 				device_xname(sc->sc_dev),
   9422 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9423 			/*
    9424 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9425 			 * automatically, so we should re-read sc->sc_ctrl.
   9426 			 */
   9427 
   9428 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9429 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9430 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9431 			if (status & STATUS_FD)
   9432 				sc->sc_tctl |=
   9433 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9434 			else
   9435 				sc->sc_tctl |=
   9436 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9437 			if (sc->sc_ctrl & CTRL_TFCE)
   9438 				sc->sc_fcrtl |= FCRTL_XONE;
   9439 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9440 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9441 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9442 			sc->sc_tbi_linkup = 1;
   9443 			if_link_state_change(ifp, LINK_STATE_UP);
   9444 		} else {
   9445 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9446 				device_xname(sc->sc_dev)));
   9447 			sc->sc_tbi_linkup = 0;
   9448 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9449 		}
   9450 		/* Update LED */
   9451 		wm_tbi_serdes_set_linkled(sc);
   9452 	} else if (icr & ICR_RXSEQ)
   9453 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9454 			device_xname(sc->sc_dev)));
   9455 }
   9456 
   9457 /*
   9458  * wm_linkintr_serdes:
   9459  *
 *	Helper; handle link interrupts for SERDES mode.
   9461  */
   9462 static void
   9463 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9464 {
   9465 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9466 	struct mii_data *mii = &sc->sc_mii;
   9467 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9468 	uint32_t pcs_adv, pcs_lpab, reg;
   9469 
   9470 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9471 		__func__));
   9472 
   9473 	if (icr & ICR_LSC) {
   9474 		/* Check PCS */
   9475 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9476 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9477 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9478 				device_xname(sc->sc_dev)));
   9479 			mii->mii_media_status |= IFM_ACTIVE;
   9480 			sc->sc_tbi_linkup = 1;
   9481 			if_link_state_change(ifp, LINK_STATE_UP);
   9482 		} else {
   9483 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9484 				device_xname(sc->sc_dev)));
   9485 			mii->mii_media_status |= IFM_NONE;
   9486 			sc->sc_tbi_linkup = 0;
   9487 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9488 			wm_tbi_serdes_set_linkled(sc);
   9489 			return;
   9490 		}
   9491 		mii->mii_media_active |= IFM_1000_SX;
   9492 		if ((reg & PCS_LSTS_FDX) != 0)
   9493 			mii->mii_media_active |= IFM_FDX;
   9494 		else
   9495 			mii->mii_media_active |= IFM_HDX;
   9496 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9497 			/* Check flow */
   9498 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9499 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9500 				DPRINTF(WM_DEBUG_LINK,
   9501 				    ("XXX LINKOK but not ACOMP\n"));
   9502 				return;
   9503 			}
   9504 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9505 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9506 			DPRINTF(WM_DEBUG_LINK,
   9507 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
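			/*
			 * Resolve flow control as in IEEE 802.3 annex 28B:
			 * symmetric PAUSE when both sides advertise it,
			 * otherwise Tx-only or Rx-only PAUSE when the
			 * asymmetric-direction bits allow it.
			 */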
   9508 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9509 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9510 				mii->mii_media_active |= IFM_FLOW
   9511 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9512 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9513 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9514 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9515 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9516 				mii->mii_media_active |= IFM_FLOW
   9517 				    | IFM_ETH_TXPAUSE;
   9518 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9519 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9520 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9521 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9522 				mii->mii_media_active |= IFM_FLOW
   9523 				    | IFM_ETH_RXPAUSE;
   9524 		}
   9525 		/* Update LED */
   9526 		wm_tbi_serdes_set_linkled(sc);
   9527 	} else
   9528 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9529 		    device_xname(sc->sc_dev)));
   9530 }
   9531 
   9532 /*
   9533  * wm_linkintr:
   9534  *
   9535  *	Helper; handle link interrupts.
   9536  */
   9537 static void
   9538 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9539 {
   9540 
   9541 	KASSERT(WM_CORE_LOCKED(sc));
   9542 
   9543 	if (sc->sc_flags & WM_F_HAS_MII)
   9544 		wm_linkintr_gmii(sc, icr);
   9545 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9546 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9547 		wm_linkintr_serdes(sc, icr);
   9548 	else
   9549 		wm_linkintr_tbi(sc, icr);
   9550 }
   9551 
   9552 
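/*
 * wm_sched_handle_queue:
 *
 *	Defer Tx/Rx processing for a queue to either the per-device
 *	workqueue or a softint, depending on the per-queue setting.
 */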
   9553 static inline void
   9554 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9555 {
   9556 
   9557 	if (wmq->wmq_txrx_use_workqueue)
   9558 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9559 	else
   9560 		softint_schedule(wmq->wmq_si);
   9561 }
   9562 
   9563 /*
   9564  * wm_intr_legacy:
   9565  *
   9566  *	Interrupt service routine for INTx and MSI.
   9567  */
   9568 static int
   9569 wm_intr_legacy(void *arg)
   9570 {
   9571 	struct wm_softc *sc = arg;
   9572 	struct wm_queue *wmq = &sc->sc_queue[0];
   9573 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9574 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9575 	uint32_t icr, rndval = 0;
   9576 	int handled = 0;
   9577 
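	/*
	 * Reading ICR acknowledges (clears) the asserted causes, so keep
	 * looping until no cause we are interested in remains set.
	 */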
   9578 	while (1 /* CONSTCOND */) {
   9579 		icr = CSR_READ(sc, WMREG_ICR);
   9580 		if ((icr & sc->sc_icr) == 0)
   9581 			break;
   9582 		if (handled == 0)
   9583 			DPRINTF(WM_DEBUG_TX,
   9584 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9585 		if (rndval == 0)
   9586 			rndval = icr;
   9587 
   9588 		mutex_enter(rxq->rxq_lock);
   9589 
   9590 		if (rxq->rxq_stopping) {
   9591 			mutex_exit(rxq->rxq_lock);
   9592 			break;
   9593 		}
   9594 
   9595 		handled = 1;
   9596 
   9597 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9598 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9599 			DPRINTF(WM_DEBUG_RX,
   9600 			    ("%s: RX: got Rx intr 0x%08x\n",
   9601 				device_xname(sc->sc_dev),
   9602 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9603 			WM_Q_EVCNT_INCR(rxq, intr);
   9604 		}
   9605 #endif
   9606 		/*
   9607 		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
		 * So, it is safe to call wm_rxeof() in interrupt context.
   9610 		 */
   9611 		wm_rxeof(rxq, UINT_MAX);
   9612 
   9613 		mutex_exit(rxq->rxq_lock);
   9614 		mutex_enter(txq->txq_lock);
   9615 
   9616 		if (txq->txq_stopping) {
   9617 			mutex_exit(txq->txq_lock);
   9618 			break;
   9619 		}
   9620 
   9621 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9622 		if (icr & ICR_TXDW) {
   9623 			DPRINTF(WM_DEBUG_TX,
   9624 			    ("%s: TX: got TXDW interrupt\n",
   9625 				device_xname(sc->sc_dev)));
   9626 			WM_Q_EVCNT_INCR(txq, txdw);
   9627 		}
   9628 #endif
   9629 		wm_txeof(txq, UINT_MAX);
   9630 
   9631 		mutex_exit(txq->txq_lock);
   9632 		WM_CORE_LOCK(sc);
   9633 
   9634 		if (sc->sc_core_stopping) {
   9635 			WM_CORE_UNLOCK(sc);
   9636 			break;
   9637 		}
   9638 
   9639 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9640 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9641 			wm_linkintr(sc, icr);
   9642 		}
   9643 		if ((icr & ICR_GPI(0)) != 0)
   9644 			device_printf(sc->sc_dev, "got module interrupt\n");
   9645 
   9646 		WM_CORE_UNLOCK(sc);
   9647 
   9648 		if (icr & ICR_RXO) {
   9649 #if defined(WM_DEBUG)
   9650 			log(LOG_WARNING, "%s: Receive overrun\n",
   9651 			    device_xname(sc->sc_dev));
   9652 #endif /* defined(WM_DEBUG) */
   9653 		}
   9654 	}
   9655 
   9656 	rnd_add_uint32(&sc->rnd_source, rndval);
   9657 
   9658 	if (handled) {
   9659 		/* Try to get more packets going. */
   9660 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9661 		wm_sched_handle_queue(sc, wmq);
   9662 	}
   9663 
   9664 	return handled;
   9665 }
   9666 
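/*
 * wm_txrxintr_disable:
 *
 *	Mask this queue's Tx/Rx interrupts while they are being serviced.
 */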
   9667 static inline void
   9668 wm_txrxintr_disable(struct wm_queue *wmq)
   9669 {
   9670 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9671 
   9672 	if (sc->sc_type == WM_T_82574)
   9673 		CSR_WRITE(sc, WMREG_IMC,
   9674 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9675 	else if (sc->sc_type == WM_T_82575)
   9676 		CSR_WRITE(sc, WMREG_EIMC,
   9677 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9678 	else
   9679 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9680 }
   9681 
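/*
 * wm_txrxintr_enable:
 *
 *	Re-enable this queue's Tx/Rx interrupts once servicing is done.
 */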
   9682 static inline void
   9683 wm_txrxintr_enable(struct wm_queue *wmq)
   9684 {
   9685 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9686 
   9687 	wm_itrs_calculate(sc, wmq);
   9688 
	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
	 * here.  It does not matter whether RXQ(0) or RXQ(1) enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt stays disabled
	 * while its wm_handle_queue(wmq) is running.
	 */
   9695 	if (sc->sc_type == WM_T_82574)
   9696 		CSR_WRITE(sc, WMREG_IMS,
   9697 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9698 	else if (sc->sc_type == WM_T_82575)
   9699 		CSR_WRITE(sc, WMREG_EIMS,
   9700 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9701 	else
   9702 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9703 }
   9704 
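/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for a Tx/Rx queue pair for MSI-X.
 */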
   9705 static int
   9706 wm_txrxintr_msix(void *arg)
   9707 {
   9708 	struct wm_queue *wmq = arg;
   9709 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9710 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9711 	struct wm_softc *sc = txq->txq_sc;
   9712 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9713 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9714 	bool txmore;
   9715 	bool rxmore;
   9716 
   9717 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9718 
   9719 	DPRINTF(WM_DEBUG_TX,
   9720 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9721 
   9722 	wm_txrxintr_disable(wmq);
   9723 
   9724 	mutex_enter(txq->txq_lock);
   9725 
   9726 	if (txq->txq_stopping) {
   9727 		mutex_exit(txq->txq_lock);
   9728 		return 0;
   9729 	}
   9730 
   9731 	WM_Q_EVCNT_INCR(txq, txdw);
   9732 	txmore = wm_txeof(txq, txlimit);
   9733 	/* wm_deferred start() is done in wm_handle_queue(). */
   9734 	mutex_exit(txq->txq_lock);
   9735 
   9736 	DPRINTF(WM_DEBUG_RX,
   9737 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9738 	mutex_enter(rxq->rxq_lock);
   9739 
   9740 	if (rxq->rxq_stopping) {
   9741 		mutex_exit(rxq->rxq_lock);
   9742 		return 0;
   9743 	}
   9744 
   9745 	WM_Q_EVCNT_INCR(rxq, intr);
   9746 	rxmore = wm_rxeof(rxq, rxlimit);
   9747 	mutex_exit(rxq->rxq_lock);
   9748 
   9749 	wm_itrs_writereg(sc, wmq);
   9750 
   9751 	if (txmore || rxmore) {
   9752 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9753 		wm_sched_handle_queue(sc, wmq);
   9754 	} else
   9755 		wm_txrxintr_enable(wmq);
   9756 
   9757 	return 1;
   9758 }
   9759 
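/*
 * wm_handle_queue:
 *
 *	Softint/workqueue handler which continues Tx/Rx processing left
 *	over when the interrupt-time limits were reached.
 */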
   9760 static void
   9761 wm_handle_queue(void *arg)
   9762 {
   9763 	struct wm_queue *wmq = arg;
   9764 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9765 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9766 	struct wm_softc *sc = txq->txq_sc;
   9767 	u_int txlimit = sc->sc_tx_process_limit;
   9768 	u_int rxlimit = sc->sc_rx_process_limit;
   9769 	bool txmore;
   9770 	bool rxmore;
   9771 
   9772 	mutex_enter(txq->txq_lock);
   9773 	if (txq->txq_stopping) {
   9774 		mutex_exit(txq->txq_lock);
   9775 		return;
   9776 	}
   9777 	txmore = wm_txeof(txq, txlimit);
   9778 	wm_deferred_start_locked(txq);
   9779 	mutex_exit(txq->txq_lock);
   9780 
   9781 	mutex_enter(rxq->rxq_lock);
   9782 	if (rxq->rxq_stopping) {
   9783 		mutex_exit(rxq->rxq_lock);
   9784 		return;
   9785 	}
   9786 	WM_Q_EVCNT_INCR(rxq, defer);
   9787 	rxmore = wm_rxeof(rxq, rxlimit);
   9788 	mutex_exit(rxq->rxq_lock);
   9789 
   9790 	if (txmore || rxmore) {
   9791 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9792 		wm_sched_handle_queue(sc, wmq);
   9793 	} else
   9794 		wm_txrxintr_enable(wmq);
   9795 }
   9796 
   9797 static void
   9798 wm_handle_queue_work(struct work *wk, void *context)
   9799 {
   9800 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9801 
	/* An "enqueued" flag is not required here. */
   9805 	wm_handle_queue(wmq);
   9806 }
   9807 
   9808 /*
   9809  * wm_linkintr_msix:
   9810  *
   9811  *	Interrupt service routine for link status change for MSI-X.
   9812  */
   9813 static int
   9814 wm_linkintr_msix(void *arg)
   9815 {
   9816 	struct wm_softc *sc = arg;
   9817 	uint32_t reg;
	bool has_rxo = false;
   9819 
   9820 	reg = CSR_READ(sc, WMREG_ICR);
   9821 	WM_CORE_LOCK(sc);
   9822 	DPRINTF(WM_DEBUG_LINK,
   9823 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9824 		device_xname(sc->sc_dev), reg));
   9825 
   9826 	if (sc->sc_core_stopping)
   9827 		goto out;
   9828 
   9829 	if ((reg & ICR_LSC) != 0) {
   9830 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9831 		wm_linkintr(sc, ICR_LSC);
   9832 	}
   9833 	if ((reg & ICR_GPI(0)) != 0)
   9834 		device_printf(sc->sc_dev, "got module interrupt\n");
   9835 
	/*
	 * XXX 82574 MSI-X mode workaround
	 *
	 * The 82574 in MSI-X mode delivers the receive overrun (RXO)
	 * interrupt on the ICR_OTHER MSI-X vector, and raises neither the
	 * ICR_RXQ(0) nor the ICR_RXQ(1) vector. So we generate ICR_RXQ(0)
	 * and ICR_RXQ(1) interrupts by writing to WMREG_ICS in order to
	 * process the received packets.
	 */
   9844 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9845 #if defined(WM_DEBUG)
   9846 		log(LOG_WARNING, "%s: Receive overrun\n",
   9847 		    device_xname(sc->sc_dev));
   9848 #endif /* defined(WM_DEBUG) */
   9849 
   9850 		has_rxo = true;
		/*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so use polling mode for ICR_OTHER just
		 * as for the Tx/Rx interrupts. ICR_OTHER is re-enabled at
		 * the end of wm_txrxintr_msix(), which is kicked by both the
		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
		 */
   9858 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9859 
   9860 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9861 	}
   9862 
   9865 out:
   9866 	WM_CORE_UNLOCK(sc);
   9867 
   9868 	if (sc->sc_type == WM_T_82574) {
   9869 		if (!has_rxo)
   9870 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9871 		else
   9872 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9873 	} else if (sc->sc_type == WM_T_82575)
   9874 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9875 	else
   9876 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9877 
   9878 	return 1;
   9879 }
   9880 
   9881 /*
   9882  * Media related.
   9883  * GMII, SGMII, TBI (and SERDES)
   9884  */
   9885 
   9886 /* Common */
   9887 
   9888 /*
   9889  * wm_tbi_serdes_set_linkled:
   9890  *
   9891  *	Update the link LED on TBI and SERDES devices.
   9892  */
   9893 static void
   9894 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9895 {
   9896 
   9897 	if (sc->sc_tbi_linkup)
   9898 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9899 	else
   9900 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9901 
   9902 	/* 82540 or newer devices are active low */
   9903 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9904 
   9905 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9906 }
   9907 
   9908 /* GMII related */
   9909 
   9910 /*
   9911  * wm_gmii_reset:
   9912  *
   9913  *	Reset the PHY.
   9914  */
   9915 static void
   9916 wm_gmii_reset(struct wm_softc *sc)
   9917 {
   9918 	uint32_t reg;
   9919 	int rv;
   9920 
   9921 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9922 		device_xname(sc->sc_dev), __func__));
   9923 
   9924 	rv = sc->phy.acquire(sc);
   9925 	if (rv != 0) {
   9926 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9927 		    __func__);
   9928 		return;
   9929 	}
   9930 
   9931 	switch (sc->sc_type) {
   9932 	case WM_T_82542_2_0:
   9933 	case WM_T_82542_2_1:
   9934 		/* null */
   9935 		break;
   9936 	case WM_T_82543:
   9937 		/*
   9938 		 * With 82543, we need to force speed and duplex on the MAC
   9939 		 * equal to what the PHY speed and duplex configuration is.
   9940 		 * In addition, we need to perform a hardware reset on the PHY
   9941 		 * to take it out of reset.
   9942 		 */
   9943 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9944 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9945 
   9946 		/* The PHY reset pin is active-low. */
   9947 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9948 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9949 		    CTRL_EXT_SWDPIN(4));
   9950 		reg |= CTRL_EXT_SWDPIO(4);
   9951 
   9952 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9953 		CSR_WRITE_FLUSH(sc);
   9954 		delay(10*1000);
   9955 
   9956 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9957 		CSR_WRITE_FLUSH(sc);
   9958 		delay(150);
   9959 #if 0
   9960 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9961 #endif
   9962 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9963 		break;
   9964 	case WM_T_82544:	/* Reset 10000us */
   9965 	case WM_T_82540:
   9966 	case WM_T_82545:
   9967 	case WM_T_82545_3:
   9968 	case WM_T_82546:
   9969 	case WM_T_82546_3:
   9970 	case WM_T_82541:
   9971 	case WM_T_82541_2:
   9972 	case WM_T_82547:
   9973 	case WM_T_82547_2:
   9974 	case WM_T_82571:	/* Reset 100us */
   9975 	case WM_T_82572:
   9976 	case WM_T_82573:
   9977 	case WM_T_82574:
   9978 	case WM_T_82575:
   9979 	case WM_T_82576:
   9980 	case WM_T_82580:
   9981 	case WM_T_I350:
   9982 	case WM_T_I354:
   9983 	case WM_T_I210:
   9984 	case WM_T_I211:
   9985 	case WM_T_82583:
   9986 	case WM_T_80003:
   9987 		/* Generic reset */
   9988 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9989 		CSR_WRITE_FLUSH(sc);
   9990 		delay(20000);
   9991 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9992 		CSR_WRITE_FLUSH(sc);
   9993 		delay(20000);
   9994 
   9995 		if ((sc->sc_type == WM_T_82541)
   9996 		    || (sc->sc_type == WM_T_82541_2)
   9997 		    || (sc->sc_type == WM_T_82547)
   9998 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset(). */
   10000 			/* XXX add code to set LED after phy reset */
   10001 		}
   10002 		break;
   10003 	case WM_T_ICH8:
   10004 	case WM_T_ICH9:
   10005 	case WM_T_ICH10:
   10006 	case WM_T_PCH:
   10007 	case WM_T_PCH2:
   10008 	case WM_T_PCH_LPT:
   10009 	case WM_T_PCH_SPT:
   10010 	case WM_T_PCH_CNP:
   10011 		/* Generic reset */
   10012 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10013 		CSR_WRITE_FLUSH(sc);
   10014 		delay(100);
   10015 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10016 		CSR_WRITE_FLUSH(sc);
   10017 		delay(150);
   10018 		break;
   10019 	default:
   10020 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10021 		    __func__);
   10022 		break;
   10023 	}
   10024 
   10025 	sc->phy.release(sc);
   10026 
   10027 	/* get_cfg_done */
   10028 	wm_get_cfg_done(sc);
   10029 
   10030 	/* Extra setup */
   10031 	switch (sc->sc_type) {
   10032 	case WM_T_82542_2_0:
   10033 	case WM_T_82542_2_1:
   10034 	case WM_T_82543:
   10035 	case WM_T_82544:
   10036 	case WM_T_82540:
   10037 	case WM_T_82545:
   10038 	case WM_T_82545_3:
   10039 	case WM_T_82546:
   10040 	case WM_T_82546_3:
   10041 	case WM_T_82541_2:
   10042 	case WM_T_82547_2:
   10043 	case WM_T_82571:
   10044 	case WM_T_82572:
   10045 	case WM_T_82573:
   10046 	case WM_T_82574:
   10047 	case WM_T_82583:
   10048 	case WM_T_82575:
   10049 	case WM_T_82576:
   10050 	case WM_T_82580:
   10051 	case WM_T_I350:
   10052 	case WM_T_I354:
   10053 	case WM_T_I210:
   10054 	case WM_T_I211:
   10055 	case WM_T_80003:
   10056 		/* Null */
   10057 		break;
   10058 	case WM_T_82541:
   10059 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   10061 		break;
   10062 	case WM_T_ICH8:
   10063 	case WM_T_ICH9:
   10064 	case WM_T_ICH10:
   10065 	case WM_T_PCH:
   10066 	case WM_T_PCH2:
   10067 	case WM_T_PCH_LPT:
   10068 	case WM_T_PCH_SPT:
   10069 	case WM_T_PCH_CNP:
   10070 		wm_phy_post_reset(sc);
   10071 		break;
   10072 	default:
   10073 		panic("%s: unknown type\n", __func__);
   10074 		break;
   10075 	}
   10076 }
   10077 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and selecting them requires the PCI ID or MAC type, since the
 * PHY registers cannot be accessed yet.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or MAC type. The list of PCI IDs may not be complete,
 * so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the PHY
 * type. This might still be imperfect because of missing table entries,
 * but it is more reliable than the first call.
 *
 *  If the newly detected result differs from the previous assumption, a
 * diagnostic message is printed.
 */
   10096 static void
   10097 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10098     uint16_t phy_model)
   10099 {
   10100 	device_t dev = sc->sc_dev;
   10101 	struct mii_data *mii = &sc->sc_mii;
   10102 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10103 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10104 	mii_readreg_t new_readreg;
   10105 	mii_writereg_t new_writereg;
   10106 	bool dodiag = true;
   10107 
   10108 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10109 		device_xname(sc->sc_dev), __func__));
   10110 
	/*
	 * A 1000BASE-T SFP uses SGMII, and the PHY type assumed on the
	 * first call is always incorrect, so don't print diagnostic output
	 * on the second call.
	 */
   10115 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10116 		dodiag = false;
   10117 
   10118 	if (mii->mii_readreg == NULL) {
   10119 		/*
   10120 		 *  This is the first call of this function. For ICH and PCH
   10121 		 * variants, it's difficult to determine the PHY access method
   10122 		 * by sc_type, so use the PCI product ID for some devices.
   10123 		 */
   10124 
   10125 		switch (sc->sc_pcidevid) {
   10126 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10127 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10128 			/* 82577 */
   10129 			new_phytype = WMPHY_82577;
   10130 			break;
   10131 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10132 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10133 			/* 82578 */
   10134 			new_phytype = WMPHY_82578;
   10135 			break;
   10136 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10137 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10138 			/* 82579 */
   10139 			new_phytype = WMPHY_82579;
   10140 			break;
   10141 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10142 		case PCI_PRODUCT_INTEL_82801I_BM:
   10143 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10144 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10145 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10146 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10147 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10148 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10149 			/* ICH8, 9, 10 with 82567 */
   10150 			new_phytype = WMPHY_BM;
   10151 			break;
   10152 		default:
   10153 			break;
   10154 		}
   10155 	} else {
   10156 		/* It's not the first call. Use PHY OUI and model */
   10157 		switch (phy_oui) {
   10158 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10159 			switch (phy_model) {
   10160 			case 0x0004: /* XXX */
   10161 				new_phytype = WMPHY_82578;
   10162 				break;
   10163 			default:
   10164 				break;
   10165 			}
   10166 			break;
   10167 		case MII_OUI_xxMARVELL:
   10168 			switch (phy_model) {
   10169 			case MII_MODEL_xxMARVELL_I210:
   10170 				new_phytype = WMPHY_I210;
   10171 				break;
   10172 			case MII_MODEL_xxMARVELL_E1011:
   10173 			case MII_MODEL_xxMARVELL_E1000_3:
   10174 			case MII_MODEL_xxMARVELL_E1000_5:
   10175 			case MII_MODEL_xxMARVELL_E1112:
   10176 				new_phytype = WMPHY_M88;
   10177 				break;
   10178 			case MII_MODEL_xxMARVELL_E1149:
   10179 				new_phytype = WMPHY_BM;
   10180 				break;
   10181 			case MII_MODEL_xxMARVELL_E1111:
   10182 			case MII_MODEL_xxMARVELL_I347:
   10183 			case MII_MODEL_xxMARVELL_E1512:
   10184 			case MII_MODEL_xxMARVELL_E1340M:
   10185 			case MII_MODEL_xxMARVELL_E1543:
   10186 				new_phytype = WMPHY_M88;
   10187 				break;
   10188 			case MII_MODEL_xxMARVELL_I82563:
   10189 				new_phytype = WMPHY_GG82563;
   10190 				break;
   10191 			default:
   10192 				break;
   10193 			}
   10194 			break;
   10195 		case MII_OUI_INTEL:
   10196 			switch (phy_model) {
   10197 			case MII_MODEL_INTEL_I82577:
   10198 				new_phytype = WMPHY_82577;
   10199 				break;
   10200 			case MII_MODEL_INTEL_I82579:
   10201 				new_phytype = WMPHY_82579;
   10202 				break;
   10203 			case MII_MODEL_INTEL_I217:
   10204 				new_phytype = WMPHY_I217;
   10205 				break;
   10206 			case MII_MODEL_INTEL_I82580:
   10207 			case MII_MODEL_INTEL_I350:
   10208 				new_phytype = WMPHY_82580;
   10209 				break;
   10210 			default:
   10211 				break;
   10212 			}
   10213 			break;
   10214 		case MII_OUI_yyINTEL:
   10215 			switch (phy_model) {
   10216 			case MII_MODEL_yyINTEL_I82562G:
   10217 			case MII_MODEL_yyINTEL_I82562EM:
   10218 			case MII_MODEL_yyINTEL_I82562ET:
   10219 				new_phytype = WMPHY_IFE;
   10220 				break;
   10221 			case MII_MODEL_yyINTEL_IGP01E1000:
   10222 				new_phytype = WMPHY_IGP;
   10223 				break;
   10224 			case MII_MODEL_yyINTEL_I82566:
   10225 				new_phytype = WMPHY_IGP_3;
   10226 				break;
   10227 			default:
   10228 				break;
   10229 			}
   10230 			break;
   10231 		default:
   10232 			break;
   10233 		}
   10234 
   10235 		if (dodiag) {
   10236 			if (new_phytype == WMPHY_UNKNOWN)
   10237 				aprint_verbose_dev(dev,
   10238 				    "%s: Unknown PHY model. OUI=%06x, "
   10239 				    "model=%04x\n", __func__, phy_oui,
   10240 				    phy_model);
   10241 
   10242 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10243 			    && (sc->sc_phytype != new_phytype)) {
   10244 				aprint_error_dev(dev, "Previously assumed PHY "
				    "type (%u) was incorrect. PHY type from "
				    "PHY ID = %u\n", sc->sc_phytype,
				    new_phytype);
   10247 			}
   10248 		}
   10249 	}
   10250 
   10251 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10252 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10253 		/* SGMII */
   10254 		new_readreg = wm_sgmii_readreg;
   10255 		new_writereg = wm_sgmii_writereg;
   10256 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10257 		/* BM2 (phyaddr == 1) */
   10258 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10259 		    && (new_phytype != WMPHY_BM)
   10260 		    && (new_phytype != WMPHY_UNKNOWN))
   10261 			doubt_phytype = new_phytype;
   10262 		new_phytype = WMPHY_BM;
   10263 		new_readreg = wm_gmii_bm_readreg;
   10264 		new_writereg = wm_gmii_bm_writereg;
   10265 	} else if (sc->sc_type >= WM_T_PCH) {
   10266 		/* All PCH* use _hv_ */
   10267 		new_readreg = wm_gmii_hv_readreg;
   10268 		new_writereg = wm_gmii_hv_writereg;
   10269 	} else if (sc->sc_type >= WM_T_ICH8) {
   10270 		/* non-82567 ICH8, 9 and 10 */
   10271 		new_readreg = wm_gmii_i82544_readreg;
   10272 		new_writereg = wm_gmii_i82544_writereg;
   10273 	} else if (sc->sc_type >= WM_T_80003) {
   10274 		/* 80003 */
   10275 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10276 		    && (new_phytype != WMPHY_GG82563)
   10277 		    && (new_phytype != WMPHY_UNKNOWN))
   10278 			doubt_phytype = new_phytype;
   10279 		new_phytype = WMPHY_GG82563;
   10280 		new_readreg = wm_gmii_i80003_readreg;
   10281 		new_writereg = wm_gmii_i80003_writereg;
   10282 	} else if (sc->sc_type >= WM_T_I210) {
   10283 		/* I210 and I211 */
   10284 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10285 		    && (new_phytype != WMPHY_I210)
   10286 		    && (new_phytype != WMPHY_UNKNOWN))
   10287 			doubt_phytype = new_phytype;
   10288 		new_phytype = WMPHY_I210;
   10289 		new_readreg = wm_gmii_gs40g_readreg;
   10290 		new_writereg = wm_gmii_gs40g_writereg;
   10291 	} else if (sc->sc_type >= WM_T_82580) {
   10292 		/* 82580, I350 and I354 */
   10293 		new_readreg = wm_gmii_82580_readreg;
   10294 		new_writereg = wm_gmii_82580_writereg;
   10295 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10297 		new_readreg = wm_gmii_i82544_readreg;
   10298 		new_writereg = wm_gmii_i82544_writereg;
   10299 	} else {
   10300 		new_readreg = wm_gmii_i82543_readreg;
   10301 		new_writereg = wm_gmii_i82543_writereg;
   10302 	}
   10303 
   10304 	if (new_phytype == WMPHY_BM) {
   10305 		/* All BM use _bm_ */
   10306 		new_readreg = wm_gmii_bm_readreg;
   10307 		new_writereg = wm_gmii_bm_writereg;
   10308 	}
   10309 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10310 		/* All PCH* use _hv_ */
   10311 		new_readreg = wm_gmii_hv_readreg;
   10312 		new_writereg = wm_gmii_hv_writereg;
   10313 	}
   10314 
   10315 	/* Diag output */
   10316 	if (dodiag) {
   10317 		if (doubt_phytype != WMPHY_UNKNOWN)
   10318 			aprint_error_dev(dev, "Assumed new PHY type was "
   10319 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10320 			    new_phytype);
   10321 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10322 		    && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. New PHY type = %u\n",
   10325 			    sc->sc_phytype, new_phytype);
   10326 
   10327 		if ((mii->mii_readreg != NULL) &&
   10328 		    (new_phytype == WMPHY_UNKNOWN))
   10329 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10330 
   10331 		if ((mii->mii_readreg != NULL) &&
   10332 		    (mii->mii_readreg != new_readreg))
   10333 			aprint_error_dev(dev, "Previously assumed PHY "
   10334 			    "read/write function was incorrect.\n");
   10335 	}
   10336 
   10337 	/* Update now */
   10338 	sc->sc_phytype = new_phytype;
   10339 	mii->mii_readreg = new_readreg;
   10340 	mii->mii_writereg = new_writereg;
   10341 	if (new_readreg == wm_gmii_hv_readreg) {
   10342 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10343 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10344 	} else if (new_readreg == wm_sgmii_readreg) {
   10345 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10346 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10347 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10348 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10349 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10350 	}
   10351 }
   10352 
   10353 /*
   10354  * wm_get_phy_id_82575:
   10355  *
 *	Return the PHY ID, or -1 on failure.
   10357  */
   10358 static int
   10359 wm_get_phy_id_82575(struct wm_softc *sc)
   10360 {
   10361 	uint32_t reg;
   10362 	int phyid = -1;
   10363 
   10364 	/* XXX */
   10365 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10366 		return -1;
   10367 
   10368 	if (wm_sgmii_uses_mdio(sc)) {
   10369 		switch (sc->sc_type) {
   10370 		case WM_T_82575:
   10371 		case WM_T_82576:
   10372 			reg = CSR_READ(sc, WMREG_MDIC);
   10373 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10374 			break;
   10375 		case WM_T_82580:
   10376 		case WM_T_I350:
   10377 		case WM_T_I354:
   10378 		case WM_T_I210:
   10379 		case WM_T_I211:
   10380 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10381 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10382 			break;
   10383 		default:
   10384 			return -1;
   10385 		}
   10386 	}
   10387 
   10388 	return phyid;
   10389 }
   10390 
   10391 
   10392 /*
   10393  * wm_gmii_mediainit:
   10394  *
   10395  *	Initialize media for use on 1000BASE-T devices.
   10396  */
   10397 static void
   10398 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10399 {
   10400 	device_t dev = sc->sc_dev;
   10401 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10402 	struct mii_data *mii = &sc->sc_mii;
   10403 
   10404 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10405 		device_xname(sc->sc_dev), __func__));
   10406 
   10407 	/* We have GMII. */
   10408 	sc->sc_flags |= WM_F_HAS_MII;
   10409 
   10410 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10412 	else
   10413 		sc->sc_tipg = TIPG_1000T_DFLT;
   10414 
   10415 	/*
   10416 	 * Let the chip set speed/duplex on its own based on
   10417 	 * signals from the PHY.
   10418 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10419 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10420 	 */
   10421 	sc->sc_ctrl |= CTRL_SLU;
   10422 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10423 
   10424 	/* Initialize our media structures and probe the GMII. */
   10425 	mii->mii_ifp = ifp;
   10426 
   10427 	mii->mii_statchg = wm_gmii_statchg;
   10428 
   10429 	/* get PHY control from SMBus to PCIe */
   10430 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10431 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10432 	    || (sc->sc_type == WM_T_PCH_CNP))
   10433 		wm_init_phy_workarounds_pchlan(sc);
   10434 
   10435 	wm_gmii_reset(sc);
   10436 
   10437 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10438 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10439 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10440 
   10441 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10442 	    || (sc->sc_type == WM_T_82580)
   10443 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10444 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10445 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10446 			/* Attach only one port */
   10447 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10448 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10449 		} else {
   10450 			int i, id;
   10451 			uint32_t ctrl_ext;
   10452 
   10453 			id = wm_get_phy_id_82575(sc);
   10454 			if (id != -1) {
   10455 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10456 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10457 			}
   10458 			if ((id == -1)
   10459 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   10461 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10462 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10463 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10464 				CSR_WRITE_FLUSH(sc);
   10465 				delay(300*1000); /* XXX too long */
   10466 
				/*
				 * Scan PHY addresses 1 through 7.
				 *
				 * I2C access can fail with the I2C
				 * register's ERROR bit set, so suppress
				 * error messages while scanning.
				 */
   10474 				sc->phy.no_errprint = true;
   10475 				for (i = 1; i < 8; i++)
   10476 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10477 					    0xffffffff, i, MII_OFFSET_ANY,
   10478 					    MIIF_DOPAUSE);
   10479 				sc->phy.no_errprint = false;
   10480 
   10481 				/* Restore previous sfp cage power state */
   10482 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10483 			}
   10484 		}
   10485 	} else
   10486 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10487 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10488 
	/*
	 * If the MAC is PCH2 or newer and no MII PHY was detected, apply
	 * the wm_set_mdio_slow_mode_hv() workaround and retry.
	 */
   10493 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10494 		|| (sc->sc_type == WM_T_PCH_SPT)
   10495 		|| (sc->sc_type == WM_T_PCH_CNP))
   10496 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10497 		wm_set_mdio_slow_mode_hv(sc);
   10498 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10499 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10500 	}
   10501 
   10502 	/*
   10503 	 * (For ICH8 variants)
   10504 	 * If PHY detection failed, use BM's r/w function and retry.
   10505 	 */
   10506 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10507 		/* if failed, retry with *_bm_* */
   10508 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10509 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10510 		    sc->sc_phytype);
   10511 		sc->sc_phytype = WMPHY_BM;
   10512 		mii->mii_readreg = wm_gmii_bm_readreg;
   10513 		mii->mii_writereg = wm_gmii_bm_writereg;
   10514 
   10515 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10516 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10517 	}
   10518 
   10519 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   10521 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10522 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10523 		sc->sc_phytype = WMPHY_NONE;
   10524 	} else {
   10525 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10526 
   10527 		/*
		 * A PHY was found. Check the PHY type again with the second
		 * call of wm_gmii_setup_phytype().
   10530 		 */
   10531 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10532 		    child->mii_mpd_model);
   10533 
   10534 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10535 	}
   10536 }
   10537 
   10538 /*
   10539  * wm_gmii_mediachange:	[ifmedia interface function]
   10540  *
   10541  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10542  */
   10543 static int
   10544 wm_gmii_mediachange(struct ifnet *ifp)
   10545 {
   10546 	struct wm_softc *sc = ifp->if_softc;
   10547 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10548 	uint32_t reg;
   10549 	int rc;
   10550 
   10551 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10552 		device_xname(sc->sc_dev), __func__));
   10553 	if ((ifp->if_flags & IFF_UP) == 0)
   10554 		return 0;
   10555 
   10556 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10557 	if ((sc->sc_type == WM_T_82580)
   10558 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10559 	    || (sc->sc_type == WM_T_I211)) {
   10560 		reg = CSR_READ(sc, WMREG_PHPM);
   10561 		reg &= ~PHPM_GO_LINK_D;
   10562 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10563 	}
   10564 
   10565 	/* Disable D0 LPLU. */
   10566 	wm_lplu_d0_disable(sc);
   10567 
   10568 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10569 	sc->sc_ctrl |= CTRL_SLU;
   10570 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10571 	    || (sc->sc_type > WM_T_82543)) {
   10572 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10573 	} else {
   10574 		sc->sc_ctrl &= ~CTRL_ASDE;
   10575 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10576 		if (ife->ifm_media & IFM_FDX)
   10577 			sc->sc_ctrl |= CTRL_FD;
   10578 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10579 		case IFM_10_T:
   10580 			sc->sc_ctrl |= CTRL_SPEED_10;
   10581 			break;
   10582 		case IFM_100_TX:
   10583 			sc->sc_ctrl |= CTRL_SPEED_100;
   10584 			break;
   10585 		case IFM_1000_T:
   10586 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10587 			break;
   10588 		case IFM_NONE:
   10589 			/* There is no specific setting for IFM_NONE */
   10590 			break;
   10591 		default:
   10592 			panic("wm_gmii_mediachange: bad media 0x%x",
   10593 			    ife->ifm_media);
   10594 		}
   10595 	}
   10596 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10597 	CSR_WRITE_FLUSH(sc);
   10598 
   10599 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10600 		wm_serdes_mediachange(ifp);
   10601 
   10602 	if (sc->sc_type <= WM_T_82543)
   10603 		wm_gmii_reset(sc);
   10604 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10605 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
   10607 		delay(300 * 1000);
   10608 		wm_gmii_reset(sc);
   10609 	}
   10610 
   10611 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10612 		return 0;
   10613 	return rc;
   10614 }
   10615 
   10616 /*
   10617  * wm_gmii_mediastatus:	[ifmedia interface function]
   10618  *
   10619  *	Get the current interface media status on a 1000BASE-T device.
   10620  */
   10621 static void
   10622 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10623 {
   10624 	struct wm_softc *sc = ifp->if_softc;
   10625 
   10626 	ether_mediastatus(ifp, ifmr);
   10627 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10628 	    | sc->sc_flowflags;
   10629 }
   10630 
   10631 #define	MDI_IO		CTRL_SWDPIN(2)
   10632 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10633 #define	MDI_CLK		CTRL_SWDPIN(3)
   10634 
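/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang `nbits' of `data' out to the PHY, MSB first, on the
 *	software-controlled MDI pins.
 */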
   10635 static void
   10636 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10637 {
   10638 	uint32_t i, v;
   10639 
   10640 	v = CSR_READ(sc, WMREG_CTRL);
   10641 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10642 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10643 
   10644 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10645 		if (data & i)
   10646 			v |= MDI_IO;
   10647 		else
   10648 			v &= ~MDI_IO;
   10649 		CSR_WRITE(sc, WMREG_CTRL, v);
   10650 		CSR_WRITE_FLUSH(sc);
   10651 		delay(10);
   10652 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10653 		CSR_WRITE_FLUSH(sc);
   10654 		delay(10);
   10655 		CSR_WRITE(sc, WMREG_CTRL, v);
   10656 		CSR_WRITE_FLUSH(sc);
   10657 		delay(10);
   10658 	}
   10659 }
   10660 
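/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang a 16-bit value in from the PHY, clocking the turnaround
 *	cycles before and after the data bits.
 */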
   10661 static uint16_t
   10662 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10663 {
   10664 	uint32_t v, i;
   10665 	uint16_t data = 0;
   10666 
   10667 	v = CSR_READ(sc, WMREG_CTRL);
   10668 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10669 	v |= CTRL_SWDPIO(3);
   10670 
   10671 	CSR_WRITE(sc, WMREG_CTRL, v);
   10672 	CSR_WRITE_FLUSH(sc);
   10673 	delay(10);
   10674 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10675 	CSR_WRITE_FLUSH(sc);
   10676 	delay(10);
   10677 	CSR_WRITE(sc, WMREG_CTRL, v);
   10678 	CSR_WRITE_FLUSH(sc);
   10679 	delay(10);
   10680 
   10681 	for (i = 0; i < 16; i++) {
   10682 		data <<= 1;
   10683 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10684 		CSR_WRITE_FLUSH(sc);
   10685 		delay(10);
   10686 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10687 			data |= 1;
   10688 		CSR_WRITE(sc, WMREG_CTRL, v);
   10689 		CSR_WRITE_FLUSH(sc);
   10690 		delay(10);
   10691 	}
   10692 
   10693 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10694 	CSR_WRITE_FLUSH(sc);
   10695 	delay(10);
   10696 	CSR_WRITE(sc, WMREG_CTRL, v);
   10697 	CSR_WRITE_FLUSH(sc);
   10698 	delay(10);
   10699 
   10700 	return data;
   10701 }
   10702 
   10703 #undef MDI_IO
   10704 #undef MDI_DIR
   10705 #undef MDI_CLK
   10706 
   10707 /*
   10708  * wm_gmii_i82543_readreg:	[mii interface function]
   10709  *
   10710  *	Read a PHY register on the GMII (i82543 version).
   10711  */
   10712 static int
   10713 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10714 {
   10715 	struct wm_softc *sc = device_private(dev);
   10716 
   10717 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10718 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10719 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10720 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10721 
   10722 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10723 		device_xname(dev), phy, reg, *val));
   10724 
   10725 	return 0;
   10726 }
   10727 
   10728 /*
   10729  * wm_gmii_i82543_writereg:	[mii interface function]
   10730  *
   10731  *	Write a PHY register on the GMII (i82543 version).
   10732  */
   10733 static int
   10734 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10735 {
   10736 	struct wm_softc *sc = device_private(dev);
   10737 
   10738 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10739 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10740 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10741 	    (MII_COMMAND_START << 30), 32);
   10742 
   10743 	return 0;
   10744 }
   10745 
   10746 /*
   10747  * wm_gmii_mdic_readreg:	[mii interface function]
   10748  *
   10749  *	Read a PHY register on the GMII.
   10750  */
   10751 static int
   10752 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10753 {
   10754 	struct wm_softc *sc = device_private(dev);
   10755 	uint32_t mdic = 0;
   10756 	int i;
   10757 
   10758 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10759 	    && (reg > MII_ADDRMASK)) {
   10760 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10761 		    __func__, sc->sc_phytype, reg);
   10762 		reg &= MII_ADDRMASK;
   10763 	}
   10764 
   10765 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10766 	    MDIC_REGADD(reg));
   10767 
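	/* Poll for completion; the hardware sets MDIC_READY when done. */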
   10768 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10769 		delay(50);
   10770 		mdic = CSR_READ(sc, WMREG_MDIC);
   10771 		if (mdic & MDIC_READY)
   10772 			break;
   10773 	}
   10774 
   10775 	if ((mdic & MDIC_READY) == 0) {
   10776 		DPRINTF(WM_DEBUG_GMII,
   10777 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10778 			device_xname(dev), phy, reg));
   10779 		return ETIMEDOUT;
   10780 	} else if (mdic & MDIC_E) {
   10781 		/* This is normal if no PHY is present. */
   10782 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10783 			device_xname(sc->sc_dev), phy, reg));
   10784 		return -1;
   10785 	} else
   10786 		*val = MDIC_DATA(mdic);
   10787 
   10788 	/*
   10789 	 * Allow some time after each MDIC transaction to avoid
   10790 	 * reading duplicate data in the next MDIC transaction.
   10791 	 */
   10792 	if (sc->sc_type == WM_T_PCH2)
   10793 		delay(100);
   10794 
   10795 	return 0;
   10796 }
   10797 
   10798 /*
   10799  * wm_gmii_mdic_writereg:	[mii interface function]
   10800  *
   10801  *	Write a PHY register on the GMII.
   10802  */
   10803 static int
   10804 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10805 {
   10806 	struct wm_softc *sc = device_private(dev);
   10807 	uint32_t mdic = 0;
   10808 	int i;
   10809 
   10810 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10811 	    && (reg > MII_ADDRMASK)) {
   10812 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10813 		    __func__, sc->sc_phytype, reg);
   10814 		reg &= MII_ADDRMASK;
   10815 	}
   10816 
   10817 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10818 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10819 
   10820 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10821 		delay(50);
   10822 		mdic = CSR_READ(sc, WMREG_MDIC);
   10823 		if (mdic & MDIC_READY)
   10824 			break;
   10825 	}
   10826 
   10827 	if ((mdic & MDIC_READY) == 0) {
   10828 		DPRINTF(WM_DEBUG_GMII,
   10829 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10830 			device_xname(dev), phy, reg));
   10831 		return ETIMEDOUT;
   10832 	} else if (mdic & MDIC_E) {
   10833 		DPRINTF(WM_DEBUG_GMII,
   10834 		    ("%s: MDIC write error: phy %d reg %d\n",
   10835 			device_xname(dev), phy, reg));
   10836 		return -1;
   10837 	}
   10838 
   10839 	/*
   10840 	 * Allow some time after each MDIC transaction to avoid
   10841 	 * reading duplicate data in the next MDIC transaction.
   10842 	 */
   10843 	if (sc->sc_type == WM_T_PCH2)
   10844 		delay(100);
   10845 
   10846 	return 0;
   10847 }
   10848 
   10849 /*
   10850  * wm_gmii_i82544_readreg:	[mii interface function]
   10851  *
   10852  *	Read a PHY register on the GMII.
   10853  */
   10854 static int
   10855 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10856 {
   10857 	struct wm_softc *sc = device_private(dev);
   10858 	int rv;
   10859 
   10860 	if (sc->phy.acquire(sc)) {
   10861 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10862 		return -1;
   10863 	}
   10864 
   10865 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10866 
   10867 	sc->phy.release(sc);
   10868 
   10869 	return rv;
   10870 }
   10871 
   10872 static int
   10873 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10874 {
   10875 	struct wm_softc *sc = device_private(dev);
   10876 	int rv;
   10877 
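	/*
	 * Registers above the multi-page boundary need a page select
	 * write first on IGP PHYs; the page is encoded in the upper bits
	 * of `reg' and masked off with MII_ADDRMASK below.
	 */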
   10878 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10879 		switch (sc->sc_phytype) {
   10880 		case WMPHY_IGP:
   10881 		case WMPHY_IGP_2:
   10882 		case WMPHY_IGP_3:
   10883 			rv = wm_gmii_mdic_writereg(dev, phy,
   10884 			    MII_IGPHY_PAGE_SELECT, reg);
   10885 			if (rv != 0)
   10886 				return rv;
   10887 			break;
   10888 		default:
   10889 #ifdef WM_DEBUG
   10890 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10891 			    __func__, sc->sc_phytype, reg);
   10892 #endif
   10893 			break;
   10894 		}
   10895 	}
   10896 
   10897 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10898 }
   10899 
   10900 /*
   10901  * wm_gmii_i82544_writereg:	[mii interface function]
   10902  *
   10903  *	Write a PHY register on the GMII.
   10904  */
   10905 static int
   10906 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10907 {
   10908 	struct wm_softc *sc = device_private(dev);
   10909 	int rv;
   10910 
   10911 	if (sc->phy.acquire(sc)) {
   10912 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10913 		return -1;
   10914 	}
   10915 
   10916 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10917 	sc->phy.release(sc);
   10918 
   10919 	return rv;
   10920 }
   10921 
   10922 static int
   10923 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10924 {
   10925 	struct wm_softc *sc = device_private(dev);
   10926 	int rv;
   10927 
   10928 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10929 		switch (sc->sc_phytype) {
   10930 		case WMPHY_IGP:
   10931 		case WMPHY_IGP_2:
   10932 		case WMPHY_IGP_3:
   10933 			rv = wm_gmii_mdic_writereg(dev, phy,
   10934 			    MII_IGPHY_PAGE_SELECT, reg);
   10935 			if (rv != 0)
   10936 				return rv;
   10937 			break;
   10938 		default:
   10939 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x\n",
   10941 			    __func__, sc->sc_phytype, reg);
   10942 #endif
   10943 			break;
   10944 		}
   10945 	}
   10946 
   10947 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10948 }
   10949 
   10950 /*
   10951  * wm_gmii_i80003_readreg:	[mii interface function]
   10952  *
 *	Read a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10956  */
   10957 static int
   10958 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10959 {
   10960 	struct wm_softc *sc = device_private(dev);
   10961 	int page_select;
   10962 	uint16_t temp, temp2;
   10963 	int rv = 0;
   10964 
   10965 	if (phy != 1) /* Only one PHY on kumeran bus */
   10966 		return -1;
   10967 
   10968 	if (sc->phy.acquire(sc)) {
   10969 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10970 		return -1;
   10971 	}
   10972 
   10973 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10974 		page_select = GG82563_PHY_PAGE_SELECT;
   10975 	else {
   10976 		/*
   10977 		 * Use Alternative Page Select register to access registers
   10978 		 * 30 and 31.
   10979 		 */
   10980 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10981 	}
   10982 	temp = reg >> GG82563_PAGE_SHIFT;
   10983 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10984 		goto out;
   10985 
   10986 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
		 */
   10991 		delay(200);
   10992 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10993 		if ((rv != 0) || (temp2 != temp)) {
   10994 			device_printf(dev, "%s failed\n", __func__);
   10995 			rv = -1;
   10996 			goto out;
   10997 		}
   10998 		delay(200);
   10999 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11000 		delay(200);
   11001 	} else
   11002 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11003 
   11004 out:
   11005 	sc->phy.release(sc);
   11006 	return rv;
   11007 }
   11008 
   11009 /*
   11010  * wm_gmii_i80003_writereg:	[mii interface function]
   11011  *
 *	Write a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11015  */
   11016 static int
   11017 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11018 {
   11019 	struct wm_softc *sc = device_private(dev);
   11020 	int page_select, rv;
   11021 	uint16_t temp, temp2;
   11022 
   11023 	if (phy != 1) /* Only one PHY on kumeran bus */
   11024 		return -1;
   11025 
   11026 	if (sc->phy.acquire(sc)) {
   11027 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11028 		return -1;
   11029 	}
   11030 
   11031 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11032 		page_select = GG82563_PHY_PAGE_SELECT;
   11033 	else {
   11034 		/*
   11035 		 * Use Alternative Page Select register to access registers
   11036 		 * 30 and 31.
   11037 		 */
   11038 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11039 	}
   11040 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11041 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11042 		goto out;
   11043 
   11044 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
		 */
   11049 		delay(200);
   11050 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11051 		if ((rv != 0) || (temp2 != temp)) {
   11052 			device_printf(dev, "%s failed\n", __func__);
   11053 			rv = -1;
   11054 			goto out;
   11055 		}
   11056 		delay(200);
   11057 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11058 		delay(200);
   11059 	} else
   11060 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11061 
   11062 out:
   11063 	sc->phy.release(sc);
   11064 	return rv;
   11065 }
   11066 
   11067 /*
   11068  * wm_gmii_bm_readreg:	[mii interface function]
   11069  *
 *	Read a PHY register on a BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11073  */
   11074 static int
   11075 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11076 {
   11077 	struct wm_softc *sc = device_private(dev);
   11078 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11079 	int rv;
   11080 
   11081 	if (sc->phy.acquire(sc)) {
   11082 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11083 		return -1;
   11084 	}
   11085 
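	/*
	 * On BM PHYs (except on the 82574 and 82583), the page select,
	 * port control and wakeup registers are only reachable at PHY
	 * address 1.
	 */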
   11086 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11087 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11088 		    || (reg == 31)) ? 1 : phy;
	/* Page 800 works differently from the rest; it has its own function */
   11090 	if (page == BM_WUC_PAGE) {
   11091 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11092 		goto release;
   11093 	}
   11094 
   11095 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11096 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11097 		    && (sc->sc_type != WM_T_82583))
   11098 			rv = wm_gmii_mdic_writereg(dev, phy,
   11099 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11100 		else
   11101 			rv = wm_gmii_mdic_writereg(dev, phy,
   11102 			    BME1000_PHY_PAGE_SELECT, page);
   11103 		if (rv != 0)
   11104 			goto release;
   11105 	}
   11106 
   11107 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11108 
   11109 release:
   11110 	sc->phy.release(sc);
   11111 	return rv;
   11112 }
   11113 
   11114 /*
   11115  * wm_gmii_bm_writereg:	[mii interface function]
   11116  *
 *	Write a PHY register on a BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11120  */
   11121 static int
   11122 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11123 {
   11124 	struct wm_softc *sc = device_private(dev);
   11125 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11126 	int rv;
   11127 
   11128 	if (sc->phy.acquire(sc)) {
   11129 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11130 		return -1;
   11131 	}
   11132 
   11133 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11134 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11135 		    || (reg == 31)) ? 1 : phy;
   11136 	/* Page 800 works differently than the rest so it has its own func */
   11137 	if (page == BM_WUC_PAGE) {
   11138 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11139 		goto release;
   11140 	}
   11141 
   11142 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11143 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11144 		    && (sc->sc_type != WM_T_82583))
   11145 			rv = wm_gmii_mdic_writereg(dev, phy,
   11146 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11147 		else
   11148 			rv = wm_gmii_mdic_writereg(dev, phy,
   11149 			    BME1000_PHY_PAGE_SELECT, page);
   11150 		if (rv != 0)
   11151 			goto release;
   11152 	}
   11153 
   11154 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11155 
   11156 release:
   11157 	sc->phy.release(sc);
   11158 	return rv;
   11159 }
   11160 
   11161 /*
   11162  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
 *  @dev: pointer to the device
 *  @phy_regp: pointer to store original contents of BM_WUC_ENABLE_REG
   11165  *
   11166  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11167  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11168  */
   11169 static int
   11170 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11171 {
   11172 	uint16_t temp;
   11173 	int rv;
   11174 
   11175 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11176 		device_xname(dev), __func__));
   11177 
   11178 	if (!phy_regp)
   11179 		return -1;
   11180 
   11181 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11182 
   11183 	/* Select Port Control Registers page */
   11184 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11185 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11186 	if (rv != 0)
   11187 		return rv;
   11188 
   11189 	/* Read WUCE and save it */
   11190 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11191 	if (rv != 0)
   11192 		return rv;
   11193 
   11194 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11195 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11196 	 */
   11197 	temp = *phy_regp;
   11198 	temp |= BM_WUC_ENABLE_BIT;
   11199 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11200 
   11201 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11202 		return rv;
   11203 
   11204 	/* Select Host Wakeup Registers page - caller now able to write
   11205 	 * registers on the Wakeup registers page
   11206 	 */
   11207 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11208 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11209 }
   11210 
   11211 /*
   11212  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
 *  @dev: pointer to the device
 *  @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
   11215  *
   11216  *  Restore BM_WUC_ENABLE_REG to its original value.
   11217  *
   11218  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11219  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11220  *  caller.
   11221  */
   11222 static int
   11223 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11224 {
   11225 
   11226 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11227 		device_xname(dev), __func__));
   11228 
   11229 	if (!phy_regp)
   11230 		return -1;
   11231 
   11232 	/* Select Port Control Registers page */
   11233 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11234 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11235 
   11236 	/* Restore 769.17 to its original value */
   11237 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11238 
   11239 	return 0;
   11240 }
   11241 
   11242 /*
   11243  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 *  @dev: pointer to the device
 *  @offset: register offset to be read or written
 *  @val: pointer to the data to read or write
 *  @rd: determines if operation is read or write
 *  @page_set: BM_WUC_PAGE already set and access enabled
 *
 *  Read the PHY register at offset and store the retrieved information in
 *  data, or write data to PHY register at offset.  Note that the procedure
 *  to access the PHY wakeup registers differs from that for the other PHY
 *  registers. It works as such:
 *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 if we were the manageability engine)
 *  3) Write the address using the address opcode (0x11)
 *  4) Read or write the data using the data opcode (0x12)
 *  5) Restore 769.17.2 to its original value
 *
 *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
 *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
 *
 *  Assumes the semaphore is already acquired.  When page_set==TRUE, assumes
 *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calls to
 *  wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11266  */
   11267 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, bool rd,
	bool page_set)
   11270 {
   11271 	struct wm_softc *sc = device_private(dev);
   11272 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11273 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11274 	uint16_t wuce;
   11275 	int rv = 0;
   11276 
   11277 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11278 		device_xname(dev), __func__));
   11279 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11280 	if ((sc->sc_type == WM_T_PCH)
   11281 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11282 		device_printf(dev,
   11283 		    "Attempting to access page %d while gig enabled.\n", page);
   11284 	}
   11285 
   11286 	if (!page_set) {
   11287 		/* Enable access to PHY wakeup registers */
   11288 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11289 		if (rv != 0) {
   11290 			device_printf(dev,
   11291 			    "%s: Could not enable PHY wakeup reg access\n",
   11292 			    __func__);
   11293 			return rv;
   11294 		}
   11295 	}
   11296 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11297 		device_xname(sc->sc_dev), __func__, page, regnum));
   11298 
   11299 	/*
   11300 	 * 2) Access PHY wakeup register.
   11301 	 * See wm_access_phy_wakeup_reg_bm.
   11302 	 */
   11303 
	/* Write the Wakeup register page offset value using opcode 0x11 */
	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
	if (rv == 0) {
		if (rd) {
			/* Read the Wakeup register page value (opcode 0x12) */
			rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE,
			    val);
		} else {
			/* Write the Wakeup register page value (opcode 0x12) */
			rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE,
			    *val);
		}
	}

	/*
	 * Restore 769.17.2 even on error so that wakeup register access
	 * is not left enabled behind the caller's back.
	 */
	if (!page_set) {
		int rv2;

		rv2 = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
		if (rv == 0)
			rv = rv2;
	}

	return rv;
   11323 }
   11324 
   11325 /*
   11326  * wm_gmii_hv_readreg:	[mii interface function]
   11327  *
   11328  *	Read a PHY register on the kumeran
   11329  * This could be handled by the PHY layer if we didn't have to lock the
   11330  * ressource ...
   11331  */
   11332 static int
   11333 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11334 {
   11335 	struct wm_softc *sc = device_private(dev);
   11336 	int rv;
   11337 
   11338 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11339 		device_xname(dev), __func__));
   11340 	if (sc->phy.acquire(sc)) {
   11341 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11342 		return -1;
   11343 	}
   11344 
   11345 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11346 	sc->phy.release(sc);
   11347 	return rv;
   11348 }
   11349 
   11350 static int
   11351 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11352 {
   11353 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11354 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11355 	int rv;
   11356 
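	/* Registers on pages >= HV_INTC_FC_PAGE_START (768) use address 1 */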
   11357 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11358 
   11359 	/* Page 800 works differently than the rest so it has its own func */
   11360 	if (page == BM_WUC_PAGE)
   11361 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11362 
   11363 	/*
   11364 	 * Lower than page 768 works differently than the rest so it has its
   11365 	 * own func
   11366 	 */
   11367 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11368 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11369 		return -1;
   11370 	}
   11371 
   11372 	/*
   11373 	 * XXX I21[789] documents say that the SMBus Address register is at
   11374 	 * PHY address 01, Page 0 (not 768), Register 26.
   11375 	 */
   11376 	if (page == HV_INTC_FC_PAGE_START)
   11377 		page = 0;
   11378 
   11379 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11380 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11381 		    page << BME1000_PAGE_SHIFT);
   11382 		if (rv != 0)
   11383 			return rv;
   11384 	}
   11385 
   11386 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11387 }
   11388 
   11389 /*
   11390  * wm_gmii_hv_writereg:	[mii interface function]
   11391  *
   11392  *	Write a PHY register on the kumeran.
   11393  * This could be handled by the PHY layer if we didn't have to lock the
   11394  * ressource ...
   11395  */
   11396 static int
   11397 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11398 {
   11399 	struct wm_softc *sc = device_private(dev);
   11400 	int rv;
   11401 
   11402 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11403 		device_xname(dev), __func__));
   11404 
   11405 	if (sc->phy.acquire(sc)) {
   11406 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11407 		return -1;
   11408 	}
   11409 
   11410 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11411 	sc->phy.release(sc);
   11412 
   11413 	return rv;
   11414 }
   11415 
   11416 static int
   11417 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11418 {
   11419 	struct wm_softc *sc = device_private(dev);
   11420 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11421 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11422 	int rv;
   11423 
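	/* Registers on pages >= HV_INTC_FC_PAGE_START (768) use address 1 */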
   11424 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11425 
   11426 	/* Page 800 works differently than the rest so it has its own func */
   11427 	if (page == BM_WUC_PAGE)
   11428 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11429 		    false);
   11430 
   11431 	/*
   11432 	 * Lower than page 768 works differently than the rest so it has its
   11433 	 * own func
   11434 	 */
   11435 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11436 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11437 		return -1;
   11438 	}
   11439 
   11440 	{
   11441 		/*
   11442 		 * XXX I21[789] documents say that the SMBus Address register
   11443 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11444 		 */
   11445 		if (page == HV_INTC_FC_PAGE_START)
   11446 			page = 0;
   11447 
   11448 		/*
   11449 		 * XXX Workaround MDIO accesses being disabled after entering
   11450 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11451 		 * register is set)
   11452 		 */
   11453 		if (sc->sc_phytype == WMPHY_82578) {
   11454 			struct mii_softc *child;
   11455 
   11456 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11457 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11458 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11459 			    && ((val & (1 << 11)) != 0)) {
   11460 				device_printf(dev, "XXX need workaround\n");
   11461 			}
   11462 		}
   11463 
   11464 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11465 			rv = wm_gmii_mdic_writereg(dev, 1,
   11466 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11467 			if (rv != 0)
   11468 				return rv;
   11469 		}
   11470 	}
   11471 
   11472 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11473 }
   11474 
   11475 /*
   11476  * wm_gmii_82580_readreg:	[mii interface function]
   11477  *
   11478  *	Read a PHY register on the 82580 and I350.
   11479  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11481  */
   11482 static int
   11483 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11484 {
   11485 	struct wm_softc *sc = device_private(dev);
   11486 	int rv;
   11487 
   11488 	if (sc->phy.acquire(sc) != 0) {
   11489 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11490 		return -1;
   11491 	}
   11492 
   11493 #ifdef DIAGNOSTIC
   11494 	if (reg > MII_ADDRMASK) {
   11495 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11496 		    __func__, sc->sc_phytype, reg);
   11497 		reg &= MII_ADDRMASK;
   11498 	}
   11499 #endif
   11500 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11501 
   11502 	sc->phy.release(sc);
   11503 	return rv;
   11504 }
   11505 
   11506 /*
   11507  * wm_gmii_82580_writereg:	[mii interface function]
   11508  *
   11509  *	Write a PHY register on the 82580 and I350.
   11510  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11512  */
   11513 static int
   11514 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11515 {
   11516 	struct wm_softc *sc = device_private(dev);
   11517 	int rv;
   11518 
   11519 	if (sc->phy.acquire(sc) != 0) {
   11520 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11521 		return -1;
   11522 	}
   11523 
   11524 #ifdef DIAGNOSTIC
   11525 	if (reg > MII_ADDRMASK) {
   11526 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11527 		    __func__, sc->sc_phytype, reg);
   11528 		reg &= MII_ADDRMASK;
   11529 	}
   11530 #endif
   11531 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11532 
   11533 	sc->phy.release(sc);
   11534 	return rv;
   11535 }
   11536 
   11537 /*
   11538  * wm_gmii_gs40g_readreg:	[mii interface function]
   11539  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11543  */
   11544 static int
   11545 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11546 {
   11547 	struct wm_softc *sc = device_private(dev);
   11548 	int page, offset;
   11549 	int rv;
   11550 
   11551 	/* Acquire semaphore */
   11552 	if (sc->phy.acquire(sc)) {
   11553 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11554 		return -1;
   11555 	}
   11556 
   11557 	/* Page select */
   11558 	page = reg >> GS40G_PAGE_SHIFT;
   11559 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11560 	if (rv != 0)
   11561 		goto release;
   11562 
   11563 	/* Read reg */
   11564 	offset = reg & GS40G_OFFSET_MASK;
   11565 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11566 
   11567 release:
   11568 	sc->phy.release(sc);
   11569 	return rv;
   11570 }
   11571 
   11572 /*
   11573  * wm_gmii_gs40g_writereg:	[mii interface function]
   11574  *
   11575  *	Write a PHY register on the I210 and I211.
   11576  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11578  */
   11579 static int
   11580 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11581 {
   11582 	struct wm_softc *sc = device_private(dev);
   11583 	uint16_t page;
   11584 	int offset, rv;
   11585 
   11586 	/* Acquire semaphore */
   11587 	if (sc->phy.acquire(sc)) {
   11588 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11589 		return -1;
   11590 	}
   11591 
   11592 	/* Page select */
   11593 	page = reg >> GS40G_PAGE_SHIFT;
   11594 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11595 	if (rv != 0)
   11596 		goto release;
   11597 
   11598 	/* Write reg */
   11599 	offset = reg & GS40G_OFFSET_MASK;
   11600 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11601 
   11602 release:
   11603 	/* Release semaphore */
   11604 	sc->phy.release(sc);
   11605 	return rv;
   11606 }
   11607 
   11608 /*
   11609  * wm_gmii_statchg:	[mii interface function]
   11610  *
   11611  *	Callback from MII layer when media changes.
   11612  */
   11613 static void
   11614 wm_gmii_statchg(struct ifnet *ifp)
   11615 {
   11616 	struct wm_softc *sc = ifp->if_softc;
   11617 	struct mii_data *mii = &sc->sc_mii;
   11618 
   11619 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11620 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11621 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11622 
   11623 	/* Get flow control negotiation result. */
   11624 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11625 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11626 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11627 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11628 	}
   11629 
   11630 	if (sc->sc_flowflags & IFM_FLOW) {
   11631 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11632 			sc->sc_ctrl |= CTRL_TFCE;
   11633 			sc->sc_fcrtl |= FCRTL_XONE;
   11634 		}
   11635 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11636 			sc->sc_ctrl |= CTRL_RFCE;
   11637 	}
   11638 
   11639 	if (mii->mii_media_active & IFM_FDX) {
   11640 		DPRINTF(WM_DEBUG_LINK,
   11641 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11642 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11643 	} else {
   11644 		DPRINTF(WM_DEBUG_LINK,
   11645 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11646 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11647 	}
   11648 
   11649 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11650 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11651 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11652 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11653 	if (sc->sc_type == WM_T_80003) {
   11654 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11655 		case IFM_1000_T:
   11656 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11657 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11659 			break;
   11660 		default:
   11661 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11662 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11664 			break;
   11665 		}
   11666 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11667 	}
   11668 }
   11669 
   11670 /* kumeran related (80003, ICH* and PCH*) */
   11671 
   11672 /*
   11673  * wm_kmrn_readreg:
   11674  *
   11675  *	Read a kumeran register
   11676  */
   11677 static int
   11678 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11679 {
   11680 	int rv;
   11681 
   11682 	if (sc->sc_type == WM_T_80003)
   11683 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11684 	else
   11685 		rv = sc->phy.acquire(sc);
   11686 	if (rv != 0) {
   11687 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11688 		    __func__);
   11689 		return rv;
   11690 	}
   11691 
   11692 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11693 
   11694 	if (sc->sc_type == WM_T_80003)
   11695 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11696 	else
   11697 		sc->phy.release(sc);
   11698 
   11699 	return rv;
   11700 }
   11701 
   11702 static int
   11703 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11704 {
   11705 
   11706 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11707 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11708 	    KUMCTRLSTA_REN);
   11709 	CSR_WRITE_FLUSH(sc);
   11710 	delay(2);
   11711 
   11712 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11713 
   11714 	return 0;
   11715 }
   11716 
   11717 /*
   11718  * wm_kmrn_writereg:
   11719  *
   11720  *	Write a kumeran register
   11721  */
   11722 static int
   11723 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11724 {
   11725 	int rv;
   11726 
   11727 	if (sc->sc_type == WM_T_80003)
   11728 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11729 	else
   11730 		rv = sc->phy.acquire(sc);
   11731 	if (rv != 0) {
   11732 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11733 		    __func__);
   11734 		return rv;
   11735 	}
   11736 
   11737 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11738 
   11739 	if (sc->sc_type == WM_T_80003)
   11740 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11741 	else
   11742 		sc->phy.release(sc);
   11743 
   11744 	return rv;
   11745 }
   11746 
   11747 static int
   11748 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11749 {
   11750 
   11751 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11752 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11753 
   11754 	return 0;
   11755 }
   11756 
   11757 /*
   11758  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11759  * This access method is different from IEEE MMD.
   11760  */
   11761 static int
   11762 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11763 {
   11764 	struct wm_softc *sc = device_private(dev);
   11765 	int rv;
   11766 
   11767 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11768 	if (rv != 0)
   11769 		return rv;
   11770 
   11771 	if (rd)
   11772 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11773 	else
   11774 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11775 	return rv;
   11776 }
   11777 
   11778 static int
   11779 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11780 {
   11781 
   11782 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11783 }
   11784 
   11785 static int
   11786 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11787 {
   11788 
   11789 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11790 }
   11791 
   11792 /* SGMII related */
   11793 
   11794 /*
   11795  * wm_sgmii_uses_mdio
   11796  *
   11797  * Check whether the transaction is to the internal PHY or the external
   11798  * MDIO interface. Return true if it's MDIO.
   11799  */
   11800 static bool
   11801 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11802 {
   11803 	uint32_t reg;
   11804 	bool ismdio = false;
   11805 
   11806 	switch (sc->sc_type) {
   11807 	case WM_T_82575:
   11808 	case WM_T_82576:
   11809 		reg = CSR_READ(sc, WMREG_MDIC);
   11810 		ismdio = ((reg & MDIC_DEST) != 0);
   11811 		break;
   11812 	case WM_T_82580:
   11813 	case WM_T_I350:
   11814 	case WM_T_I354:
   11815 	case WM_T_I210:
   11816 	case WM_T_I211:
   11817 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11818 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11819 		break;
   11820 	default:
   11821 		break;
   11822 	}
   11823 
   11824 	return ismdio;
   11825 }
   11826 
   11827 /*
   11828  * wm_sgmii_readreg:	[mii interface function]
   11829  *
   11830  *	Read a PHY register on the SGMII
   11831  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11833  */
   11834 static int
   11835 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11836 {
   11837 	struct wm_softc *sc = device_private(dev);
   11838 	int rv;
   11839 
   11840 	if (sc->phy.acquire(sc)) {
   11841 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11842 		return -1;
   11843 	}
   11844 
   11845 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11846 
   11847 	sc->phy.release(sc);
   11848 	return rv;
   11849 }
   11850 
   11851 static int
   11852 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11853 {
   11854 	struct wm_softc *sc = device_private(dev);
   11855 	uint32_t i2ccmd;
   11856 	int i, rv = 0;
   11857 
   11858 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11859 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11860 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11861 
   11862 	/* Poll the ready bit */
   11863 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11864 		delay(50);
   11865 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11866 		if (i2ccmd & I2CCMD_READY)
   11867 			break;
   11868 	}
   11869 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11870 		device_printf(dev, "I2CCMD Read did not complete\n");
   11871 		rv = ETIMEDOUT;
   11872 	}
   11873 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11874 		if (!sc->phy.no_errprint)
   11875 			device_printf(dev, "I2CCMD Error bit set\n");
   11876 		rv = EIO;
   11877 	}
   11878 
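	/* Swap the data bytes for the I2C interface */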
   11879 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11880 
   11881 	return rv;
   11882 }
   11883 
   11884 /*
   11885  * wm_sgmii_writereg:	[mii interface function]
   11886  *
   11887  *	Write a PHY register on the SGMII.
   11888  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11890  */
   11891 static int
   11892 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11893 {
   11894 	struct wm_softc *sc = device_private(dev);
   11895 	int rv;
   11896 
   11897 	if (sc->phy.acquire(sc) != 0) {
   11898 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11899 		return -1;
   11900 	}
   11901 
   11902 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11903 
   11904 	sc->phy.release(sc);
   11905 
   11906 	return rv;
   11907 }
   11908 
   11909 static int
   11910 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11911 {
   11912 	struct wm_softc *sc = device_private(dev);
   11913 	uint32_t i2ccmd;
   11914 	uint16_t swapdata;
   11915 	int rv = 0;
   11916 	int i;
   11917 
   11918 	/* Swap the data bytes for the I2C interface */
   11919 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11920 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11921 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11922 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11923 
   11924 	/* Poll the ready bit */
   11925 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11926 		delay(50);
   11927 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11928 		if (i2ccmd & I2CCMD_READY)
   11929 			break;
   11930 	}
   11931 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11932 		device_printf(dev, "I2CCMD Write did not complete\n");
   11933 		rv = ETIMEDOUT;
   11934 	}
   11935 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11936 		device_printf(dev, "I2CCMD Error bit set\n");
   11937 		rv = EIO;
   11938 	}
   11939 
   11940 	return rv;
   11941 }
   11942 
   11943 /* TBI related */
   11944 
   11945 static bool
   11946 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11947 {
   11948 	bool sig;
   11949 
   11950 	sig = ctrl & CTRL_SWDPIN(1);
   11951 
   11952 	/*
   11953 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11954 	 * detect a signal, 1 if they don't.
   11955 	 */
   11956 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11957 		sig = !sig;
   11958 
   11959 	return sig;
   11960 }
   11961 
   11962 /*
   11963  * wm_tbi_mediainit:
   11964  *
   11965  *	Initialize media for use on 1000BASE-X devices.
   11966  */
   11967 static void
   11968 wm_tbi_mediainit(struct wm_softc *sc)
   11969 {
   11970 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11971 	const char *sep = "";
   11972 
   11973 	if (sc->sc_type < WM_T_82543)
   11974 		sc->sc_tipg = TIPG_WM_DFLT;
   11975 	else
   11976 		sc->sc_tipg = TIPG_LG_DFLT;
   11977 
   11978 	sc->sc_tbi_serdes_anegticks = 5;
   11979 
   11980 	/* Initialize our media structures */
   11981 	sc->sc_mii.mii_ifp = ifp;
   11982 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11983 
   11984 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11985 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11986 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   11987 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   11988 		    sc->sc_core_lock);
   11989 	} else {
   11990 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   11991 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   11992 	}
   11993 
   11994 	/*
   11995 	 * SWD Pins:
   11996 	 *
   11997 	 *	0 = Link LED (output)
   11998 	 *	1 = Loss Of Signal (input)
   11999 	 */
   12000 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12001 
   12002 	/* XXX Perhaps this is only for TBI */
   12003 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12004 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12005 
   12006 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12007 		sc->sc_ctrl &= ~CTRL_LRST;
   12008 
   12009 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12010 
   12011 #define	ADD(ss, mm, dd)							\
   12012 do {									\
   12013 	aprint_normal("%s%s", sep, ss);					\
   12014 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12015 	sep = ", ";							\
   12016 } while (/*CONSTCOND*/0)
   12017 
   12018 	aprint_normal_dev(sc->sc_dev, "");
   12019 
   12020 	if (sc->sc_type == WM_T_I354) {
   12021 		uint32_t status;
   12022 
   12023 		status = CSR_READ(sc, WMREG_STATUS);
   12024 		if (((status & STATUS_2P5_SKU) != 0)
   12025 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,
			    ANAR_X_FD);
		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,
			    ANAR_X_FD);
   12029 	} else if (sc->sc_type == WM_T_82545) {
   12030 		/* Only 82545 is LX (XXX except SFP) */
   12031 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12032 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12033 	} else if (sc->sc_sfptype != 0) {
   12034 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12035 		switch (sc->sc_sfptype) {
   12036 		default:
   12037 		case SFF_SFP_ETH_FLAGS_1000SX:
   12038 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12039 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12040 			break;
   12041 		case SFF_SFP_ETH_FLAGS_1000LX:
   12042 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12043 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12044 			break;
   12045 		case SFF_SFP_ETH_FLAGS_1000CX:
   12046 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12047 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12048 			break;
   12049 		case SFF_SFP_ETH_FLAGS_1000T:
   12050 			ADD("1000baseT", IFM_1000_T, 0);
   12051 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12052 			break;
   12053 		case SFF_SFP_ETH_FLAGS_100FX:
   12054 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12055 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12056 			break;
   12057 		}
   12058 	} else {
   12059 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12060 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12061 	}
   12062 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12063 	aprint_normal("\n");
   12064 
   12065 #undef ADD
   12066 
   12067 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12068 }
   12069 
   12070 /*
   12071  * wm_tbi_mediachange:	[ifmedia interface function]
   12072  *
   12073  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12074  */
   12075 static int
   12076 wm_tbi_mediachange(struct ifnet *ifp)
   12077 {
   12078 	struct wm_softc *sc = ifp->if_softc;
   12079 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12080 	uint32_t status, ctrl;
   12081 	bool signal;
   12082 	int i;
   12083 
   12084 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12085 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12086 		/* XXX need some work for >= 82571 and < 82575 */
   12087 		if (sc->sc_type < WM_T_82575)
   12088 			return 0;
   12089 	}
   12090 
   12091 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12092 	    || (sc->sc_type >= WM_T_82575))
   12093 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12094 
   12095 	sc->sc_ctrl &= ~CTRL_LRST;
   12096 	sc->sc_txcw = TXCW_ANE;
   12097 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12098 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12099 	else if (ife->ifm_media & IFM_FDX)
   12100 		sc->sc_txcw |= TXCW_FD;
   12101 	else
   12102 		sc->sc_txcw |= TXCW_HD;
   12103 
   12104 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12105 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12106 
   12107 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12108 		device_xname(sc->sc_dev), sc->sc_txcw));
   12109 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12110 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12111 	CSR_WRITE_FLUSH(sc);
   12112 	delay(1000);
   12113 
   12114 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12115 	signal = wm_tbi_havesignal(sc, ctrl);
   12116 
   12117 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12118 		signal));
   12119 
   12120 	if (signal) {
   12121 		/* Have signal; wait for the link to come up. */
   12122 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12123 			delay(10000);
   12124 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12125 				break;
   12126 		}
   12127 
   12128 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12129 			device_xname(sc->sc_dev), i));
   12130 
   12131 		status = CSR_READ(sc, WMREG_STATUS);
   12132 		DPRINTF(WM_DEBUG_LINK,
   12133 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12134 			device_xname(sc->sc_dev), status, STATUS_LU));
   12135 		if (status & STATUS_LU) {
   12136 			/* Link is up. */
   12137 			DPRINTF(WM_DEBUG_LINK,
   12138 			    ("%s: LINK: set media -> link up %s\n",
   12139 				device_xname(sc->sc_dev),
   12140 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12141 
   12142 			/*
   12143 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12144 			 * so we should update sc->sc_ctrl
   12145 			 */
   12146 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12147 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12148 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12149 			if (status & STATUS_FD)
   12150 				sc->sc_tctl |=
   12151 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12152 			else
   12153 				sc->sc_tctl |=
   12154 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12155 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12156 				sc->sc_fcrtl |= FCRTL_XONE;
   12157 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12158 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12159 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12160 			sc->sc_tbi_linkup = 1;
   12161 		} else {
   12162 			if (i == WM_LINKUP_TIMEOUT)
   12163 				wm_check_for_link(sc);
   12164 			/* Link is down. */
   12165 			DPRINTF(WM_DEBUG_LINK,
   12166 			    ("%s: LINK: set media -> link down\n",
   12167 				device_xname(sc->sc_dev)));
   12168 			sc->sc_tbi_linkup = 0;
   12169 		}
   12170 	} else {
   12171 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12172 			device_xname(sc->sc_dev)));
   12173 		sc->sc_tbi_linkup = 0;
   12174 	}
   12175 
   12176 	wm_tbi_serdes_set_linkled(sc);
   12177 
   12178 	return 0;
   12179 }
   12180 
   12181 /*
   12182  * wm_tbi_mediastatus:	[ifmedia interface function]
   12183  *
   12184  *	Get the current interface media status on a 1000BASE-X device.
   12185  */
   12186 static void
   12187 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12188 {
   12189 	struct wm_softc *sc = ifp->if_softc;
   12190 	uint32_t ctrl, status;
   12191 
   12192 	ifmr->ifm_status = IFM_AVALID;
   12193 	ifmr->ifm_active = IFM_ETHER;
   12194 
   12195 	status = CSR_READ(sc, WMREG_STATUS);
   12196 	if ((status & STATUS_LU) == 0) {
   12197 		ifmr->ifm_active |= IFM_NONE;
   12198 		return;
   12199 	}
   12200 
   12201 	ifmr->ifm_status |= IFM_ACTIVE;
   12202 	/* Only 82545 is LX */
   12203 	if (sc->sc_type == WM_T_82545)
   12204 		ifmr->ifm_active |= IFM_1000_LX;
   12205 	else
   12206 		ifmr->ifm_active |= IFM_1000_SX;
   12207 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12208 		ifmr->ifm_active |= IFM_FDX;
   12209 	else
   12210 		ifmr->ifm_active |= IFM_HDX;
   12211 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12212 	if (ctrl & CTRL_RFCE)
   12213 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12214 	if (ctrl & CTRL_TFCE)
   12215 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12216 }
   12217 
   12218 /* XXX TBI only */
   12219 static int
   12220 wm_check_for_link(struct wm_softc *sc)
   12221 {
   12222 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12223 	uint32_t rxcw;
   12224 	uint32_t ctrl;
   12225 	uint32_t status;
   12226 	bool signal;
   12227 
   12228 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12229 		device_xname(sc->sc_dev), __func__));
   12230 
   12231 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12232 		/* XXX need some work for >= 82571 */
   12233 		if (sc->sc_type >= WM_T_82571) {
   12234 			sc->sc_tbi_linkup = 1;
   12235 			return 0;
   12236 		}
   12237 	}
   12238 
   12239 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12240 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12241 	status = CSR_READ(sc, WMREG_STATUS);
   12242 	signal = wm_tbi_havesignal(sc, ctrl);
   12243 
   12244 	DPRINTF(WM_DEBUG_LINK,
   12245 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12246 		device_xname(sc->sc_dev), __func__, signal,
   12247 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12248 
   12249 	/*
   12250 	 * SWDPIN   LU RXCW
   12251 	 *	0    0	  0
   12252 	 *	0    0	  1	(should not happen)
   12253 	 *	0    1	  0	(should not happen)
   12254 	 *	0    1	  1	(should not happen)
   12255 	 *	1    0	  0	Disable autonego and force linkup
   12256 	 *	1    0	  1	got /C/ but not linkup yet
   12257 	 *	1    1	  0	(linkup)
   12258 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12259 	 *
   12260 	 */
   12261 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12262 		DPRINTF(WM_DEBUG_LINK,
   12263 		    ("%s: %s: force linkup and fullduplex\n",
   12264 			device_xname(sc->sc_dev), __func__));
   12265 		sc->sc_tbi_linkup = 0;
   12266 		/* Disable auto-negotiation in the TXCW register */
   12267 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12268 
   12269 		/*
   12270 		 * Force link-up and also force full-duplex.
   12271 		 *
   12272 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
   12273 		 * so we should update sc->sc_ctrl
   12274 		 */
   12275 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12276 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12277 	} else if (((status & STATUS_LU) != 0)
   12278 	    && ((rxcw & RXCW_C) != 0)
   12279 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12280 		sc->sc_tbi_linkup = 1;
   12281 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12282 			device_xname(sc->sc_dev),
   12283 			__func__));
   12284 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12285 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12286 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
			device_xname(sc->sc_dev), __func__));
   12289 	} else {
   12290 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12291 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12292 			status));
   12293 	}
   12294 
   12295 	return 0;
   12296 }
   12297 
   12298 /*
   12299  * wm_tbi_tick:
   12300  *
   12301  *	Check the link on TBI devices.
   12302  *	This function acts as mii_tick().
   12303  */
   12304 static void
   12305 wm_tbi_tick(struct wm_softc *sc)
   12306 {
   12307 	struct mii_data *mii = &sc->sc_mii;
   12308 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12309 	uint32_t status;
   12310 
   12311 	KASSERT(WM_CORE_LOCKED(sc));
   12312 
   12313 	status = CSR_READ(sc, WMREG_STATUS);
   12314 
   12315 	/* XXX is this needed? */
   12316 	(void)CSR_READ(sc, WMREG_RXCW);
   12317 	(void)CSR_READ(sc, WMREG_CTRL);
   12318 
   12319 	/* set link status */
   12320 	if ((status & STATUS_LU) == 0) {
   12321 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12322 			device_xname(sc->sc_dev)));
   12323 		sc->sc_tbi_linkup = 0;
   12324 	} else if (sc->sc_tbi_linkup == 0) {
   12325 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12326 			device_xname(sc->sc_dev),
   12327 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12328 		sc->sc_tbi_linkup = 1;
   12329 		sc->sc_tbi_serdes_ticks = 0;
   12330 	}
   12331 
   12332 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12333 		goto setled;
   12334 
   12335 	if ((status & STATUS_LU) == 0) {
   12336 		sc->sc_tbi_linkup = 0;
   12337 		/* If the timer expired, retry autonegotiation */
   12338 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12339 		    && (++sc->sc_tbi_serdes_ticks
   12340 			>= sc->sc_tbi_serdes_anegticks)) {
   12341 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12342 				device_xname(sc->sc_dev), __func__));
   12343 			sc->sc_tbi_serdes_ticks = 0;
   12344 			/*
   12345 			 * Reset the link, and let autonegotiation do
   12346 			 * its thing
   12347 			 */
   12348 			sc->sc_ctrl |= CTRL_LRST;
   12349 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12350 			CSR_WRITE_FLUSH(sc);
   12351 			delay(1000);
   12352 			sc->sc_ctrl &= ~CTRL_LRST;
   12353 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12354 			CSR_WRITE_FLUSH(sc);
   12355 			delay(1000);
   12356 			CSR_WRITE(sc, WMREG_TXCW,
   12357 			    sc->sc_txcw & ~TXCW_ANE);
   12358 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12359 		}
   12360 	}
   12361 
   12362 setled:
   12363 	wm_tbi_serdes_set_linkled(sc);
   12364 }
   12365 
   12366 /* SERDES related */
   12367 static void
   12368 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12369 {
   12370 	uint32_t reg;
   12371 
   12372 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12373 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12374 		return;
   12375 
   12376 	/* Enable PCS to turn on link */
   12377 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12378 	reg |= PCS_CFG_PCS_EN;
   12379 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12380 
   12381 	/* Power up the laser */
   12382 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12383 	reg &= ~CTRL_EXT_SWDPIN(3);
   12384 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12385 
   12386 	/* Flush the write to verify completion */
   12387 	CSR_WRITE_FLUSH(sc);
   12388 	delay(1000);
   12389 }
   12390 
   12391 static int
   12392 wm_serdes_mediachange(struct ifnet *ifp)
   12393 {
   12394 	struct wm_softc *sc = ifp->if_softc;
   12395 	bool pcs_autoneg = true; /* XXX */
   12396 	uint32_t ctrl_ext, pcs_lctl, reg;
   12397 
   12398 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12399 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12400 		return 0;
   12401 
   12402 	/* XXX Currently, this function is not called on 8257[12] */
   12403 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12404 	    || (sc->sc_type >= WM_T_82575))
   12405 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12406 
   12407 	/* Power on the sfp cage if present */
   12408 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12409 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12410 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12411 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12412 
   12413 	sc->sc_ctrl |= CTRL_SLU;
   12414 
   12415 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12416 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12417 
   12418 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12419 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12420 	case CTRL_EXT_LINK_MODE_SGMII:
   12421 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12422 		pcs_autoneg = true;
		/* Autoneg timeout should be disabled for SGMII mode */
   12424 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12425 		break;
   12426 	case CTRL_EXT_LINK_MODE_1000KX:
   12427 		pcs_autoneg = false;
   12428 		/* FALLTHROUGH */
   12429 	default:
   12430 		if ((sc->sc_type == WM_T_82575)
   12431 		    || (sc->sc_type == WM_T_82576)) {
   12432 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12433 				pcs_autoneg = false;
   12434 		}
   12435 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12436 		    | CTRL_FRCFDX;
   12437 
   12438 		/* Set speed of 1000/Full if speed/duplex is forced */
   12439 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12440 	}
   12441 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12442 
   12443 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12444 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12445 
   12446 	if (pcs_autoneg) {
   12447 		/* Set PCS register for autoneg */
   12448 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12449 
   12450 		/* Disable force flow control for autoneg */
   12451 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12452 
   12453 		/* Configure flow control advertisement for autoneg */
   12454 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12455 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12456 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12457 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12458 	} else
   12459 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12460 
   12461 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12462 
   12463 	return 0;
   12464 }
   12465 
   12466 static void
   12467 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12468 {
   12469 	struct wm_softc *sc = ifp->if_softc;
   12470 	struct mii_data *mii = &sc->sc_mii;
   12471 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12472 	uint32_t pcs_adv, pcs_lpab, reg;
   12473 
   12474 	ifmr->ifm_status = IFM_AVALID;
   12475 	ifmr->ifm_active = IFM_ETHER;
   12476 
   12477 	/* Check PCS */
   12478 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12479 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12480 		ifmr->ifm_active |= IFM_NONE;
   12481 		sc->sc_tbi_linkup = 0;
   12482 		goto setled;
   12483 	}
   12484 
   12485 	sc->sc_tbi_linkup = 1;
   12486 	ifmr->ifm_status |= IFM_ACTIVE;
   12487 	if (sc->sc_type == WM_T_I354) {
   12488 		uint32_t status;
   12489 
   12490 		status = CSR_READ(sc, WMREG_STATUS);
   12491 		if (((status & STATUS_2P5_SKU) != 0)
   12492 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12493 			ifmr->ifm_active |= IFM_2500_KX;
   12494 		} else
   12495 			ifmr->ifm_active |= IFM_1000_KX;
   12496 	} else {
   12497 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12498 		case PCS_LSTS_SPEED_10:
   12499 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12500 			break;
   12501 		case PCS_LSTS_SPEED_100:
   12502 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12503 			break;
   12504 		case PCS_LSTS_SPEED_1000:
   12505 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12506 			break;
   12507 		default:
   12508 			device_printf(sc->sc_dev, "Unknown speed\n");
   12509 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12510 			break;
   12511 		}
   12512 	}
   12513 	if ((reg & PCS_LSTS_FDX) != 0)
   12514 		ifmr->ifm_active |= IFM_FDX;
   12515 	else
   12516 		ifmr->ifm_active |= IFM_HDX;
   12517 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12518 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12519 		/* Check flow */
   12520 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12521 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12522 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12523 			goto setled;
   12524 		}
   12525 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12526 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12527 		DPRINTF(WM_DEBUG_LINK,
   12528 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12529 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12530 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12531 			mii->mii_media_active |= IFM_FLOW
   12532 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12533 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12534 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12535 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12536 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12537 			mii->mii_media_active |= IFM_FLOW
   12538 			    | IFM_ETH_TXPAUSE;
   12539 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12540 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12541 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12542 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12543 			mii->mii_media_active |= IFM_FLOW
   12544 			    | IFM_ETH_RXPAUSE;
   12545 		}
   12546 	}
   12547 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12548 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12549 setled:
   12550 	wm_tbi_serdes_set_linkled(sc);
   12551 }
   12552 
   12553 /*
   12554  * wm_serdes_tick:
   12555  *
   12556  *	Check the link on serdes devices.
   12557  */
   12558 static void
   12559 wm_serdes_tick(struct wm_softc *sc)
   12560 {
   12561 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12562 	struct mii_data *mii = &sc->sc_mii;
   12563 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12564 	uint32_t reg;
   12565 
   12566 	KASSERT(WM_CORE_LOCKED(sc));
   12567 
   12568 	mii->mii_media_status = IFM_AVALID;
   12569 	mii->mii_media_active = IFM_ETHER;
   12570 
   12571 	/* Check PCS */
   12572 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12573 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12574 		mii->mii_media_status |= IFM_ACTIVE;
   12575 		sc->sc_tbi_linkup = 1;
   12576 		sc->sc_tbi_serdes_ticks = 0;
   12577 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12578 		if ((reg & PCS_LSTS_FDX) != 0)
   12579 			mii->mii_media_active |= IFM_FDX;
   12580 		else
   12581 			mii->mii_media_active |= IFM_HDX;
   12582 	} else {
		mii->mii_media_active |= IFM_NONE;
   12584 		sc->sc_tbi_linkup = 0;
   12585 		/* If the timer expired, retry autonegotiation */
   12586 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12587 		    && (++sc->sc_tbi_serdes_ticks
   12588 			>= sc->sc_tbi_serdes_anegticks)) {
   12589 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12590 				device_xname(sc->sc_dev), __func__));
   12591 			sc->sc_tbi_serdes_ticks = 0;
   12592 			/* XXX */
   12593 			wm_serdes_mediachange(ifp);
   12594 		}
   12595 	}
   12596 
   12597 	wm_tbi_serdes_set_linkled(sc);
   12598 }
   12599 
   12600 /* SFP related */
   12601 
   12602 static int
   12603 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12604 {
   12605 	uint32_t i2ccmd;
   12606 	int i;
   12607 
   12608 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12609 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12610 
   12611 	/* Poll the ready bit */
   12612 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12613 		delay(50);
   12614 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12615 		if (i2ccmd & I2CCMD_READY)
   12616 			break;
   12617 	}
   12618 	if ((i2ccmd & I2CCMD_READY) == 0)
   12619 		return -1;
   12620 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12621 		return -1;
   12622 
   12623 	*data = i2ccmd & 0x00ff;
   12624 
   12625 	return 0;
   12626 }
   12627 
   12628 static uint32_t
   12629 wm_sfp_get_media_type(struct wm_softc *sc)
   12630 {
   12631 	uint32_t ctrl_ext;
   12632 	uint8_t val = 0;
   12633 	int timeout = 3;
   12634 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12635 	int rv = -1;
   12636 
   12637 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12638 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12639 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12640 	CSR_WRITE_FLUSH(sc);
   12641 
   12642 	/* Read SFP module data */
   12643 	while (timeout) {
   12644 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12645 		if (rv == 0)
   12646 			break;
   12647 		delay(100*1000); /* XXX too big */
   12648 		timeout--;
   12649 	}
   12650 	if (rv != 0)
   12651 		goto out;
   12652 
   12653 	switch (val) {
   12654 	case SFF_SFP_ID_SFF:
   12655 		aprint_normal_dev(sc->sc_dev,
   12656 		    "Module/Connector soldered to board\n");
   12657 		break;
   12658 	case SFF_SFP_ID_SFP:
   12659 		sc->sc_flags |= WM_F_SFP;
   12660 		break;
   12661 	case SFF_SFP_ID_UNKNOWN:
   12662 		goto out;
   12663 	default:
   12664 		break;
   12665 	}
   12666 
   12667 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12668 	if (rv != 0)
   12669 		goto out;
   12670 
   12671 	sc->sc_sfptype = val;
   12672 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12673 		mediatype = WM_MEDIATYPE_SERDES;
   12674 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12675 		sc->sc_flags |= WM_F_SGMII;
   12676 		mediatype = WM_MEDIATYPE_COPPER;
   12677 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12678 		sc->sc_flags |= WM_F_SGMII;
   12679 		mediatype = WM_MEDIATYPE_SERDES;
   12680 	} else {
   12681 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12682 		    __func__, sc->sc_sfptype);
   12683 		sc->sc_sfptype = 0; /* XXX unknown */
   12684 	}
   12685 
   12686 out:
   12687 	/* Restore I2C interface setting */
   12688 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12689 
   12690 	return mediatype;
   12691 }
   12692 
   12693 /*
   12694  * NVM related.
   12695  * Microwire, SPI (w/wo EERD) and Flash.
   12696  */
   12697 
   12698 /* Both spi and uwire */
   12699 
   12700 /*
   12701  * wm_eeprom_sendbits:
   12702  *
   12703  *	Send a series of bits to the EEPROM.
   12704  */
   12705 static void
   12706 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12707 {
   12708 	uint32_t reg;
   12709 	int x;
   12710 
   12711 	reg = CSR_READ(sc, WMREG_EECD);
   12712 
   12713 	for (x = nbits; x > 0; x--) {
   12714 		if (bits & (1U << (x - 1)))
   12715 			reg |= EECD_DI;
   12716 		else
   12717 			reg &= ~EECD_DI;
   12718 		CSR_WRITE(sc, WMREG_EECD, reg);
   12719 		CSR_WRITE_FLUSH(sc);
   12720 		delay(2);
   12721 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12722 		CSR_WRITE_FLUSH(sc);
   12723 		delay(2);
   12724 		CSR_WRITE(sc, WMREG_EECD, reg);
   12725 		CSR_WRITE_FLUSH(sc);
   12726 		delay(2);
   12727 	}
   12728 }
   12729 
   12730 /*
   12731  * wm_eeprom_recvbits:
   12732  *
   12733  *	Receive a series of bits from the EEPROM.
   12734  */
   12735 static void
   12736 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12737 {
   12738 	uint32_t reg, val;
   12739 	int x;
   12740 
   12741 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12742 
   12743 	val = 0;
   12744 	for (x = nbits; x > 0; x--) {
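		/*
		 * Raise SK, sample DO while the clock is high, then
		 * drop SK again for the next bit.
		 */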
   12745 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12746 		CSR_WRITE_FLUSH(sc);
   12747 		delay(2);
   12748 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12749 			val |= (1U << (x - 1));
   12750 		CSR_WRITE(sc, WMREG_EECD, reg);
   12751 		CSR_WRITE_FLUSH(sc);
   12752 		delay(2);
   12753 	}
   12754 	*valp = val;
   12755 }
   12756 
   12757 /* Microwire */
   12758 
   12759 /*
   12760  * wm_nvm_read_uwire:
   12761  *
   12762  *	Read a word from the EEPROM using the MicroWire protocol.
   12763  */
   12764 static int
   12765 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12766 {
   12767 	uint32_t reg, val;
   12768 	int i;
   12769 
   12770 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12771 		device_xname(sc->sc_dev), __func__));
   12772 
   12773 	if (sc->nvm.acquire(sc) != 0)
   12774 		return -1;
   12775 
   12776 	for (i = 0; i < wordcnt; i++) {
   12777 		/* Clear SK and DI. */
   12778 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12779 		CSR_WRITE(sc, WMREG_EECD, reg);
   12780 
   12781 		/*
   12782 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12783 		 * and Xen.
   12784 		 *
   12785 		 * We use this workaround only for 82540 because qemu's
   12786 		 * e1000 act as 82540.
   12787 		 */
   12788 		if (sc->sc_type == WM_T_82540) {
   12789 			reg |= EECD_SK;
   12790 			CSR_WRITE(sc, WMREG_EECD, reg);
   12791 			reg &= ~EECD_SK;
   12792 			CSR_WRITE(sc, WMREG_EECD, reg);
   12793 			CSR_WRITE_FLUSH(sc);
   12794 			delay(2);
   12795 		}
   12796 		/* XXX: end of workaround */
   12797 
   12798 		/* Set CHIP SELECT. */
   12799 		reg |= EECD_CS;
   12800 		CSR_WRITE(sc, WMREG_EECD, reg);
   12801 		CSR_WRITE_FLUSH(sc);
   12802 		delay(2);
   12803 
   12804 		/* Shift in the READ command. */
   12805 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12806 
   12807 		/* Shift in address. */
   12808 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12809 
   12810 		/* Shift out the data. */
   12811 		wm_eeprom_recvbits(sc, &val, 16);
   12812 		data[i] = val & 0xffff;
   12813 
   12814 		/* Clear CHIP SELECT. */
   12815 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12816 		CSR_WRITE(sc, WMREG_EECD, reg);
   12817 		CSR_WRITE_FLUSH(sc);
   12818 		delay(2);
   12819 	}
   12820 
   12821 	sc->nvm.release(sc);
   12822 	return 0;
   12823 }
   12824 
   12825 /* SPI */
   12826 
   12827 /*
   12828  * Set SPI and FLASH related information from the EECD register.
   12829  * For 82541 and 82547, the word size is taken from EEPROM.
   12830  */
   12831 static int
   12832 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12833 {
   12834 	int size;
   12835 	uint32_t reg;
    12836 	uint16_t data = 0;
   12837 
   12838 	reg = CSR_READ(sc, WMREG_EECD);
   12839 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12840 
   12841 	/* Read the size of NVM from EECD by default */
   12842 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12843 	switch (sc->sc_type) {
   12844 	case WM_T_82541:
   12845 	case WM_T_82541_2:
   12846 	case WM_T_82547:
   12847 	case WM_T_82547_2:
    12848 		/* Set a dummy word size to be able to access the EEPROM */
   12849 		sc->sc_nvm_wordsize = 64;
   12850 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12851 			aprint_error_dev(sc->sc_dev,
   12852 			    "%s: failed to read EEPROM size\n", __func__);
   12853 		}
   12854 		reg = data;
   12855 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12856 		if (size == 0)
    12857 			size = 6; /* 64 words */
   12858 		else
   12859 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12860 		break;
   12861 	case WM_T_80003:
   12862 	case WM_T_82571:
   12863 	case WM_T_82572:
   12864 	case WM_T_82573: /* SPI case */
   12865 	case WM_T_82574: /* SPI case */
   12866 	case WM_T_82583: /* SPI case */
   12867 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12868 		if (size > 14)
   12869 			size = 14;
   12870 		break;
   12871 	case WM_T_82575:
   12872 	case WM_T_82576:
   12873 	case WM_T_82580:
   12874 	case WM_T_I350:
   12875 	case WM_T_I354:
   12876 	case WM_T_I210:
   12877 	case WM_T_I211:
   12878 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12879 		if (size > 15)
   12880 			size = 15;
   12881 		break;
   12882 	default:
   12883 		aprint_error_dev(sc->sc_dev,
   12884 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12885 		return -1;
   12887 	}
   12888 
   12889 	sc->sc_nvm_wordsize = 1 << size;
   12890 
   12891 	return 0;
   12892 }
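
/*
 * Worked example of the size computation above (illustrative only,
 * assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as the "size = 6" fallback
 * above suggests): on an 82571 whose EECD_EE_SIZE_EX field reads 2,
 * size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words (512 bytes).
 */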
   12893 
   12894 /*
   12895  * wm_nvm_ready_spi:
   12896  *
   12897  *	Wait for a SPI EEPROM to be ready for commands.
   12898  */
   12899 static int
   12900 wm_nvm_ready_spi(struct wm_softc *sc)
   12901 {
   12902 	uint32_t val;
   12903 	int usec;
   12904 
   12905 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12906 		device_xname(sc->sc_dev), __func__));
   12907 
   12908 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12909 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12910 		wm_eeprom_recvbits(sc, &val, 8);
   12911 		if ((val & SPI_SR_RDY) == 0)
   12912 			break;
   12913 	}
   12914 	if (usec >= SPI_MAX_RETRIES) {
    12915 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12916 		return -1;
   12917 	}
   12918 	return 0;
   12919 }
   12920 
   12921 /*
   12922  * wm_nvm_read_spi:
   12923  *
    12924  *	Read a word from the EEPROM using the SPI protocol.
   12925  */
   12926 static int
   12927 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12928 {
   12929 	uint32_t reg, val;
   12930 	int i;
   12931 	uint8_t opc;
   12932 	int rv = 0;
   12933 
   12934 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12935 		device_xname(sc->sc_dev), __func__));
   12936 
   12937 	if (sc->nvm.acquire(sc) != 0)
   12938 		return -1;
   12939 
   12940 	/* Clear SK and CS. */
   12941 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12942 	CSR_WRITE(sc, WMREG_EECD, reg);
   12943 	CSR_WRITE_FLUSH(sc);
   12944 	delay(2);
   12945 
   12946 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12947 		goto out;
   12948 
   12949 	/* Toggle CS to flush commands. */
   12950 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12951 	CSR_WRITE_FLUSH(sc);
   12952 	delay(2);
   12953 	CSR_WRITE(sc, WMREG_EECD, reg);
   12954 	CSR_WRITE_FLUSH(sc);
   12955 	delay(2);
   12956 
   12957 	opc = SPI_OPC_READ;
   12958 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12959 		opc |= SPI_OPC_A8;
   12960 
   12961 	wm_eeprom_sendbits(sc, opc, 8);
   12962 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12963 
   12964 	for (i = 0; i < wordcnt; i++) {
   12965 		wm_eeprom_recvbits(sc, &val, 16);
   12966 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12967 	}
   12968 
   12969 	/* Raise CS and clear SK. */
   12970 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12971 	CSR_WRITE(sc, WMREG_EECD, reg);
   12972 	CSR_WRITE_FLUSH(sc);
   12973 	delay(2);
   12974 
   12975 out:
   12976 	sc->nvm.release(sc);
   12977 	return rv;
   12978 }
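
/*
 * Note on the byte swap above (illustrative): wm_eeprom_recvbits()
 * shifts bits in MSB first, so the first byte on the wire ends up in
 * the high half of "val", while NVM words are stored low byte first.
 * E.g. wire bytes 0x34 then 0x12 give val = 0x3412, which is stored
 * as data[i] = 0x1234.
 */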
   12979 
   12980 /* Using with EERD */
   12981 
   12982 static int
   12983 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12984 {
   12985 	uint32_t attempts = 100000;
   12986 	uint32_t i, reg = 0;
   12987 	int32_t done = -1;
   12988 
   12989 	for (i = 0; i < attempts; i++) {
   12990 		reg = CSR_READ(sc, rw);
   12991 
   12992 		if (reg & EERD_DONE) {
   12993 			done = 0;
   12994 			break;
   12995 		}
   12996 		delay(5);
   12997 	}
   12998 
   12999 	return done;
   13000 }
   13001 
   13002 static int
   13003 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13004 {
   13005 	int i, eerd = 0;
   13006 	int rv = 0;
   13007 
   13008 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13009 		device_xname(sc->sc_dev), __func__));
   13010 
   13011 	if (sc->nvm.acquire(sc) != 0)
   13012 		return -1;
   13013 
   13014 	for (i = 0; i < wordcnt; i++) {
   13015 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13016 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13017 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13018 		if (rv != 0) {
   13019 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13020 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13021 			break;
   13022 		}
   13023 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13024 	}
   13025 
   13026 	sc->nvm.release(sc);
   13027 	return rv;
   13028 }
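
/*
 * Illustrative sketch of one EERD access as performed above: write the
 * word address and the START bit, poll for DONE, then pick the data out
 * of the upper bits:
 *
 *	CSR_WRITE(sc, WMREG_EERD,
 *	    (addr << EERD_ADDR_SHIFT) | EERD_START);
 *	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
 *		delay(5);
 *	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 *
 * This sketch omits the bounded retry loop that wm_poll_eerd_eewr_done()
 * provides.
 */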
   13029 
   13030 /* Flash */
   13031 
   13032 static int
   13033 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13034 {
   13035 	uint32_t eecd;
   13036 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13037 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13038 	uint32_t nvm_dword = 0;
   13039 	uint8_t sig_byte = 0;
   13040 	int rv;
   13041 
   13042 	switch (sc->sc_type) {
   13043 	case WM_T_PCH_SPT:
   13044 	case WM_T_PCH_CNP:
   13045 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13046 		act_offset = ICH_NVM_SIG_WORD * 2;
   13047 
   13048 		/* Set bank to 0 in case flash read fails. */
   13049 		*bank = 0;
   13050 
   13051 		/* Check bank 0 */
   13052 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13053 		if (rv != 0)
   13054 			return rv;
   13055 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13056 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13057 			*bank = 0;
   13058 			return 0;
   13059 		}
   13060 
   13061 		/* Check bank 1 */
    13062 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13063 		    &nvm_dword);
    13064 		if (rv != 0)
    13065 			return rv;
    13066 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13065 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13066 			*bank = 1;
   13067 			return 0;
   13068 		}
   13069 		aprint_error_dev(sc->sc_dev,
   13070 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13071 		return -1;
   13072 	case WM_T_ICH8:
   13073 	case WM_T_ICH9:
   13074 		eecd = CSR_READ(sc, WMREG_EECD);
   13075 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13076 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13077 			return 0;
   13078 		}
   13079 		/* FALLTHROUGH */
   13080 	default:
   13081 		/* Default to 0 */
   13082 		*bank = 0;
   13083 
   13084 		/* Check bank 0 */
   13085 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13086 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13087 			*bank = 0;
   13088 			return 0;
   13089 		}
   13090 
   13091 		/* Check bank 1 */
   13092 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13093 		    &sig_byte);
   13094 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13095 			*bank = 1;
   13096 			return 0;
   13097 		}
   13098 	}
   13099 
   13100 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13101 		device_xname(sc->sc_dev)));
   13102 	return -1;
   13103 }
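
/*
 * Illustrative note on the signature test above: only the top bits of
 * the byte read from ICH_NVM_SIG_WORD are compared.  Assuming the
 * conventional values ICH_NVM_VALID_SIG_MASK = 0xc0 and
 * ICH_NVM_SIG_VALUE = 0x80 (not re-verified here), any signature byte
 * in the range 0x80-0xbf marks a valid bank, and anything else makes
 * the code try the other bank.
 */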
   13104 
   13105 /******************************************************************************
   13106  * This function does initial flash setup so that a new read/write/erase cycle
   13107  * can be started.
   13108  *
   13109  * sc - The pointer to the hw structure
   13110  ****************************************************************************/
   13111 static int32_t
   13112 wm_ich8_cycle_init(struct wm_softc *sc)
   13113 {
   13114 	uint16_t hsfsts;
   13115 	int32_t error = 1;
   13116 	int32_t i     = 0;
   13117 
   13118 	if (sc->sc_type >= WM_T_PCH_SPT)
   13119 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13120 	else
   13121 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13122 
    13123 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   13124 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13125 		return error;
   13126 
    13127 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   13129 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13130 
   13131 	if (sc->sc_type >= WM_T_PCH_SPT)
   13132 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13133 	else
   13134 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13135 
    13136 	/*
    13137 	 * Either we should have a hardware SPI cycle-in-progress bit to
    13138 	 * check against before starting a new cycle, or the FDONE bit
    13139 	 * should be changed in the hardware so that it reads as 1 after a
    13140 	 * hardware reset, which could then be used to tell whether a cycle
    13141 	 * is in progress or has completed.  We should also have some
    13142 	 * software semaphore mechanism guarding FDONE or the
    13143 	 * cycle-in-progress bit so that accesses to those bits by two
    13144 	 * threads are serialized and two threads can't start a cycle at
    13145 	 * the same time.
    13146 	 */
   13146 
   13147 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13148 		/*
   13149 		 * There is no cycle running at present, so we can start a
   13150 		 * cycle
   13151 		 */
   13152 
   13153 		/* Begin by setting Flash Cycle Done. */
   13154 		hsfsts |= HSFSTS_DONE;
   13155 		if (sc->sc_type >= WM_T_PCH_SPT)
   13156 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13157 			    hsfsts & 0xffffUL);
   13158 		else
   13159 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13160 		error = 0;
   13161 	} else {
   13162 		/*
   13163 		 * Otherwise poll for sometime so the current cycle has a
   13164 		 * chance to end before giving up.
   13165 		 */
   13166 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13167 			if (sc->sc_type >= WM_T_PCH_SPT)
   13168 				hsfsts = ICH8_FLASH_READ32(sc,
   13169 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13170 			else
   13171 				hsfsts = ICH8_FLASH_READ16(sc,
   13172 				    ICH_FLASH_HSFSTS);
   13173 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13174 				error = 0;
   13175 				break;
   13176 			}
   13177 			delay(1);
   13178 		}
   13179 		if (error == 0) {
    13180 			/*
    13181 			 * The previous cycle ended within the timeout,
    13182 			 * so now set the Flash Cycle Done bit.
    13183 			 */
   13184 			hsfsts |= HSFSTS_DONE;
   13185 			if (sc->sc_type >= WM_T_PCH_SPT)
   13186 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13187 				    hsfsts & 0xffffUL);
   13188 			else
   13189 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13190 				    hsfsts);
   13191 		}
   13192 	}
   13193 	return error;
   13194 }
   13195 
   13196 /******************************************************************************
   13197  * This function starts a flash cycle and waits for its completion
   13198  *
   13199  * sc - The pointer to the hw structure
   13200  ****************************************************************************/
   13201 static int32_t
   13202 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13203 {
   13204 	uint16_t hsflctl;
   13205 	uint16_t hsfsts;
   13206 	int32_t error = 1;
   13207 	uint32_t i = 0;
   13208 
   13209 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13210 	if (sc->sc_type >= WM_T_PCH_SPT)
   13211 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13212 	else
   13213 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13214 	hsflctl |= HSFCTL_GO;
   13215 	if (sc->sc_type >= WM_T_PCH_SPT)
   13216 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13217 		    (uint32_t)hsflctl << 16);
   13218 	else
   13219 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13220 
   13221 	/* Wait till FDONE bit is set to 1 */
   13222 	do {
   13223 		if (sc->sc_type >= WM_T_PCH_SPT)
   13224 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13225 			    & 0xffffUL;
   13226 		else
   13227 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13228 		if (hsfsts & HSFSTS_DONE)
   13229 			break;
   13230 		delay(1);
   13231 		i++;
   13232 	} while (i < timeout);
    13233 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13234 		error = 0;
   13235 
   13236 	return error;
   13237 }
   13238 
   13239 /******************************************************************************
   13240  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13241  *
   13242  * sc - The pointer to the hw structure
   13243  * index - The index of the byte or word to read.
   13244  * size - Size of data to read, 1=byte 2=word, 4=dword
   13245  * data - Pointer to the word to store the value read.
   13246  *****************************************************************************/
   13247 static int32_t
   13248 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13249     uint32_t size, uint32_t *data)
   13250 {
   13251 	uint16_t hsfsts;
   13252 	uint16_t hsflctl;
   13253 	uint32_t flash_linear_address;
   13254 	uint32_t flash_data = 0;
   13255 	int32_t error = 1;
   13256 	int32_t count = 0;
   13257 
    13258 	if (size < 1 || size > 4 || data == NULL ||
   13259 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13260 		return error;
   13261 
   13262 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13263 	    sc->sc_ich8_flash_base;
   13264 
   13265 	do {
   13266 		delay(1);
   13267 		/* Steps */
   13268 		error = wm_ich8_cycle_init(sc);
   13269 		if (error)
   13270 			break;
   13271 
   13272 		if (sc->sc_type >= WM_T_PCH_SPT)
   13273 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13274 			    >> 16;
   13275 		else
   13276 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13277 		/* Byte count field = size - 1 (0, 1, 3 for 1, 2, 4 bytes). */
   13278 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13279 		    & HSFCTL_BCOUNT_MASK;
   13280 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13281 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13282 			/*
    13283 			 * On SPT, this register is in LAN memory space, not
    13284 			 * flash, so only 32-bit accesses are supported.
   13285 			 */
   13286 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13287 			    (uint32_t)hsflctl << 16);
   13288 		} else
   13289 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13290 
   13291 		/*
   13292 		 * Write the last 24 bits of index into Flash Linear address
   13293 		 * field in Flash Address
   13294 		 */
    13295 		/* TODO: maybe check the index against the size of the flash */
   13296 
   13297 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13298 
   13299 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13300 
    13301 		/*
    13302 		 * If FCERR is set, clear it and retry the whole sequence
    13303 		 * a few more times; otherwise read (shift in) the Flash
    13304 		 * Data0 register, which supplies the data least significant
    13305 		 * byte first.
    13306 		 */
   13307 		if (error == 0) {
   13308 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13309 			if (size == 1)
   13310 				*data = (uint8_t)(flash_data & 0x000000FF);
   13311 			else if (size == 2)
   13312 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13313 			else if (size == 4)
   13314 				*data = (uint32_t)flash_data;
   13315 			break;
   13316 		} else {
   13317 			/*
   13318 			 * If we've gotten here, then things are probably
   13319 			 * completely hosed, but if the error condition is
   13320 			 * detected, it won't hurt to give it another try...
   13321 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13322 			 */
   13323 			if (sc->sc_type >= WM_T_PCH_SPT)
   13324 				hsfsts = ICH8_FLASH_READ32(sc,
   13325 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13326 			else
   13327 				hsfsts = ICH8_FLASH_READ16(sc,
   13328 				    ICH_FLASH_HSFSTS);
   13329 
   13330 			if (hsfsts & HSFSTS_ERR) {
   13331 				/* Repeat for some time before giving up. */
   13332 				continue;
   13333 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13334 				break;
   13335 		}
   13336 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13337 
   13338 	return error;
   13339 }
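
/*
 * Illustrative example of the byte count encoding used above: the
 * HSFCTL byte count field holds (size - 1), so
 *
 *	size 1 (byte)  -> field value 0
 *	size 2 (word)  -> field value 1
 *	size 4 (dword) -> field value 3
 *
 * and the FDATA0 value read back is masked down to the width that was
 * requested.
 */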
   13340 
   13341 /******************************************************************************
   13342  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13343  *
   13344  * sc - pointer to wm_hw structure
   13345  * index - The index of the byte to read.
   13346  * data - Pointer to a byte to store the value read.
   13347  *****************************************************************************/
   13348 static int32_t
   13349 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13350 {
   13351 	int32_t status;
   13352 	uint32_t word = 0;
   13353 
   13354 	status = wm_read_ich8_data(sc, index, 1, &word);
   13355 	if (status == 0)
   13356 		*data = (uint8_t)word;
   13357 	else
   13358 		*data = 0;
   13359 
   13360 	return status;
   13361 }
   13362 
   13363 /******************************************************************************
   13364  * Reads a word from the NVM using the ICH8 flash access registers.
   13365  *
   13366  * sc - pointer to wm_hw structure
   13367  * index - The starting byte index of the word to read.
   13368  * data - Pointer to a word to store the value read.
   13369  *****************************************************************************/
   13370 static int32_t
   13371 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13372 {
   13373 	int32_t status;
   13374 	uint32_t word = 0;
   13375 
   13376 	status = wm_read_ich8_data(sc, index, 2, &word);
   13377 	if (status == 0)
   13378 		*data = (uint16_t)word;
   13379 	else
   13380 		*data = 0;
   13381 
   13382 	return status;
   13383 }
   13384 
   13385 /******************************************************************************
   13386  * Reads a dword from the NVM using the ICH8 flash access registers.
   13387  *
   13388  * sc - pointer to wm_hw structure
   13389  * index - The starting byte index of the word to read.
   13390  * data - Pointer to a word to store the value read.
   13391  *****************************************************************************/
   13392 static int32_t
   13393 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13394 {
   13395 	int32_t status;
   13396 
   13397 	status = wm_read_ich8_data(sc, index, 4, data);
   13398 	return status;
   13399 }
   13400 
   13401 /******************************************************************************
   13402  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13403  * register.
   13404  *
   13405  * sc - Struct containing variables accessed by shared code
   13406  * offset - offset of word in the EEPROM to read
   13407  * data - word read from the EEPROM
   13408  * words - number of words to read
   13409  *****************************************************************************/
   13410 static int
   13411 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13412 {
   13413 	int32_t	 rv = 0;
   13414 	uint32_t flash_bank = 0;
   13415 	uint32_t act_offset = 0;
   13416 	uint32_t bank_offset = 0;
   13417 	uint16_t word = 0;
   13418 	uint16_t i = 0;
   13419 
   13420 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13421 		device_xname(sc->sc_dev), __func__));
   13422 
   13423 	if (sc->nvm.acquire(sc) != 0)
   13424 		return -1;
   13425 
   13426 	/*
   13427 	 * We need to know which is the valid flash bank.  In the event
   13428 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13429 	 * managing flash_bank. So it cannot be trusted and needs
   13430 	 * to be updated with each read.
   13431 	 */
   13432 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13433 	if (rv) {
   13434 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13435 			device_xname(sc->sc_dev)));
   13436 		flash_bank = 0;
   13437 	}
   13438 
   13439 	/*
   13440 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13441 	 * size
   13442 	 */
   13443 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13444 
   13445 	for (i = 0; i < words; i++) {
   13446 		/* The NVM part needs a byte offset, hence * 2 */
   13447 		act_offset = bank_offset + ((offset + i) * 2);
   13448 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13449 		if (rv) {
   13450 			aprint_error_dev(sc->sc_dev,
   13451 			    "%s: failed to read NVM\n", __func__);
   13452 			break;
   13453 		}
   13454 		data[i] = word;
   13455 	}
   13456 
   13457 	sc->nvm.release(sc);
   13458 	return rv;
   13459 }
   13460 
   13461 /******************************************************************************
   13462  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13463  * register.
   13464  *
   13465  * sc - Struct containing variables accessed by shared code
   13466  * offset - offset of word in the EEPROM to read
   13467  * data - word read from the EEPROM
   13468  * words - number of words to read
   13469  *****************************************************************************/
   13470 static int
   13471 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13472 {
   13473 	int32_t	 rv = 0;
   13474 	uint32_t flash_bank = 0;
   13475 	uint32_t act_offset = 0;
   13476 	uint32_t bank_offset = 0;
   13477 	uint32_t dword = 0;
   13478 	uint16_t i = 0;
   13479 
   13480 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13481 		device_xname(sc->sc_dev), __func__));
   13482 
   13483 	if (sc->nvm.acquire(sc) != 0)
   13484 		return -1;
   13485 
   13486 	/*
   13487 	 * We need to know which is the valid flash bank.  In the event
   13488 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13489 	 * managing flash_bank. So it cannot be trusted and needs
   13490 	 * to be updated with each read.
   13491 	 */
   13492 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13493 	if (rv) {
   13494 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13495 			device_xname(sc->sc_dev)));
   13496 		flash_bank = 0;
   13497 	}
   13498 
   13499 	/*
   13500 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13501 	 * size
   13502 	 */
   13503 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13504 
   13505 	for (i = 0; i < words; i++) {
   13506 		/* The NVM part needs a byte offset, hence * 2 */
   13507 		act_offset = bank_offset + ((offset + i) * 2);
   13508 		/* but we must read dword aligned, so mask ... */
   13509 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13510 		if (rv) {
   13511 			aprint_error_dev(sc->sc_dev,
   13512 			    "%s: failed to read NVM\n", __func__);
   13513 			break;
   13514 		}
   13515 		/* ... and pick out low or high word */
   13516 		if ((act_offset & 0x2) == 0)
   13517 			data[i] = (uint16_t)(dword & 0xFFFF);
   13518 		else
   13519 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13520 	}
   13521 
   13522 	sc->nvm.release(sc);
   13523 	return rv;
   13524 }
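
/*
 * Worked example of the aligned read above (illustrative only): to
 * fetch NVM word 5 from bank 0, act_offset = 5 * 2 = 0xa.  The flash
 * is read as a dword at 0xa & ~0x3 = 0x8, and since (0xa & 0x2) != 0
 * the wanted word is the high half:
 *
 *	wm_read_ich8_dword(sc, 0x8, &dword);
 *	data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
 */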
   13525 
   13526 /* iNVM */
   13527 
   13528 static int
   13529 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13530 {
    13531 	int32_t	 rv = -1;	/* Return "not found" unless we find it */
   13532 	uint32_t invm_dword;
   13533 	uint16_t i;
   13534 	uint8_t record_type, word_address;
   13535 
   13536 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13537 		device_xname(sc->sc_dev), __func__));
   13538 
   13539 	for (i = 0; i < INVM_SIZE; i++) {
   13540 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13541 		/* Get record type */
   13542 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13543 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13544 			break;
   13545 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13546 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13547 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13548 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13549 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13550 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13551 			if (word_address == address) {
   13552 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13553 				rv = 0;
   13554 				break;
   13555 			}
   13556 		}
   13557 	}
   13558 
   13559 	return rv;
   13560 }
   13561 
   13562 static int
   13563 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13564 {
   13565 	int rv = 0;
   13566 	int i;
   13567 
   13568 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13569 		device_xname(sc->sc_dev), __func__));
   13570 
   13571 	if (sc->nvm.acquire(sc) != 0)
   13572 		return -1;
   13573 
   13574 	for (i = 0; i < words; i++) {
   13575 		switch (offset + i) {
   13576 		case NVM_OFF_MACADDR:
   13577 		case NVM_OFF_MACADDR1:
   13578 		case NVM_OFF_MACADDR2:
   13579 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13580 			if (rv != 0) {
   13581 				data[i] = 0xffff;
   13582 				rv = -1;
   13583 			}
   13584 			break;
   13585 		case NVM_OFF_CFG2:
   13586 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13587 			if (rv != 0) {
   13588 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13589 				rv = 0;
   13590 			}
   13591 			break;
   13592 		case NVM_OFF_CFG4:
   13593 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13594 			if (rv != 0) {
   13595 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13596 				rv = 0;
   13597 			}
   13598 			break;
   13599 		case NVM_OFF_LED_1_CFG:
   13600 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13601 			if (rv != 0) {
   13602 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13603 				rv = 0;
   13604 			}
   13605 			break;
   13606 		case NVM_OFF_LED_0_2_CFG:
   13607 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13608 			if (rv != 0) {
   13609 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13610 				rv = 0;
   13611 			}
   13612 			break;
   13613 		case NVM_OFF_ID_LED_SETTINGS:
   13614 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13615 			if (rv != 0) {
   13616 				*data = ID_LED_RESERVED_FFFF;
   13617 				rv = 0;
   13618 			}
   13619 			break;
   13620 		default:
    13621 			DPRINTF(WM_DEBUG_NVM,
    13622 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
    13623 			data[i] = NVM_RESERVED_WORD;
   13624 			break;
   13625 		}
   13626 	}
   13627 
   13628 	sc->nvm.release(sc);
   13629 	return rv;
   13630 }
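
/*
 * Illustrative sketch of the iNVM walk in wm_nvm_read_word_invm()
 * above: the I210/I211 iNVM is a sequence of 32-bit records, each
 * encoding a record type, and word-autoload records also carry a word
 * address plus the word's data:
 *
 *	invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
 *	record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
 *	if ((record_type == INVM_WORD_AUTOLOAD_STRUCTURE)
 *	    && (INVM_DWORD_TO_WORD_ADDRESS(invm_dword) == address))
 *		*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
 *
 * Multi-dword records (CSR autoload, RSA key) are skipped by advancing
 * the index past their payload.
 */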
   13631 
    13632 /* Locking, NVM type detection, checksum validation, version check and read */
   13633 
   13634 static int
   13635 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13636 {
   13637 	uint32_t eecd = 0;
   13638 
   13639 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13640 	    || sc->sc_type == WM_T_82583) {
   13641 		eecd = CSR_READ(sc, WMREG_EECD);
   13642 
   13643 		/* Isolate bits 15 & 16 */
   13644 		eecd = ((eecd >> 15) & 0x03);
   13645 
   13646 		/* If both bits are set, device is Flash type */
   13647 		if (eecd == 0x03)
   13648 			return 0;
   13649 	}
   13650 	return 1;
   13651 }
   13652 
   13653 static int
   13654 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13655 {
   13656 	uint32_t eec;
   13657 
   13658 	eec = CSR_READ(sc, WMREG_EEC);
   13659 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13660 		return 1;
   13661 
   13662 	return 0;
   13663 }
   13664 
   13665 /*
   13666  * wm_nvm_validate_checksum
   13667  *
   13668  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13669  */
   13670 static int
   13671 wm_nvm_validate_checksum(struct wm_softc *sc)
   13672 {
   13673 	uint16_t checksum;
   13674 	uint16_t eeprom_data;
   13675 #ifdef WM_DEBUG
   13676 	uint16_t csum_wordaddr, valid_checksum;
   13677 #endif
   13678 	int i;
   13679 
   13680 	checksum = 0;
   13681 
   13682 	/* Don't check for I211 */
   13683 	if (sc->sc_type == WM_T_I211)
   13684 		return 0;
   13685 
   13686 #ifdef WM_DEBUG
   13687 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13688 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13689 		csum_wordaddr = NVM_OFF_COMPAT;
   13690 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13691 	} else {
   13692 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13693 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13694 	}
   13695 
   13696 	/* Dump EEPROM image for debug */
   13697 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13698 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13699 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13700 		/* XXX PCH_SPT? */
   13701 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13702 		if ((eeprom_data & valid_checksum) == 0)
   13703 			DPRINTF(WM_DEBUG_NVM,
   13704 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13705 				device_xname(sc->sc_dev), eeprom_data,
   13706 				    valid_checksum));
   13707 	}
   13708 
   13709 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13710 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13711 		for (i = 0; i < NVM_SIZE; i++) {
   13712 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13713 				printf("XXXX ");
   13714 			else
   13715 				printf("%04hx ", eeprom_data);
   13716 			if (i % 8 == 7)
   13717 				printf("\n");
   13718 		}
   13719 	}
   13720 
   13721 #endif /* WM_DEBUG */
   13722 
   13723 	for (i = 0; i < NVM_SIZE; i++) {
   13724 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13725 			return 1;
   13726 		checksum += eeprom_data;
   13727 	}
   13728 
   13729 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13730 #ifdef WM_DEBUG
   13731 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13732 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13733 #endif
   13734 	}
   13735 
   13736 	return 0;
   13737 }
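
/*
 * Checksum arithmetic example (illustrative only): the sum of all
 * NVM_SIZE words, including the checksum word itself, must equal
 * NVM_CHECKSUM (0xbaba, the usual Intel value).  So if the other 63
 * words sum to 0x1234, the checksum word has to be
 * 0xbaba - 0x1234 = 0xa886 (mod 2^16).  As the code above shows, a
 * mismatch is only reported under WM_DEBUG and is not treated as fatal.
 */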
   13738 
   13739 static void
   13740 wm_nvm_version_invm(struct wm_softc *sc)
   13741 {
   13742 	uint32_t dword;
   13743 
   13744 	/*
    13745 	 * Linux's code to decode the version is very strange, so we don't
    13746 	 * follow that algorithm and just use word 61 as the documentation
    13747 	 * describes.  Perhaps it's not perfect though...
   13748 	 *
   13749 	 * Example:
   13750 	 *
   13751 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13752 	 */
   13753 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13754 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13755 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13756 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13757 }
   13758 
   13759 static void
   13760 wm_nvm_version(struct wm_softc *sc)
   13761 {
   13762 	uint16_t major, minor, build, patch;
   13763 	uint16_t uid0, uid1;
   13764 	uint16_t nvm_data;
   13765 	uint16_t off;
   13766 	bool check_version = false;
   13767 	bool check_optionrom = false;
   13768 	bool have_build = false;
   13769 	bool have_uid = true;
   13770 
   13771 	/*
   13772 	 * Version format:
   13773 	 *
   13774 	 * XYYZ
   13775 	 * X0YZ
   13776 	 * X0YY
   13777 	 *
   13778 	 * Example:
   13779 	 *
   13780 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13781 	 *	82571	0x50a6	5.10.6?
   13782 	 *	82572	0x506a	5.6.10?
   13783 	 *	82572EI	0x5069	5.6.9?
   13784 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13785 	 *		0x2013	2.1.3?
   13786 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13787 	 * ICH8+82567	0x0040	0.4.0?
   13788 	 * ICH9+82566	0x1040	1.4.0?
   13789 	 *ICH10+82567	0x0043	0.4.3?
   13790 	 *  PCH+82577	0x00c1	0.12.1?
   13791 	 * PCH2+82579	0x00d3	0.13.3?
   13792 	 *		0x00d4	0.13.4?
   13793 	 *  LPT+I218	0x0023	0.2.3?
   13794 	 *  SPT+I219	0x0084	0.8.4?
   13795 	 *  CNP+I219	0x0054	0.5.4?
   13796 	 */
   13797 
   13798 	/*
   13799 	 * XXX
    13800 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    13801 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13802 	 */
   13803 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13804 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13805 		have_uid = false;
   13806 
   13807 	switch (sc->sc_type) {
   13808 	case WM_T_82571:
   13809 	case WM_T_82572:
   13810 	case WM_T_82574:
   13811 	case WM_T_82583:
   13812 		check_version = true;
   13813 		check_optionrom = true;
   13814 		have_build = true;
   13815 		break;
   13816 	case WM_T_ICH8:
   13817 	case WM_T_ICH9:
   13818 	case WM_T_ICH10:
   13819 	case WM_T_PCH:
   13820 	case WM_T_PCH2:
   13821 	case WM_T_PCH_LPT:
   13822 	case WM_T_PCH_SPT:
   13823 	case WM_T_PCH_CNP:
   13824 		check_version = true;
   13825 		have_build = true;
   13826 		have_uid = false;
   13827 		break;
   13828 	case WM_T_82575:
   13829 	case WM_T_82576:
   13830 	case WM_T_82580:
   13831 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13832 			check_version = true;
   13833 		break;
   13834 	case WM_T_I211:
   13835 		wm_nvm_version_invm(sc);
   13836 		have_uid = false;
   13837 		goto printver;
   13838 	case WM_T_I210:
   13839 		if (!wm_nvm_flash_presence_i210(sc)) {
   13840 			wm_nvm_version_invm(sc);
   13841 			have_uid = false;
   13842 			goto printver;
   13843 		}
   13844 		/* FALLTHROUGH */
   13845 	case WM_T_I350:
   13846 	case WM_T_I354:
   13847 		check_version = true;
   13848 		check_optionrom = true;
   13849 		break;
   13850 	default:
   13851 		return;
   13852 	}
   13853 	if (check_version
   13854 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13855 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13856 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13857 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13858 			build = nvm_data & NVM_BUILD_MASK;
   13859 			have_build = true;
   13860 		} else
   13861 			minor = nvm_data & 0x00ff;
   13862 
   13863 		/* Decimal */
   13864 		minor = (minor / 16) * 10 + (minor % 16);
   13865 		sc->sc_nvm_ver_major = major;
   13866 		sc->sc_nvm_ver_minor = minor;
   13867 
   13868 printver:
   13869 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13870 		    sc->sc_nvm_ver_minor);
   13871 		if (have_build) {
   13872 			sc->sc_nvm_ver_build = build;
   13873 			aprint_verbose(".%d", build);
   13874 		}
   13875 	}
   13876 
    13877 	/* Assume the Option ROM area is above NVM_SIZE */
   13878 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13879 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13880 		/* Option ROM Version */
   13881 		if ((off != 0x0000) && (off != 0xffff)) {
   13882 			int rv;
   13883 
   13884 			off += NVM_COMBO_VER_OFF;
   13885 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13886 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13887 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13888 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13889 				/* 16bits */
   13890 				major = uid0 >> 8;
   13891 				build = (uid0 << 8) | (uid1 >> 8);
   13892 				patch = uid1 & 0x00ff;
   13893 				aprint_verbose(", option ROM Version %d.%d.%d",
   13894 				    major, build, patch);
   13895 			}
   13896 		}
   13897 	}
   13898 
   13899 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13900 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13901 }
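
/*
 * Worked example of the version decode above (illustrative only): an
 * 82571 with NVM_OFF_VERSION word 0x50a2 has major = 0x5, minor = 0xa
 * and build = 0x2.  The BCD-style conversion
 * (minor / 16) * 10 + (minor % 16) turns 0xa into 10, giving version
 * "5.10.2" as in the table above.
 */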
   13902 
   13903 /*
   13904  * wm_nvm_read:
   13905  *
   13906  *	Read data from the serial EEPROM.
   13907  */
   13908 static int
   13909 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13910 {
   13911 	int rv;
   13912 
   13913 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13914 		device_xname(sc->sc_dev), __func__));
   13915 
   13916 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13917 		return -1;
   13918 
   13919 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13920 
   13921 	return rv;
   13922 }
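
/*
 * Usage sketch (illustrative only): reading the three 16-bit words of
 * the primary MAC address starting at NVM_OFF_MACADDR would look like:
 *
 *	uint16_t myea[3];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) != 0)
 *		(handle the error)
 *
 * The underlying access method (uwire, SPI, EERD, flash or iNVM) is
 * hidden behind the sc->nvm.read hook chosen at attach time.
 */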
   13923 
   13924 /*
   13925  * Hardware semaphores.
    13926  * Very complex...
   13927  */
   13928 
   13929 static int
   13930 wm_get_null(struct wm_softc *sc)
   13931 {
   13932 
   13933 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13934 		device_xname(sc->sc_dev), __func__));
   13935 	return 0;
   13936 }
   13937 
   13938 static void
   13939 wm_put_null(struct wm_softc *sc)
   13940 {
   13941 
   13942 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13943 		device_xname(sc->sc_dev), __func__));
   13944 	return;
   13945 }
   13946 
   13947 static int
   13948 wm_get_eecd(struct wm_softc *sc)
   13949 {
   13950 	uint32_t reg;
   13951 	int x;
   13952 
   13953 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13954 		device_xname(sc->sc_dev), __func__));
   13955 
   13956 	reg = CSR_READ(sc, WMREG_EECD);
   13957 
   13958 	/* Request EEPROM access. */
   13959 	reg |= EECD_EE_REQ;
   13960 	CSR_WRITE(sc, WMREG_EECD, reg);
   13961 
    13962 	/* ... and wait for it to be granted. */
   13963 	for (x = 0; x < 1000; x++) {
   13964 		reg = CSR_READ(sc, WMREG_EECD);
   13965 		if (reg & EECD_EE_GNT)
   13966 			break;
   13967 		delay(5);
   13968 	}
   13969 	if ((reg & EECD_EE_GNT) == 0) {
   13970 		aprint_error_dev(sc->sc_dev,
   13971 		    "could not acquire EEPROM GNT\n");
   13972 		reg &= ~EECD_EE_REQ;
   13973 		CSR_WRITE(sc, WMREG_EECD, reg);
   13974 		return -1;
   13975 	}
   13976 
   13977 	return 0;
   13978 }
   13979 
   13980 static void
   13981 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13982 {
   13983 
   13984 	*eecd |= EECD_SK;
   13985 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13986 	CSR_WRITE_FLUSH(sc);
   13987 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13988 		delay(1);
   13989 	else
   13990 		delay(50);
   13991 }
   13992 
   13993 static void
   13994 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13995 {
   13996 
   13997 	*eecd &= ~EECD_SK;
   13998 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13999 	CSR_WRITE_FLUSH(sc);
   14000 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14001 		delay(1);
   14002 	else
   14003 		delay(50);
   14004 }
   14005 
   14006 static void
   14007 wm_put_eecd(struct wm_softc *sc)
   14008 {
   14009 	uint32_t reg;
   14010 
   14011 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14012 		device_xname(sc->sc_dev), __func__));
   14013 
   14014 	/* Stop nvm */
   14015 	reg = CSR_READ(sc, WMREG_EECD);
   14016 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14017 		/* Pull CS high */
   14018 		reg |= EECD_CS;
   14019 		wm_nvm_eec_clock_lower(sc, &reg);
   14020 	} else {
   14021 		/* CS on Microwire is active-high */
   14022 		reg &= ~(EECD_CS | EECD_DI);
   14023 		CSR_WRITE(sc, WMREG_EECD, reg);
   14024 		wm_nvm_eec_clock_raise(sc, &reg);
   14025 		wm_nvm_eec_clock_lower(sc, &reg);
   14026 	}
   14027 
   14028 	reg = CSR_READ(sc, WMREG_EECD);
   14029 	reg &= ~EECD_EE_REQ;
   14030 	CSR_WRITE(sc, WMREG_EECD, reg);
   14031 
   14032 	return;
   14033 }
   14034 
   14035 /*
   14036  * Get hardware semaphore.
   14037  * Same as e1000_get_hw_semaphore_generic()
   14038  */
   14039 static int
   14040 wm_get_swsm_semaphore(struct wm_softc *sc)
   14041 {
   14042 	int32_t timeout;
   14043 	uint32_t swsm;
   14044 
   14045 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14046 		device_xname(sc->sc_dev), __func__));
   14047 	KASSERT(sc->sc_nvm_wordsize > 0);
   14048 
   14049 retry:
   14050 	/* Get the SW semaphore. */
   14051 	timeout = sc->sc_nvm_wordsize + 1;
   14052 	while (timeout) {
   14053 		swsm = CSR_READ(sc, WMREG_SWSM);
   14054 
   14055 		if ((swsm & SWSM_SMBI) == 0)
   14056 			break;
   14057 
   14058 		delay(50);
   14059 		timeout--;
   14060 	}
   14061 
   14062 	if (timeout == 0) {
   14063 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14064 			/*
   14065 			 * In rare circumstances, the SW semaphore may already
   14066 			 * be held unintentionally. Clear the semaphore once
   14067 			 * before giving up.
   14068 			 */
   14069 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14070 			wm_put_swsm_semaphore(sc);
   14071 			goto retry;
   14072 		}
   14073 		aprint_error_dev(sc->sc_dev,
   14074 		    "could not acquire SWSM SMBI\n");
   14075 		return 1;
   14076 	}
   14077 
   14078 	/* Get the FW semaphore. */
   14079 	timeout = sc->sc_nvm_wordsize + 1;
   14080 	while (timeout) {
   14081 		swsm = CSR_READ(sc, WMREG_SWSM);
   14082 		swsm |= SWSM_SWESMBI;
   14083 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14084 		/* If we managed to set the bit we got the semaphore. */
   14085 		swsm = CSR_READ(sc, WMREG_SWSM);
   14086 		if (swsm & SWSM_SWESMBI)
   14087 			break;
   14088 
   14089 		delay(50);
   14090 		timeout--;
   14091 	}
   14092 
   14093 	if (timeout == 0) {
   14094 		aprint_error_dev(sc->sc_dev,
   14095 		    "could not acquire SWSM SWESMBI\n");
   14096 		/* Release semaphores */
   14097 		wm_put_swsm_semaphore(sc);
   14098 		return 1;
   14099 	}
   14100 	return 0;
   14101 }
   14102 
   14103 /*
   14104  * Put hardware semaphore.
   14105  * Same as e1000_put_hw_semaphore_generic()
   14106  */
   14107 static void
   14108 wm_put_swsm_semaphore(struct wm_softc *sc)
   14109 {
   14110 	uint32_t swsm;
   14111 
   14112 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14113 		device_xname(sc->sc_dev), __func__));
   14114 
   14115 	swsm = CSR_READ(sc, WMREG_SWSM);
   14116 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14117 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14118 }
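
/*
 * Usage sketch (illustrative only): the get/put pairs in this section
 * always bracket the hardware access, e.g.:
 *
 *	if (wm_get_swsm_semaphore(sc) != 0)
 *		return -1;
 *	(access NVM or PHY registers here)
 *	wm_put_swsm_semaphore(sc);
 *
 * As the comments above note, SMBI is the SW semaphore and SWESMBI
 * arbitrates between software and firmware.
 */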
   14119 
   14120 /*
   14121  * Get SW/FW semaphore.
   14122  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14123  */
   14124 static int
   14125 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14126 {
   14127 	uint32_t swfw_sync;
   14128 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14129 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14130 	int timeout;
   14131 
   14132 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14133 		device_xname(sc->sc_dev), __func__));
   14134 
   14135 	if (sc->sc_type == WM_T_80003)
   14136 		timeout = 50;
   14137 	else
   14138 		timeout = 200;
   14139 
   14140 	while (timeout) {
   14141 		if (wm_get_swsm_semaphore(sc)) {
   14142 			aprint_error_dev(sc->sc_dev,
   14143 			    "%s: failed to get semaphore\n",
   14144 			    __func__);
   14145 			return 1;
   14146 		}
   14147 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14148 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14149 			swfw_sync |= swmask;
   14150 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14151 			wm_put_swsm_semaphore(sc);
   14152 			return 0;
   14153 		}
   14154 		wm_put_swsm_semaphore(sc);
   14155 		delay(5000);
   14156 		timeout--;
   14157 	}
   14158 	device_printf(sc->sc_dev,
   14159 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14160 	    mask, swfw_sync);
   14161 	return 1;
   14162 }
   14163 
   14164 static void
   14165 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14166 {
   14167 	uint32_t swfw_sync;
   14168 
   14169 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14170 		device_xname(sc->sc_dev), __func__));
   14171 
   14172 	while (wm_get_swsm_semaphore(sc) != 0)
   14173 		continue;
   14174 
   14175 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14176 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14177 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14178 
   14179 	wm_put_swsm_semaphore(sc);
   14180 }
   14181 
   14182 static int
   14183 wm_get_nvm_80003(struct wm_softc *sc)
   14184 {
   14185 	int rv;
   14186 
   14187 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14188 		device_xname(sc->sc_dev), __func__));
   14189 
   14190 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14191 		aprint_error_dev(sc->sc_dev,
   14192 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14193 		return rv;
   14194 	}
   14195 
   14196 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14197 	    && (rv = wm_get_eecd(sc)) != 0) {
   14198 		aprint_error_dev(sc->sc_dev,
   14199 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14200 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14201 		return rv;
   14202 	}
   14203 
   14204 	return 0;
   14205 }
   14206 
   14207 static void
   14208 wm_put_nvm_80003(struct wm_softc *sc)
   14209 {
   14210 
   14211 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14212 		device_xname(sc->sc_dev), __func__));
   14213 
   14214 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14215 		wm_put_eecd(sc);
   14216 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14217 }
   14218 
   14219 static int
   14220 wm_get_nvm_82571(struct wm_softc *sc)
   14221 {
   14222 	int rv;
   14223 
   14224 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14225 		device_xname(sc->sc_dev), __func__));
   14226 
   14227 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14228 		return rv;
   14229 
   14230 	switch (sc->sc_type) {
   14231 	case WM_T_82573:
   14232 		break;
   14233 	default:
   14234 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14235 			rv = wm_get_eecd(sc);
   14236 		break;
   14237 	}
   14238 
   14239 	if (rv != 0) {
   14240 		aprint_error_dev(sc->sc_dev,
   14241 		    "%s: failed to get semaphore\n",
   14242 		    __func__);
   14243 		wm_put_swsm_semaphore(sc);
   14244 	}
   14245 
   14246 	return rv;
   14247 }
   14248 
   14249 static void
   14250 wm_put_nvm_82571(struct wm_softc *sc)
   14251 {
   14252 
   14253 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14254 		device_xname(sc->sc_dev), __func__));
   14255 
   14256 	switch (sc->sc_type) {
   14257 	case WM_T_82573:
   14258 		break;
   14259 	default:
   14260 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14261 			wm_put_eecd(sc);
   14262 		break;
   14263 	}
   14264 
   14265 	wm_put_swsm_semaphore(sc);
   14266 }
   14267 
   14268 static int
   14269 wm_get_phy_82575(struct wm_softc *sc)
   14270 {
   14271 
   14272 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14273 		device_xname(sc->sc_dev), __func__));
   14274 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14275 }
   14276 
   14277 static void
   14278 wm_put_phy_82575(struct wm_softc *sc)
   14279 {
   14280 
   14281 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14282 		device_xname(sc->sc_dev), __func__));
   14283 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14284 }
   14285 
   14286 static int
   14287 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14288 {
   14289 	uint32_t ext_ctrl;
    14290 	int timeout;
   14291 
   14292 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14293 		device_xname(sc->sc_dev), __func__));
   14294 
   14295 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14296 	for (timeout = 0; timeout < 200; timeout++) {
   14297 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14298 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14299 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14300 
   14301 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14302 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14303 			return 0;
   14304 		delay(5000);
   14305 	}
   14306 	device_printf(sc->sc_dev,
   14307 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14308 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14309 	return 1;
   14310 }
   14311 
   14312 static void
   14313 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14314 {
   14315 	uint32_t ext_ctrl;
   14316 
   14317 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14318 		device_xname(sc->sc_dev), __func__));
   14319 
   14320 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14321 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14322 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14323 
   14324 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14325 }
   14326 
   14327 static int
   14328 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14329 {
   14330 	uint32_t ext_ctrl;
   14331 	int timeout;
   14332 
   14333 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14334 		device_xname(sc->sc_dev), __func__));
   14335 	mutex_enter(sc->sc_ich_phymtx);
   14336 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14337 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14338 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14339 			break;
   14340 		delay(1000);
   14341 	}
   14342 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14343 		device_printf(sc->sc_dev,
   14344 		    "SW has already locked the resource\n");
   14345 		goto out;
   14346 	}
   14347 
   14348 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14349 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14350 	for (timeout = 0; timeout < 1000; timeout++) {
   14351 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14352 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14353 			break;
   14354 		delay(1000);
   14355 	}
   14356 	if (timeout >= 1000) {
   14357 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14358 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14359 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14360 		goto out;
   14361 	}
   14362 	return 0;
   14363 
   14364 out:
   14365 	mutex_exit(sc->sc_ich_phymtx);
   14366 	return 1;
   14367 }
   14368 
   14369 static void
   14370 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14371 {
   14372 	uint32_t ext_ctrl;
   14373 
   14374 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14375 		device_xname(sc->sc_dev), __func__));
   14376 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14377 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14378 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14379 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14380 	} else {
   14381 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14382 	}
   14383 
   14384 	mutex_exit(sc->sc_ich_phymtx);
   14385 }
   14386 
   14387 static int
   14388 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14389 {
   14390 
   14391 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14392 		device_xname(sc->sc_dev), __func__));
   14393 	mutex_enter(sc->sc_ich_nvmmtx);
   14394 
   14395 	return 0;
   14396 }
   14397 
   14398 static void
   14399 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14400 {
   14401 
   14402 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14403 		device_xname(sc->sc_dev), __func__));
   14404 	mutex_exit(sc->sc_ich_nvmmtx);
   14405 }
   14406 
   14407 static int
   14408 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14409 {
   14410 	int i = 0;
   14411 	uint32_t reg;
   14412 
   14413 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14414 		device_xname(sc->sc_dev), __func__));
   14415 
   14416 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14417 	do {
   14418 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14419 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14420 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14421 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14422 			break;
   14423 		delay(2*1000);
   14424 		i++;
   14425 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14426 
   14427 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14428 		wm_put_hw_semaphore_82573(sc);
   14429 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14430 		    device_xname(sc->sc_dev));
   14431 		return -1;
   14432 	}
   14433 
   14434 	return 0;
   14435 }
   14436 
   14437 static void
   14438 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14439 {
   14440 	uint32_t reg;
   14441 
   14442 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14443 		device_xname(sc->sc_dev), __func__));
   14444 
   14445 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14446 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14447 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14448 }
   14449 
   14450 /*
   14451  * Management mode and power management related subroutines.
   14452  * BMC, AMT, suspend/resume and EEE.
   14453  */
   14454 
   14455 #ifdef WM_WOL
   14456 static int
   14457 wm_check_mng_mode(struct wm_softc *sc)
   14458 {
   14459 	int rv;
   14460 
   14461 	switch (sc->sc_type) {
   14462 	case WM_T_ICH8:
   14463 	case WM_T_ICH9:
   14464 	case WM_T_ICH10:
   14465 	case WM_T_PCH:
   14466 	case WM_T_PCH2:
   14467 	case WM_T_PCH_LPT:
   14468 	case WM_T_PCH_SPT:
   14469 	case WM_T_PCH_CNP:
   14470 		rv = wm_check_mng_mode_ich8lan(sc);
   14471 		break;
   14472 	case WM_T_82574:
   14473 	case WM_T_82583:
   14474 		rv = wm_check_mng_mode_82574(sc);
   14475 		break;
   14476 	case WM_T_82571:
   14477 	case WM_T_82572:
   14478 	case WM_T_82573:
   14479 	case WM_T_80003:
   14480 		rv = wm_check_mng_mode_generic(sc);
   14481 		break;
   14482 	default:
    14483 		/* Nothing to do */
   14484 		rv = 0;
   14485 		break;
   14486 	}
   14487 
   14488 	return rv;
   14489 }
   14490 
   14491 static int
   14492 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14493 {
   14494 	uint32_t fwsm;
   14495 
   14496 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14497 
   14498 	if (((fwsm & FWSM_FW_VALID) != 0)
   14499 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14500 		return 1;
   14501 
   14502 	return 0;
   14503 }
   14504 
   14505 static int
   14506 wm_check_mng_mode_82574(struct wm_softc *sc)
   14507 {
   14508 	uint16_t data;
   14509 
   14510 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14511 
   14512 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14513 		return 1;
   14514 
   14515 	return 0;
   14516 }
   14517 
   14518 static int
   14519 wm_check_mng_mode_generic(struct wm_softc *sc)
   14520 {
   14521 	uint32_t fwsm;
   14522 
   14523 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14524 
   14525 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14526 		return 1;
   14527 
   14528 	return 0;
   14529 }
   14530 #endif /* WM_WOL */
   14531 
   14532 static int
   14533 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14534 {
   14535 	uint32_t manc, fwsm, factps;
   14536 
   14537 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14538 		return 0;
   14539 
   14540 	manc = CSR_READ(sc, WMREG_MANC);
   14541 
   14542 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14543 		device_xname(sc->sc_dev), manc));
   14544 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14545 		return 0;
   14546 
   14547 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14548 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14549 		factps = CSR_READ(sc, WMREG_FACTPS);
   14550 		if (((factps & FACTPS_MNGCG) == 0)
   14551 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14552 			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   14554 		uint16_t data;
   14555 
   14556 		factps = CSR_READ(sc, WMREG_FACTPS);
   14557 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14558 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14559 			device_xname(sc->sc_dev), factps, data));
   14560 		if (((factps & FACTPS_MNGCG) == 0)
   14561 		    && ((data & NVM_CFG2_MNGM_MASK)
   14562 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14563 			return 1;
   14564 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14565 	    && ((manc & MANC_ASF_EN) == 0))
   14566 		return 1;
   14567 
   14568 	return 0;
   14569 }
   14570 
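/*
 * Return true if PHY resets are currently blocked by the management
 * firmware.
 */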
   14571 static bool
   14572 wm_phy_resetisblocked(struct wm_softc *sc)
   14573 {
   14574 	bool blocked = false;
   14575 	uint32_t reg;
   14576 	int i = 0;
   14577 
   14578 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14579 		device_xname(sc->sc_dev), __func__));
   14580 
   14581 	switch (sc->sc_type) {
   14582 	case WM_T_ICH8:
   14583 	case WM_T_ICH9:
   14584 	case WM_T_ICH10:
   14585 	case WM_T_PCH:
   14586 	case WM_T_PCH2:
   14587 	case WM_T_PCH_LPT:
   14588 	case WM_T_PCH_SPT:
   14589 	case WM_T_PCH_CNP:
   14590 		do {
   14591 			reg = CSR_READ(sc, WMREG_FWSM);
   14592 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14593 				blocked = true;
   14594 				delay(10*1000);
   14595 				continue;
   14596 			}
   14597 			blocked = false;
   14598 		} while (blocked && (i++ < 30));
   14599 		return blocked;
   14601 	case WM_T_82571:
   14602 	case WM_T_82572:
   14603 	case WM_T_82573:
   14604 	case WM_T_82574:
   14605 	case WM_T_82583:
   14606 	case WM_T_80003:
   14607 		reg = CSR_READ(sc, WMREG_MANC);
   14608 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14609 			return true;
   14610 		else
   14611 			return false;
   14613 	default:
   14614 		/* No problem */
   14615 		break;
   14616 	}
   14617 
   14618 	return false;
   14619 }
   14620 
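/*
 * Set the DRV_LOAD bit to tell the firmware that the driver has taken
 * over the device, so the firmware relinquishes control of the
 * hardware.
 */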
   14621 static void
   14622 wm_get_hw_control(struct wm_softc *sc)
   14623 {
   14624 	uint32_t reg;
   14625 
   14626 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14627 		device_xname(sc->sc_dev), __func__));
   14628 
   14629 	if (sc->sc_type == WM_T_82573) {
   14630 		reg = CSR_READ(sc, WMREG_SWSM);
   14631 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14632 	} else if (sc->sc_type >= WM_T_82571) {
   14633 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14634 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14635 	}
   14636 }
   14637 
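/*
 * Clear the DRV_LOAD bit so the firmware knows the driver no longer
 * controls the device.
 */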
   14638 static void
   14639 wm_release_hw_control(struct wm_softc *sc)
   14640 {
   14641 	uint32_t reg;
   14642 
   14643 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14644 		device_xname(sc->sc_dev), __func__));
   14645 
   14646 	if (sc->sc_type == WM_T_82573) {
   14647 		reg = CSR_READ(sc, WMREG_SWSM);
   14648 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14649 	} else if (sc->sc_type >= WM_T_82571) {
   14650 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14651 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14652 	}
   14653 }
   14654 
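/*
 * Gate or ungate automatic PHY configuration by hardware.
 * Only meaningful on PCH2 (82579) and newer.
 */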
   14655 static void
   14656 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14657 {
   14658 	uint32_t reg;
   14659 
   14660 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14661 		device_xname(sc->sc_dev), __func__));
   14662 
   14663 	if (sc->sc_type < WM_T_PCH2)
   14664 		return;
   14665 
   14666 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14667 
   14668 	if (gate)
   14669 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14670 	else
   14671 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14672 
   14673 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14674 }
   14675 
   14676 static int
   14677 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14678 {
   14679 	uint32_t fwsm, reg;
   14680 	int rv = 0;
   14681 
   14682 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14683 		device_xname(sc->sc_dev), __func__));
   14684 
   14685 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14686 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14687 
   14688 	/* Disable ULP */
   14689 	wm_ulp_disable(sc);
   14690 
   14691 	/* Acquire PHY semaphore */
   14692 	rv = sc->phy.acquire(sc);
   14693 	if (rv != 0) {
   14694 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14695 		device_xname(sc->sc_dev), __func__));
   14696 		return -1;
   14697 	}
   14698 
   14699 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14700 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14701 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14702 	 */
   14703 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14704 	switch (sc->sc_type) {
   14705 	case WM_T_PCH_LPT:
   14706 	case WM_T_PCH_SPT:
   14707 	case WM_T_PCH_CNP:
   14708 		if (wm_phy_is_accessible_pchlan(sc))
   14709 			break;
   14710 
   14711 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14712 		 * forcing MAC to SMBus mode first.
   14713 		 */
   14714 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14715 		reg |= CTRL_EXT_FORCE_SMBUS;
   14716 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14717 #if 0
   14718 		/* XXX Isn't this required??? */
   14719 		CSR_WRITE_FLUSH(sc);
   14720 #endif
   14721 		/* Wait 50 milliseconds for MAC to finish any retries
   14722 		 * that it might be trying to perform from previous
   14723 		 * attempts to acknowledge any phy read requests.
   14724 		 */
   14725 		delay(50 * 1000);
   14726 		/* FALLTHROUGH */
   14727 	case WM_T_PCH2:
   14728 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14729 			break;
   14730 		/* FALLTHROUGH */
   14731 	case WM_T_PCH:
   14732 		if (sc->sc_type == WM_T_PCH)
   14733 			if ((fwsm & FWSM_FW_VALID) != 0)
   14734 				break;
   14735 
   14736 		if (wm_phy_resetisblocked(sc) == true) {
   14737 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14738 			break;
   14739 		}
   14740 
   14741 		/* Toggle LANPHYPC Value bit */
   14742 		wm_toggle_lanphypc_pch_lpt(sc);
   14743 
   14744 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14745 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14746 				break;
   14747 
   14748 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14749 			 * so ensure that the MAC is also out of SMBus mode
   14750 			 */
   14751 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14752 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14753 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14754 
   14755 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14756 				break;
   14757 			rv = -1;
   14758 		}
   14759 		break;
   14760 	default:
   14761 		break;
   14762 	}
   14763 
   14764 	/* Release semaphore */
   14765 	sc->phy.release(sc);
   14766 
   14767 	if (rv == 0) {
   14768 		/* Check to see if able to reset PHY.  Print error if not */
   14769 		if (wm_phy_resetisblocked(sc)) {
   14770 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14771 			goto out;
   14772 		}
   14773 
		/* Reset the PHY before any access to it.  Doing so ensures
   14775 		 * that the PHY is in a known good state before we read/write
   14776 		 * PHY registers.  The generic reset is sufficient here,
   14777 		 * because we haven't determined the PHY type yet.
   14778 		 */
		if ((rv = wm_reset_phy(sc)) != 0)
   14780 			goto out;
   14781 
		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, we
		 * only print an error here; the reference e1000e driver
		 * returns E1000E_BLK_PHY_RESET in this condition.
   14787 		 */
   14788 		if (wm_phy_resetisblocked(sc))
   14789 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14790 	}
   14791 
   14792 out:
   14793 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14794 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14795 		delay(10*1000);
   14796 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14797 	}
   14798 
	return rv;
   14800 }
   14801 
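/*
 * Configure MANC/MANC2H so that management (BMC) packets still reach
 * the host while the driver is loaded: disable hardware ARP
 * interception and route the management ports (623/624, per the
 * register macros) to the host.
 */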
   14802 static void
   14803 wm_init_manageability(struct wm_softc *sc)
   14804 {
   14805 
   14806 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14807 		device_xname(sc->sc_dev), __func__));
   14808 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14809 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14810 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14811 
   14812 		/* Disable hardware interception of ARP */
   14813 		manc &= ~MANC_ARP_EN;
   14814 
   14815 		/* Enable receiving management packets to the host */
   14816 		if (sc->sc_type >= WM_T_82571) {
   14817 			manc |= MANC_EN_MNG2HOST;
   14818 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14819 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14820 		}
   14821 
   14822 		CSR_WRITE(sc, WMREG_MANC, manc);
   14823 	}
   14824 }
   14825 
   14826 static void
   14827 wm_release_manageability(struct wm_softc *sc)
   14828 {
   14829 
   14830 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14831 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14832 
   14833 		manc |= MANC_ARP_EN;
   14834 		if (sc->sc_type >= WM_T_82571)
   14835 			manc &= ~MANC_EN_MNG2HOST;
   14836 
   14837 		CSR_WRITE(sc, WMREG_MANC, manc);
   14838 	}
   14839 }
   14840 
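/*
 * Derive the wakeup-related flags (AMT presence, ARC subsystem
 * validity, ASF firmware presence, manageability) from the device
 * type and firmware state.
 */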
   14841 static void
   14842 wm_get_wakeup(struct wm_softc *sc)
   14843 {
   14844 
   14845 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14846 	switch (sc->sc_type) {
   14847 	case WM_T_82573:
   14848 	case WM_T_82583:
   14849 		sc->sc_flags |= WM_F_HAS_AMT;
   14850 		/* FALLTHROUGH */
   14851 	case WM_T_80003:
   14852 	case WM_T_82575:
   14853 	case WM_T_82576:
   14854 	case WM_T_82580:
   14855 	case WM_T_I350:
   14856 	case WM_T_I354:
   14857 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14858 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14859 		/* FALLTHROUGH */
   14860 	case WM_T_82541:
   14861 	case WM_T_82541_2:
   14862 	case WM_T_82547:
   14863 	case WM_T_82547_2:
   14864 	case WM_T_82571:
   14865 	case WM_T_82572:
   14866 	case WM_T_82574:
   14867 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14868 		break;
   14869 	case WM_T_ICH8:
   14870 	case WM_T_ICH9:
   14871 	case WM_T_ICH10:
   14872 	case WM_T_PCH:
   14873 	case WM_T_PCH2:
   14874 	case WM_T_PCH_LPT:
   14875 	case WM_T_PCH_SPT:
   14876 	case WM_T_PCH_CNP:
   14877 		sc->sc_flags |= WM_F_HAS_AMT;
   14878 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14879 		break;
   14880 	default:
   14881 		break;
   14882 	}
   14883 
   14884 	/* 1: HAS_MANAGE */
   14885 	if (wm_enable_mng_pass_thru(sc) != 0)
   14886 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14887 
   14888 	/*
	 * Note that the WOL flag is set after the EEPROM reset is done.
   14891 	 */
   14892 }
   14893 
   14894 /*
   14895  * Unconfigure Ultra Low Power mode.
 * Only for PCH_LPT and newer; a few early I217/I218 devices are
 * excluded (see below).
   14897  */
   14898 static int
   14899 wm_ulp_disable(struct wm_softc *sc)
   14900 {
   14901 	uint32_t reg;
   14902 	uint16_t phyreg;
   14903 	int i = 0, rv = 0;
   14904 
   14905 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14906 		device_xname(sc->sc_dev), __func__));
   14907 	/* Exclude old devices */
   14908 	if ((sc->sc_type < WM_T_PCH_LPT)
   14909 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14910 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14911 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14912 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14913 		return 0;
   14914 
   14915 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14916 		/* Request ME un-configure ULP mode in the PHY */
   14917 		reg = CSR_READ(sc, WMREG_H2ME);
   14918 		reg &= ~H2ME_ULP;
   14919 		reg |= H2ME_ENFORCE_SETTINGS;
   14920 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14921 
   14922 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14923 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14924 			if (i++ == 30) {
   14925 				device_printf(sc->sc_dev, "%s timed out\n",
   14926 				    __func__);
   14927 				return -1;
   14928 			}
   14929 			delay(10 * 1000);
   14930 		}
   14931 		reg = CSR_READ(sc, WMREG_H2ME);
   14932 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14933 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14934 
   14935 		return 0;
   14936 	}
   14937 
   14938 	/* Acquire semaphore */
   14939 	rv = sc->phy.acquire(sc);
   14940 	if (rv != 0) {
   14941 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14942 		device_xname(sc->sc_dev), __func__));
   14943 		return -1;
   14944 	}
   14945 
   14946 	/* Toggle LANPHYPC */
   14947 	wm_toggle_lanphypc_pch_lpt(sc);
   14948 
   14949 	/* Unforce SMBus mode in PHY */
   14950 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14951 	if (rv != 0) {
   14952 		uint32_t reg2;
   14953 
   14954 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14955 			__func__);
   14956 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14957 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14958 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14959 		delay(50 * 1000);
   14960 
   14961 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14962 		    &phyreg);
   14963 		if (rv != 0)
   14964 			goto release;
   14965 	}
   14966 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14967 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14968 
   14969 	/* Unforce SMBus mode in MAC */
   14970 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14971 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14972 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14973 
   14974 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14975 	if (rv != 0)
   14976 		goto release;
   14977 	phyreg |= HV_PM_CTRL_K1_ENA;
   14978 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14979 
   14980 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14981 		&phyreg);
   14982 	if (rv != 0)
   14983 		goto release;
   14984 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14985 	    | I218_ULP_CONFIG1_STICKY_ULP
   14986 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14987 	    | I218_ULP_CONFIG1_WOL_HOST
   14988 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14989 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14990 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14991 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14992 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14993 	phyreg |= I218_ULP_CONFIG1_START;
   14994 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14995 
   14996 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14997 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14998 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14999 
   15000 release:
   15001 	/* Release semaphore */
   15002 	sc->phy.release(sc);
   15003 	wm_gmii_reset(sc);
   15004 	delay(50 * 1000);
   15005 
   15006 	return rv;
   15007 }
   15008 
   15009 /* WOL in the newer chipset interfaces (pchlan) */
   15010 static int
   15011 wm_enable_phy_wakeup(struct wm_softc *sc)
   15012 {
   15013 	device_t dev = sc->sc_dev;
   15014 	uint32_t mreg, moff;
   15015 	uint16_t wuce, wuc, wufc, preg;
   15016 	int i, rv;
   15017 
   15018 	KASSERT(sc->sc_type >= WM_T_PCH);
   15019 
   15020 	/* Copy MAC RARs to PHY RARs */
   15021 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15022 
   15023 	/* Activate PHY wakeup */
   15024 	rv = sc->phy.acquire(sc);
   15025 	if (rv != 0) {
   15026 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15027 		    __func__);
   15028 		return rv;
   15029 	}
   15030 
   15031 	/*
   15032 	 * Enable access to PHY wakeup registers.
   15033 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15034 	 */
   15035 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15036 	if (rv != 0) {
   15037 		device_printf(dev,
   15038 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15039 		goto release;
   15040 	}
   15041 
   15042 	/* Copy MAC MTA to PHY MTA */
   15043 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15044 		uint16_t lo, hi;
   15045 
   15046 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15047 		lo = (uint16_t)(mreg & 0xffff);
   15048 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15049 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15050 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15051 	}
   15052 
   15053 	/* Configure PHY Rx Control register */
   15054 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15055 	mreg = CSR_READ(sc, WMREG_RCTL);
   15056 	if (mreg & RCTL_UPE)
   15057 		preg |= BM_RCTL_UPE;
   15058 	if (mreg & RCTL_MPE)
   15059 		preg |= BM_RCTL_MPE;
   15060 	preg &= ~(BM_RCTL_MO_MASK);
   15061 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15062 	if (moff != 0)
   15063 		preg |= moff << BM_RCTL_MO_SHIFT;
   15064 	if (mreg & RCTL_BAM)
   15065 		preg |= BM_RCTL_BAM;
   15066 	if (mreg & RCTL_PMCF)
   15067 		preg |= BM_RCTL_PMCF;
   15068 	mreg = CSR_READ(sc, WMREG_CTRL);
   15069 	if (mreg & CTRL_RFCE)
   15070 		preg |= BM_RCTL_RFCE;
   15071 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15072 
   15073 	wuc = WUC_APME | WUC_PME_EN;
   15074 	wufc = WUFC_MAG;
   15075 	/* Enable PHY wakeup in MAC register */
   15076 	CSR_WRITE(sc, WMREG_WUC,
   15077 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15078 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15079 
   15080 	/* Configure and enable PHY wakeup in PHY registers */
   15081 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15082 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15083 
   15084 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15085 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15086 
   15087 release:
   15088 	sc->phy.release(sc);
   15089 
	return rv;
   15091 }
   15092 
   15093 /* Power down workaround on D3 */
   15094 static void
   15095 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15096 {
   15097 	uint32_t reg;
   15098 	uint16_t phyreg;
   15099 	int i;
   15100 
   15101 	for (i = 0; i < 2; i++) {
   15102 		/* Disable link */
   15103 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15104 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15105 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15106 
   15107 		/*
   15108 		 * Call gig speed drop workaround on Gig disable before
   15109 		 * accessing any PHY registers
   15110 		 */
   15111 		if (sc->sc_type == WM_T_ICH8)
   15112 			wm_gig_downshift_workaround_ich8lan(sc);
   15113 
   15114 		/* Write VR power-down enable */
   15115 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15116 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15117 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15118 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15119 
   15120 		/* Read it back and test */
   15121 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15122 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15123 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15124 			break;
   15125 
   15126 		/* Issue PHY reset and repeat at most one more time */
   15127 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15128 	}
   15129 }
   15130 
   15131 /*
   15132  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15133  *  @sc: pointer to the HW structure
   15134  *
   15135  *  During S0 to Sx transition, it is possible the link remains at gig
   15136  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15137  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15138  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15139  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15140  *  needs to be written.
 *  Parts that support (and are linked to a partner which supports) EEE in
   15142  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15143  *  than 10Mbps w/o EEE.
   15144  */
   15145 static void
   15146 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15147 {
   15148 	device_t dev = sc->sc_dev;
   15149 	struct ethercom *ec = &sc->sc_ethercom;
   15150 	uint32_t phy_ctrl;
   15151 	int rv;
   15152 
   15153 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15154 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15155 
   15156 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15157 
   15158 	if (sc->sc_phytype == WMPHY_I217) {
   15159 		uint16_t devid = sc->sc_pcidevid;
   15160 
   15161 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15162 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15163 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15164 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15165 		    (sc->sc_type >= WM_T_PCH_SPT))
   15166 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15167 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15168 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15169 
   15170 		if (sc->phy.acquire(sc) != 0)
   15171 			goto out;
   15172 
   15173 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15174 			uint16_t eee_advert;
   15175 
   15176 			rv = wm_read_emi_reg_locked(dev,
   15177 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15178 			if (rv)
   15179 				goto release;
   15180 
   15181 			/*
   15182 			 * Disable LPLU if both link partners support 100BaseT
   15183 			 * EEE and 100Full is advertised on both ends of the
   15184 			 * link, and enable Auto Enable LPI since there will
   15185 			 * be no driver to enable LPI while in Sx.
   15186 			 */
   15187 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15188 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15189 				uint16_t anar, phy_reg;
   15190 
   15191 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15192 				    &anar);
   15193 				if (anar & ANAR_TX_FD) {
   15194 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15195 					    PHY_CTRL_NOND0A_LPLU);
   15196 
   15197 					/* Set Auto Enable LPI after link up */
   15198 					sc->phy.readreg_locked(dev, 2,
   15199 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15200 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15201 					sc->phy.writereg_locked(dev, 2,
   15202 					    I217_LPI_GPIO_CTRL, phy_reg);
   15203 				}
   15204 			}
   15205 		}
   15206 
   15207 		/*
   15208 		 * For i217 Intel Rapid Start Technology support,
   15209 		 * when the system is going into Sx and no manageability engine
   15210 		 * is present, the driver must configure proxy to reset only on
   15211 		 * power good.	LPI (Low Power Idle) state must also reset only
   15212 		 * on power good, as well as the MTA (Multicast table array).
   15213 		 * The SMBus release must also be disabled on LCD reset.
   15214 		 */
   15215 
   15216 		/*
   15217 		 * Enable MTA to reset for Intel Rapid Start Technology
   15218 		 * Support
   15219 		 */
   15220 
   15221 release:
   15222 		sc->phy.release(sc);
   15223 	}
   15224 out:
   15225 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15226 
   15227 	if (sc->sc_type == WM_T_ICH8)
   15228 		wm_gig_downshift_workaround_ich8lan(sc);
   15229 
   15230 	if (sc->sc_type >= WM_T_PCH) {
   15231 		wm_oem_bits_config_ich8lan(sc, false);
   15232 
   15233 		/* Reset PHY to activate OEM bits on 82577/8 */
   15234 		if (sc->sc_type == WM_T_PCH)
   15235 			wm_reset_phy(sc);
   15236 
   15237 		if (sc->phy.acquire(sc) != 0)
   15238 			return;
   15239 		wm_write_smbus_addr(sc);
   15240 		sc->phy.release(sc);
   15241 	}
   15242 }
   15243 
   15244 /*
   15245  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15246  *  @sc: pointer to the HW structure
   15247  *
   15248  *  During Sx to S0 transitions on non-managed devices or managed devices
   15249  *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   15251  *  the PHY.
   15252  *  On i217, setup Intel Rapid Start Technology.
   15253  */
   15254 static int
   15255 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15256 {
   15257 	device_t dev = sc->sc_dev;
   15258 	int rv;
   15259 
   15260 	if (sc->sc_type < WM_T_PCH2)
   15261 		return 0;
   15262 
   15263 	rv = wm_init_phy_workarounds_pchlan(sc);
   15264 	if (rv != 0)
   15265 		return -1;
   15266 
   15267 	/* For i217 Intel Rapid Start Technology support when the system
   15268 	 * is transitioning from Sx and no manageability engine is present
   15269 	 * configure SMBus to restore on reset, disable proxy, and enable
   15270 	 * the reset on MTA (Multicast table array).
   15271 	 */
   15272 	if (sc->sc_phytype == WMPHY_I217) {
   15273 		uint16_t phy_reg;
   15274 
   15275 		if (sc->phy.acquire(sc) != 0)
   15276 			return -1;
   15277 
   15278 		/* Clear Auto Enable LPI after link up */
   15279 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15280 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15281 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15282 
   15283 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15284 			/* Restore clear on SMB if no manageability engine
   15285 			 * is present
   15286 			 */
   15287 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15288 			    &phy_reg);
   15289 			if (rv != 0)
   15290 				goto release;
   15291 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15292 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15293 
   15294 			/* Disable Proxy */
   15295 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15296 		}
   15297 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15299 		if (rv != 0)
   15300 			goto release;
   15301 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15302 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15303 
   15304 release:
   15305 		sc->phy.release(sc);
   15306 		return rv;
   15307 	}
   15308 
   15309 	return 0;
   15310 }
   15311 
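/*
 * Arm the device for wake-on-LAN before suspend: program the wakeup
 * filters (magic packet), run the chip-specific suspend workarounds
 * and enable PME in the PCI power management registers.
 */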
   15312 static void
   15313 wm_enable_wakeup(struct wm_softc *sc)
   15314 {
   15315 	uint32_t reg, pmreg;
   15316 	pcireg_t pmode;
   15317 	int rv = 0;
   15318 
   15319 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15320 		device_xname(sc->sc_dev), __func__));
   15321 
   15322 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15323 	    &pmreg, NULL) == 0)
   15324 		return;
   15325 
   15326 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15327 		goto pme;
   15328 
   15329 	/* Advertise the wakeup capability */
   15330 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15331 	    | CTRL_SWDPIN(3));
   15332 
   15333 	/* Keep the laser running on fiber adapters */
   15334 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15335 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15336 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15337 		reg |= CTRL_EXT_SWDPIN(3);
   15338 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15339 	}
   15340 
   15341 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15342 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15343 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15344 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15345 		wm_suspend_workarounds_ich8lan(sc);
   15346 
   15347 #if 0	/* For the multicast packet */
   15348 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15349 	reg |= WUFC_MC;
   15350 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15351 #endif
   15352 
   15353 	if (sc->sc_type >= WM_T_PCH) {
   15354 		rv = wm_enable_phy_wakeup(sc);
   15355 		if (rv != 0)
   15356 			goto pme;
   15357 	} else {
   15358 		/* Enable wakeup by the MAC */
   15359 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15360 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15361 	}
   15362 
   15363 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15364 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15365 		|| (sc->sc_type == WM_T_PCH2))
   15366 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15367 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15368 
   15369 pme:
   15370 	/* Request PME */
   15371 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15372 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15373 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15374 		/* For WOL */
   15375 		pmode |= PCI_PMCSR_PME_EN;
   15376 	} else {
   15377 		/* Disable WOL */
   15378 		pmode &= ~PCI_PMCSR_PME_EN;
   15379 	}
   15380 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15381 }
   15382 
   15383 /* Disable ASPM L0s and/or L1 for workaround */
   15384 static void
   15385 wm_disable_aspm(struct wm_softc *sc)
   15386 {
   15387 	pcireg_t reg, mask = 0;
	const char *str = "";
   15389 
   15390 	/*
	 * Only for PCIe devices which have the PCIe capability structure
	 * in their PCI config space.
   15393 	 */
   15394 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15395 		return;
   15396 
   15397 	switch (sc->sc_type) {
   15398 	case WM_T_82571:
   15399 	case WM_T_82572:
   15400 		/*
   15401 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15402 		 * State Power management L1 State (ASPM L1).
   15403 		 */
   15404 		mask = PCIE_LCSR_ASPM_L1;
   15405 		str = "L1 is";
   15406 		break;
   15407 	case WM_T_82573:
   15408 	case WM_T_82574:
   15409 	case WM_T_82583:
   15410 		/*
   15411 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15412 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The 82574 and 82583 documents say that
		 * disabling L0s with those specific chipsets is sufficient,
		 * but we follow what the Intel em driver does.
   15417 		 *
   15418 		 * References:
   15419 		 * Errata 8 of the Specification Update of i82573.
   15420 		 * Errata 20 of the Specification Update of i82574.
   15421 		 * Errata 9 of the Specification Update of i82583.
   15422 		 */
   15423 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15424 		str = "L0s and L1 are";
   15425 		break;
   15426 	default:
   15427 		return;
   15428 	}
   15429 
   15430 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15431 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15432 	reg &= ~mask;
   15433 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15434 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15435 
   15436 	/* Print only in wm_attach() */
   15437 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15438 		aprint_verbose_dev(sc->sc_dev,
   15439 		    "ASPM %s disabled to workaround the errata.\n", str);
   15440 }
   15441 
   15442 /* LPLU */
   15443 
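/*
 * Disable D0 Low Power Link Up (LPLU).  The controlling register
 * differs by family: a PHY register on older parts, PHPM or PHY_CTRL
 * on newer ones.
 */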
   15444 static void
   15445 wm_lplu_d0_disable(struct wm_softc *sc)
   15446 {
   15447 	struct mii_data *mii = &sc->sc_mii;
   15448 	uint32_t reg;
   15449 	uint16_t phyval;
   15450 
   15451 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15452 		device_xname(sc->sc_dev), __func__));
   15453 
   15454 	if (sc->sc_phytype == WMPHY_IFE)
   15455 		return;
   15456 
   15457 	switch (sc->sc_type) {
   15458 	case WM_T_82571:
   15459 	case WM_T_82572:
   15460 	case WM_T_82573:
   15461 	case WM_T_82575:
   15462 	case WM_T_82576:
   15463 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15464 		phyval &= ~PMR_D0_LPLU;
   15465 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15466 		break;
   15467 	case WM_T_82580:
   15468 	case WM_T_I350:
   15469 	case WM_T_I210:
   15470 	case WM_T_I211:
   15471 		reg = CSR_READ(sc, WMREG_PHPM);
   15472 		reg &= ~PHPM_D0A_LPLU;
   15473 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15474 		break;
   15475 	case WM_T_82574:
   15476 	case WM_T_82583:
   15477 	case WM_T_ICH8:
   15478 	case WM_T_ICH9:
   15479 	case WM_T_ICH10:
   15480 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15481 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15482 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15483 		CSR_WRITE_FLUSH(sc);
   15484 		break;
   15485 	case WM_T_PCH:
   15486 	case WM_T_PCH2:
   15487 	case WM_T_PCH_LPT:
   15488 	case WM_T_PCH_SPT:
   15489 	case WM_T_PCH_CNP:
   15490 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15491 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15492 		if (wm_phy_resetisblocked(sc) == false)
   15493 			phyval |= HV_OEM_BITS_ANEGNOW;
   15494 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15495 		break;
   15496 	default:
   15497 		break;
   15498 	}
   15499 }
   15500 
   15501 /* EEE */
   15502 
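/*
 * Enable or disable Energy Efficient Ethernet on I350-family devices
 * according to the interface's EEE capability setting.
 */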
   15503 static int
   15504 wm_set_eee_i350(struct wm_softc *sc)
   15505 {
   15506 	struct ethercom *ec = &sc->sc_ethercom;
   15507 	uint32_t ipcnfg, eeer;
   15508 	uint32_t ipcnfg_mask
   15509 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15510 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15511 
   15512 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15513 
   15514 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15515 	eeer = CSR_READ(sc, WMREG_EEER);
   15516 
   15517 	/* Enable or disable per user setting */
   15518 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15519 		ipcnfg |= ipcnfg_mask;
   15520 		eeer |= eeer_mask;
   15521 	} else {
   15522 		ipcnfg &= ~ipcnfg_mask;
   15523 		eeer &= ~eeer_mask;
   15524 	}
   15525 
   15526 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15527 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15528 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15529 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15530 
   15531 	return 0;
   15532 }
   15533 
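/*
 * Enable or disable EEE on 82579/I217 PHYs through the EMI registers,
 * honoring the link partner's advertised EEE abilities.
 */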
   15534 static int
   15535 wm_set_eee_pchlan(struct wm_softc *sc)
   15536 {
   15537 	device_t dev = sc->sc_dev;
   15538 	struct ethercom *ec = &sc->sc_ethercom;
   15539 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15540 	int rv = 0;
   15541 
   15542 	switch (sc->sc_phytype) {
   15543 	case WMPHY_82579:
   15544 		lpa = I82579_EEE_LP_ABILITY;
   15545 		pcs_status = I82579_EEE_PCS_STATUS;
   15546 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15547 		break;
   15548 	case WMPHY_I217:
   15549 		lpa = I217_EEE_LP_ABILITY;
   15550 		pcs_status = I217_EEE_PCS_STATUS;
   15551 		adv_addr = I217_EEE_ADVERTISEMENT;
   15552 		break;
   15553 	default:
   15554 		return 0;
   15555 	}
   15556 
   15557 	if (sc->phy.acquire(sc)) {
   15558 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15559 		return 0;
   15560 	}
   15561 
   15562 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15563 	if (rv != 0)
   15564 		goto release;
   15565 
   15566 	/* Clear bits that enable EEE in various speeds */
   15567 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15568 
   15569 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15570 		/* Save off link partner's EEE ability */
   15571 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15572 		if (rv != 0)
   15573 			goto release;
   15574 
   15575 		/* Read EEE advertisement */
   15576 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15577 			goto release;
   15578 
   15579 		/*
   15580 		 * Enable EEE only for speeds in which the link partner is
   15581 		 * EEE capable and for which we advertise EEE.
   15582 		 */
   15583 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15584 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15585 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15586 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15587 			if ((data & ANLPAR_TX_FD) != 0)
   15588 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15589 			else {
   15590 				/*
   15591 				 * EEE is not supported in 100Half, so ignore
   15592 				 * partner's EEE in 100 ability if full-duplex
   15593 				 * is not advertised.
   15594 				 */
   15595 				sc->eee_lp_ability
   15596 				    &= ~AN_EEEADVERT_100_TX;
   15597 			}
   15598 		}
   15599 	}
   15600 
   15601 	if (sc->sc_phytype == WMPHY_82579) {
   15602 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15603 		if (rv != 0)
   15604 			goto release;
   15605 
   15606 		data &= ~I82579_LPI_PLL_SHUT_100;
   15607 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15608 	}
   15609 
   15610 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15611 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15612 		goto release;
   15613 
   15614 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15615 release:
   15616 	sc->phy.release(sc);
   15617 
   15618 	return rv;
   15619 }
   15620 
   15621 static int
   15622 wm_set_eee(struct wm_softc *sc)
   15623 {
   15624 	struct ethercom *ec = &sc->sc_ethercom;
   15625 
   15626 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15627 		return 0;
   15628 
   15629 	if (sc->sc_type == WM_T_I354) {
   15630 		/* I354 uses an external PHY */
   15631 		return 0; /* not yet */
   15632 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15633 		return wm_set_eee_i350(sc);
   15634 	else if (sc->sc_type >= WM_T_PCH2)
   15635 		return wm_set_eee_pchlan(sc);
   15636 
   15637 	return 0;
   15638 }
   15639 
   15640 /*
   15641  * Workarounds (mainly PHY related).
   15642  * Basically, PHY's workarounds are in the PHY drivers.
   15643  */
   15644 
   15645 /* Work-around for 82566 Kumeran PCS lock loss */
   15646 static int
   15647 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15648 {
   15649 	struct mii_data *mii = &sc->sc_mii;
   15650 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15651 	int i, reg, rv;
   15652 	uint16_t phyreg;
   15653 
   15654 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15655 		device_xname(sc->sc_dev), __func__));
   15656 
   15657 	/* If the link is not up, do nothing */
   15658 	if ((status & STATUS_LU) == 0)
   15659 		return 0;
   15660 
	/* Nothing to do if the link speed is not 1Gbps */
   15662 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15663 		return 0;
   15664 
   15665 	for (i = 0; i < 10; i++) {
		/* Read twice; the first read clears stale diag status */
   15667 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15668 		if (rv != 0)
   15669 			return rv;
   15670 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15671 		if (rv != 0)
   15672 			return rv;
   15673 
   15674 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15675 			goto out;	/* GOOD! */
   15676 
   15677 		/* Reset the PHY */
   15678 		wm_reset_phy(sc);
   15679 		delay(5*1000);
   15680 	}
   15681 
   15682 	/* Disable GigE link negotiation */
   15683 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15684 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15685 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15686 
   15687 	/*
   15688 	 * Call gig speed drop workaround on Gig disable before accessing
   15689 	 * any PHY registers.
   15690 	 */
   15691 	wm_gig_downshift_workaround_ich8lan(sc);
   15692 
   15693 out:
   15694 	return 0;
   15695 }
   15696 
   15697 /*
   15698  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15699  *  @sc: pointer to the HW structure
   15700  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15702  *  LPLU, Gig disable, MDIC PHY reset):
   15703  *    1) Set Kumeran Near-end loopback
   15704  *    2) Clear Kumeran Near-end loopback
   15705  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15706  */
   15707 static void
   15708 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15709 {
   15710 	uint16_t kmreg;
   15711 
   15712 	/* Only for igp3 */
   15713 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15714 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15715 			return;
   15716 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15717 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15718 			return;
   15719 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15720 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15721 	}
   15722 }
   15723 
   15724 /*
   15725  * Workaround for pch's PHYs
   15726  * XXX should be moved to new PHY driver?
   15727  */
   15728 static int
   15729 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15730 {
   15731 	device_t dev = sc->sc_dev;
   15732 	struct mii_data *mii = &sc->sc_mii;
   15733 	struct mii_softc *child;
   15734 	uint16_t phy_data, phyrev = 0;
   15735 	int phytype = sc->sc_phytype;
   15736 	int rv;
   15737 
   15738 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15739 		device_xname(dev), __func__));
   15740 	KASSERT(sc->sc_type == WM_T_PCH);
   15741 
   15742 	/* Set MDIO slow mode before any other MDIO access */
   15743 	if (phytype == WMPHY_82577)
   15744 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15745 			return rv;
   15746 
   15747 	child = LIST_FIRST(&mii->mii_phys);
   15748 	if (child != NULL)
   15749 		phyrev = child->mii_mpd_rev;
   15750 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15752 	if ((child != NULL) &&
   15753 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15754 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15755 		/* Disable generation of early preamble (0x4431) */
   15756 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15757 		    &phy_data);
   15758 		if (rv != 0)
   15759 			return rv;
   15760 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15761 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15762 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15763 		    phy_data);
   15764 		if (rv != 0)
   15765 			return rv;
   15766 
   15767 		/* Preamble tuning for SSC */
   15768 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15769 		if (rv != 0)
   15770 			return rv;
   15771 	}
   15772 
   15773 	/* 82578 */
   15774 	if (phytype == WMPHY_82578) {
   15775 		/*
   15776 		 * Return registers to default by doing a soft reset then
   15777 		 * writing 0x3140 to the control register
   15778 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15779 		 */
   15780 		if ((child != NULL) && (phyrev < 2)) {
   15781 			PHY_RESET(child);
   15782 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15783 			if (rv != 0)
   15784 				return rv;
   15785 		}
   15786 	}
   15787 
   15788 	/* Select page 0 */
   15789 	if ((rv = sc->phy.acquire(sc)) != 0)
   15790 		return rv;
   15791 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15792 	sc->phy.release(sc);
   15793 	if (rv != 0)
   15794 		return rv;
   15795 
   15796 	/*
   15797 	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link, so that K1 is disabled if the link is at 1Gbps.
   15799 	 */
   15800 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15801 		return rv;
   15802 
   15803 	/* Workaround for link disconnects on a busy hub in half duplex */
   15804 	rv = sc->phy.acquire(sc);
   15805 	if (rv)
   15806 		return rv;
   15807 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15808 	if (rv)
   15809 		goto release;
   15810 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15811 	    phy_data & 0x00ff);
   15812 	if (rv)
   15813 		goto release;
   15814 
   15815 	/* Set MSE higher to enable link to stay up when noise is high */
   15816 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15817 release:
   15818 	sc->phy.release(sc);
   15819 
   15820 	return rv;
   15821 }
   15822 
   15823 /*
   15824  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15825  *  @sc:   pointer to the HW structure
   15826  */
   15827 static void
   15828 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15829 {
   15830 	device_t dev = sc->sc_dev;
   15831 	uint32_t mac_reg;
   15832 	uint16_t i, wuce;
   15833 	int count;
   15834 
   15835 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15836 		device_xname(sc->sc_dev), __func__));
   15837 
   15838 	if (sc->phy.acquire(sc) != 0)
   15839 		return;
   15840 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15841 		goto release;
   15842 
   15843 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15844 	count = wm_rar_count(sc);
   15845 	for (i = 0; i < count; i++) {
   15846 		uint16_t lo, hi;
   15847 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15848 		lo = (uint16_t)(mac_reg & 0xffff);
   15849 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15850 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15851 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15852 
   15853 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15854 		lo = (uint16_t)(mac_reg & 0xffff);
   15855 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15856 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15857 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15858 	}
   15859 
   15860 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15861 
   15862 release:
   15863 	sc->phy.release(sc);
   15864 }
   15865 
   15866 /*
   15867  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15868  *  done after every PHY reset.
   15869  */
   15870 static int
   15871 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15872 {
   15873 	device_t dev = sc->sc_dev;
   15874 	int rv;
   15875 
   15876 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15877 		device_xname(dev), __func__));
   15878 	KASSERT(sc->sc_type == WM_T_PCH2);
   15879 
   15880 	/* Set MDIO slow mode before any other MDIO access */
   15881 	rv = wm_set_mdio_slow_mode_hv(sc);
   15882 	if (rv != 0)
   15883 		return rv;
   15884 
   15885 	rv = sc->phy.acquire(sc);
   15886 	if (rv != 0)
   15887 		return rv;
   15888 	/* Set MSE higher to enable link to stay up when noise is high */
   15889 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15890 	if (rv != 0)
   15891 		goto release;
   15892 	/* Drop link after 5 times MSE threshold was reached */
   15893 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15894 release:
   15895 	sc->phy.release(sc);
   15896 
   15897 	return rv;
   15898 }
   15899 
   15900 /**
   15901  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15902  *  @link: link up bool flag
   15903  *
   15904  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15905  *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
   15907  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15908  *  speeds in order to avoid Tx hangs.
   15909  **/
   15910 static int
   15911 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15912 {
   15913 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15914 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15915 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15916 	uint16_t phyreg;
   15917 
   15918 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv;

		if (sc->phy.acquire(sc) != 0)
			return -1;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
   15922 		if (rv != 0)
   15923 			goto release;
   15924 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15925 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15926 		if (rv != 0)
   15927 			goto release;
   15928 		delay(20);
   15929 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15930 
   15931 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15932 		    &phyreg);
   15933 release:
   15934 		sc->phy.release(sc);
   15935 		return rv;
   15936 	}
   15937 
   15938 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15939 
   15940 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15941 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15942 	    || !link
   15943 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15944 		goto update_fextnvm6;
   15945 
   15946 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15947 
   15948 	/* Clear link status transmit timeout */
   15949 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15950 	if (speed == STATUS_SPEED_100) {
   15951 		/* Set inband Tx timeout to 5x10us for 100Half */
   15952 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15953 
   15954 		/* Do not extend the K1 entry latency for 100Half */
   15955 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15956 	} else {
   15957 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15958 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15959 
   15960 		/* Extend the K1 entry latency for 10 Mbps */
   15961 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15962 	}
   15963 
   15964 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15965 
   15966 update_fextnvm6:
   15967 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15968 	return 0;
   15969 }
   15970 
   15971 /*
   15972  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15973  *  @sc:   pointer to the HW structure
   15974  *  @link: link up bool flag
   15975  *
   15976  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15978  *  If link is down, the function will restore the default K1 setting located
   15979  *  in the NVM.
   15980  */
   15981 static int
   15982 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15983 {
   15984 	int k1_enable = sc->sc_nvm_k1_enabled;
   15985 
   15986 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15987 		device_xname(sc->sc_dev), __func__));
   15988 
   15989 	if (sc->phy.acquire(sc) != 0)
   15990 		return -1;
   15991 
   15992 	if (link) {
   15993 		k1_enable = 0;
   15994 
   15995 		/* Link stall fix for link up */
   15996 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15997 		    0x0100);
   15998 	} else {
   15999 		/* Link stall fix for link down */
   16000 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16001 		    0x4100);
   16002 	}
   16003 
   16004 	wm_configure_k1_ich8lan(sc, k1_enable);
   16005 	sc->phy.release(sc);
   16006 
   16007 	return 0;
   16008 }
   16009 
   16010 /*
   16011  *  wm_k1_workaround_lv - K1 Si workaround
   16012  *  @sc:   pointer to the HW structure
   16013  *
   16014  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   16015  *  Disable K1 for 1000 and 100 speeds
   16016  */
   16017 static int
   16018 wm_k1_workaround_lv(struct wm_softc *sc)
   16019 {
   16020 	uint32_t reg;
   16021 	uint16_t phyreg;
   16022 	int rv;
   16023 
   16024 	if (sc->sc_type != WM_T_PCH2)
   16025 		return 0;
   16026 
   16027 	/* Set K1 beacon duration based on 10Mbps speed */
   16028 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16029 	if (rv != 0)
   16030 		return rv;
   16031 
   16032 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16033 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16034 		if (phyreg &
   16035 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   16037 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16038 			    &phyreg);
   16039 			if (rv != 0)
   16040 				return rv;
   16041 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16042 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16043 			    phyreg);
   16044 			if (rv != 0)
   16045 				return rv;
   16046 		} else {
   16047 			/* For 10Mbps */
   16048 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16049 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16050 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16051 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16052 		}
   16053 	}
   16054 
   16055 	return 0;
   16056 }
   16057 
   16058 /*
   16059  *  wm_link_stall_workaround_hv - Si workaround
   16060  *  @sc: pointer to the HW structure
   16061  *
   16062  *  This function works around a Si bug where the link partner can get
   16063  *  a link up indication before the PHY does. If small packets are sent
   16064  *  by the link partner they can be placed in the packet buffer without
   16065  *  being properly accounted for by the PHY and will stall preventing
   16066  *  further packets from being received.  The workaround is to clear the
   16067  *  packet buffer after the PHY detects link up.
   16068  */
   16069 static int
   16070 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16071 {
   16072 	uint16_t phyreg;
   16073 
   16074 	if (sc->sc_phytype != WMPHY_82578)
   16075 		return 0;
   16076 
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   16078 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16079 	if ((phyreg & BMCR_LOOP) != 0)
   16080 		return 0;
   16081 
   16082 	/* Check if link is up and at 1Gbps */
   16083 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16084 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16085 	    | BM_CS_STATUS_SPEED_MASK;
   16086 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16087 		| BM_CS_STATUS_SPEED_1000))
   16088 		return 0;
   16089 
   16090 	delay(200 * 1000);	/* XXX too big */
   16091 
   16092 	/* Flush the packets in the fifo buffer */
   16093 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16094 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16095 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16096 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16097 
   16098 	return 0;
   16099 }
   16100 
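/*
 * Put the PHY's MDIO interface into slow mode.  This must be done
 * before any other MDIO access on some PCH-class PHYs.
 */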
   16101 static int
   16102 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16103 {
   16104 	int rv;
   16105 	uint16_t reg;
   16106 
   16107 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16108 	if (rv != 0)
   16109 		return rv;
   16110 
   16111 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16112 	    reg | HV_KMRN_MDIO_SLOW);
   16113 }
   16114 
   16115 /*
   16116  *  wm_configure_k1_ich8lan - Configure K1 power state
   16117  *  @sc: pointer to the HW structure
   16118  *  @enable: K1 state to configure
   16119  *
   16120  *  Configure the K1 power state based on the provided parameter.
   16121  *  Assumes semaphore already acquired.
   16122  */
   16123 static void
   16124 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16125 {
   16126 	uint32_t ctrl, ctrl_ext, tmp;
   16127 	uint16_t kmreg;
   16128 	int rv;
   16129 
   16130 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16131 
   16132 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16133 	if (rv != 0)
   16134 		return;
   16135 
   16136 	if (k1_enable)
   16137 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16138 	else
   16139 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16140 
   16141 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16142 	if (rv != 0)
   16143 		return;
   16144 
   16145 	delay(20);
   16146 
   16147 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16148 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16149 
   16150 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16151 	tmp |= CTRL_FRCSPD;
   16152 
   16153 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16154 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16155 	CSR_WRITE_FLUSH(sc);
   16156 	delay(20);
   16157 
   16158 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16159 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16160 	CSR_WRITE_FLUSH(sc);
   16161 	delay(20);
   16164 }
   16165 
   16166 /* special case - for 82575 - need to do manual init ... */
   16167 static void
   16168 wm_reset_init_script_82575(struct wm_softc *sc)
   16169 {
   16170 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   16173 	 */
   16174 
   16175 	/* SerDes configuration via SERDESCTRL */
   16176 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16177 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16178 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16179 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16180 
   16181 	/* CCM configuration via CCMCTL register */
   16182 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16183 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16184 
   16185 	/* PCIe lanes configuration */
   16186 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16187 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16188 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16189 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16190 
   16191 	/* PCIe PLL Configuration */
   16192 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16193 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16194 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16195 }
   16196 
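/*
 * Re-apply the MDICNFG destination (external MDIO) and shared MDIO
 * settings from the NVM, for 82580 parts in SGMII mode.
 */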
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

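/* A PHY ID register reading of all zeros or all ones is invalid. */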
#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

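/*
 * Check whether the PHY is accessible by reading its ID registers,
 * retrying in MDIO slow mode when the first attempts fail.  On
 * PCH LPT and newer, also unforce SMBus mode when ME is not active.
 */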
static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in MDIO slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
		sc->phy.acquire(sc);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, phyreg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

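/*
 * Toggle the LANPHYPC pin value to power-cycle the PHY, then wait for
 * the PHY to come back: a fixed delay on older parts, or until
 * CTRL_EXT_LPCD is set on PCH LPT and newer.
 */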
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

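/*
 * Program the Latency Tolerance Reporting (LTR) value and the OBFF
 * high water mark.  With link up, the tolerated latency is derived
 * from the Rx packet buffer size and the link speed, then capped by
 * the platform's maximum LTR value from PCI config space.
 */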
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
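		/*
		 * Worked example with illustrative numbers: rxa = 24KB,
		 * MTU = 1500 and a 1000Mb/s link give
		 *   lat_ns = (24 * 1024 - 2 * (1500 + 14)) * 8 * 1000 / 1000
		 *          = 172384 ns.
		 * 172384 exceeds the 10-bit value range, so it is divided
		 * (rounding up) by 2^5 twice: 172384 -> 5387 -> 169, giving
		 * scale = 2 and value = 169, i.e. a tolerated latency of
		 * 169 * 2^10 ns (~173 us).
		 */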
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
			    "(rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies are the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

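		/*
		 * Bounce the device through D3hot and back to D0 so the
		 * PLL restarts with the overridden autoload word.
		 */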
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

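/*
 * Quirk for legacy (INTx) interrupts on PCH SPT/CNP: ungate the side
 * clock and disable IOSF sideband clock gating and clock requests.
 */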
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}