/*	$NetBSD: if_wm.c,v 1.664 2020/01/31 12:03:23 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX multiqueue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.664 2020/01/31 12:03:23 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
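
/*
 * Illustrative DPRINTF() usage (a sketch, not lifted from this section):
 * the second argument is passed verbatim to printf(9), hence the extra
 * set of parentheses around the format string and its arguments:
 *
 *	DPRINTF(WM_DEBUG_LINK,
 *	    ("%s: link up\n", device_xname(sc->sc_dev)));
 */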

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_WORKQUEUE_FLAGS	(WQ_PERCPU | WQ_MPSAFE)
#else
#define CALLOUT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
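
/*
 * Illustrative note on the ring arithmetic above: both sizes are powers
 * of two, so the "& (size - 1)" in WM_NEXTTX()/WM_NEXTTXS() is a cheap
 * modulo.  With 256 descriptors, for example:
 *
 *	WM_NEXTTX(txq, 255) == (255 + 1) & 255 == 0
 *
 * i.e. the index wraps back to the start of the ring with no division.
 */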

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  A full-sized jumbo packet consumes 5 Rx buffers.  We
 * allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif
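
/*
 * Note on the limits above (sketch of intent, inferred from the field
 * comments in struct wm_softc below): the *_INTR_PROCESS_LIMIT values
 * bound how many descriptors are processed per hardware interrupt, and
 * the *_PROCESS_LIMIT values bound one softint/workqueue pass; any work
 * left over is deferred rather than processed to completion.
 */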

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;
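
/*
 * Note (descriptive): the unions above are dimensioned for the largest
 * supported ring; which member format is actually used at run time is
 * selected per chip via txq_descsize/rxq_descsize below.
 */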

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
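
/*
 * Illustrative use (a sketch, assuming the sc_funcid convention noted in
 * struct wm_softc below): the per-PHY software/firmware semaphore mask
 * for a port would be looked up as swfwphysem[sc->sc_funcid].
 */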

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
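
/*
 * Illustrative expansion (not compiled): WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the ## operators inside the string literal are not expanded; the
 * array is simply large enough for names like "txq00txdw").
 * WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname) then formats
 * "txq00txdw" into that buffer and registers the counter with
 * evcnt_attach_dynamic(9).
 */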

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags, to
	 * manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
	krndsource_t rnd_source;	/* random source */
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
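
/*
 * Illustrative call pattern for the op vectors above (a sketch, not
 * lifted from the code): the function pointers are filled in at attach
 * time for the specific chip, and an NVM read then goes through
 * something like
 *
 *	if (sc->nvm.acquire(sc) == 0) {
 *		rv = sc->nvm.read(sc, offset, nwords, data);
 *		sc->nvm.release(sc);
 *	}
 */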

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only;
					 * MSI-X uses sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
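
/*
 * Note (descriptive): when sc_core_lock is NULL (the non-MPSAFE
 * configuration), the macros above degrade gracefully: WM_CORE_LOCK()/
 * WM_CORE_UNLOCK() become no-ops and WM_CORE_LOCKED() is always true.
 */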

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
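
/*
 * Illustrative trace of the tail-pointer idiom above: rxq_tailp always
 * points at the m_next slot where the next mbuf will be stored, so
 * each link is O(1):
 *
 *	RESET:     rxq_head = NULL,  rxq_tailp = &rxq_head
 *	LINK(m1):  rxq_head = m1,    rxq_tailp = &m1->m_next
 *	LINK(m2):  m1->m_next = m2,  rxq_tailp = &m2->m_next
 */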

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
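
/*
 * Illustrative example of the LO/HI split above: descriptor ring
 * addresses are programmed into the hardware as two 32-bit halves, so
 * a 64-bit DMA address such as 0x123456789 is written as
 * LO = 0x23456789 and HI = 0x1; on platforms with a 32-bit bus_addr_t
 * the HI expression is a constant 0.
 */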

/*
 * Register read/write functions other than CSR_{READ,WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers, not MII registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

   1022 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1023     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1024 
   1025 /*
   1026  * Devices supported by this driver.
   1027  */
   1028 static const struct wm_product {
   1029 	pci_vendor_id_t		wmp_vendor;
   1030 	pci_product_id_t	wmp_product;
   1031 	const char		*wmp_name;
   1032 	wm_chip_type		wmp_type;
   1033 	uint32_t		wmp_flags;
   1034 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1035 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1036 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1037 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1038 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1039 } wm_products[] = {
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1041 	  "Intel i82542 1000BASE-X Ethernet",
   1042 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1045 	  "Intel i82543GC 1000BASE-X Ethernet",
   1046 	  WM_T_82543,		WMP_F_FIBER },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1049 	  "Intel i82543GC 1000BASE-T Ethernet",
   1050 	  WM_T_82543,		WMP_F_COPPER },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1053 	  "Intel i82544EI 1000BASE-T Ethernet",
   1054 	  WM_T_82544,		WMP_F_COPPER },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1057 	  "Intel i82544EI 1000BASE-X Ethernet",
   1058 	  WM_T_82544,		WMP_F_FIBER },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1061 	  "Intel i82544GC 1000BASE-T Ethernet",
   1062 	  WM_T_82544,		WMP_F_COPPER },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1065 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1066 	  WM_T_82544,		WMP_F_COPPER },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1069 	  "Intel i82540EM 1000BASE-T Ethernet",
   1070 	  WM_T_82540,		WMP_F_COPPER },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1073 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1074 	  WM_T_82540,		WMP_F_COPPER },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1077 	  "Intel i82540EP 1000BASE-T Ethernet",
   1078 	  WM_T_82540,		WMP_F_COPPER },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1081 	  "Intel i82540EP 1000BASE-T Ethernet",
   1082 	  WM_T_82540,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1085 	  "Intel i82540EP 1000BASE-T Ethernet",
   1086 	  WM_T_82540,		WMP_F_COPPER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1089 	  "Intel i82545EM 1000BASE-T Ethernet",
   1090 	  WM_T_82545,		WMP_F_COPPER },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1093 	  "Intel i82545GM 1000BASE-T Ethernet",
   1094 	  WM_T_82545_3,		WMP_F_COPPER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1097 	  "Intel i82545GM 1000BASE-X Ethernet",
   1098 	  WM_T_82545_3,		WMP_F_FIBER },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1101 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1102 	  WM_T_82545_3,		WMP_F_SERDES },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1105 	  "Intel i82546EB 1000BASE-T Ethernet",
   1106 	  WM_T_82546,		WMP_F_COPPER },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1109 	  "Intel i82546EB 1000BASE-T Ethernet",
   1110 	  WM_T_82546,		WMP_F_COPPER },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1113 	  "Intel i82545EM 1000BASE-X Ethernet",
   1114 	  WM_T_82545,		WMP_F_FIBER },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1117 	  "Intel i82546EB 1000BASE-X Ethernet",
   1118 	  WM_T_82546,		WMP_F_FIBER },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1121 	  "Intel i82546GB 1000BASE-T Ethernet",
   1122 	  WM_T_82546_3,		WMP_F_COPPER },
   1123 
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1125 	  "Intel i82546GB 1000BASE-X Ethernet",
   1126 	  WM_T_82546_3,		WMP_F_FIBER },
   1127 
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1129 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1130 	  WM_T_82546_3,		WMP_F_SERDES },
   1131 
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1133 	  "i82546GB quad-port Gigabit Ethernet",
   1134 	  WM_T_82546_3,		WMP_F_COPPER },
   1135 
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1137 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1138 	  WM_T_82546_3,		WMP_F_COPPER },
   1139 
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1141 	  "Intel PRO/1000MT (82546GB)",
   1142 	  WM_T_82546_3,		WMP_F_COPPER },
   1143 
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1145 	  "Intel i82541EI 1000BASE-T Ethernet",
   1146 	  WM_T_82541,		WMP_F_COPPER },
   1147 
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1149 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1150 	  WM_T_82541,		WMP_F_COPPER },
   1151 
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1153 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1154 	  WM_T_82541,		WMP_F_COPPER },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1157 	  "Intel i82541ER 1000BASE-T Ethernet",
   1158 	  WM_T_82541_2,		WMP_F_COPPER },
   1159 
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1161 	  "Intel i82541GI 1000BASE-T Ethernet",
   1162 	  WM_T_82541_2,		WMP_F_COPPER },
   1163 
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1165 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1166 	  WM_T_82541_2,		WMP_F_COPPER },
   1167 
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1169 	  "Intel i82541PI 1000BASE-T Ethernet",
   1170 	  WM_T_82541_2,		WMP_F_COPPER },
   1171 
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1173 	  "Intel i82547EI 1000BASE-T Ethernet",
   1174 	  WM_T_82547,		WMP_F_COPPER },
   1175 
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1177 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1178 	  WM_T_82547,		WMP_F_COPPER },
   1179 
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1181 	  "Intel i82547GI 1000BASE-T Ethernet",
   1182 	  WM_T_82547_2,		WMP_F_COPPER },
   1183 
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1185 	  "Intel PRO/1000 PT (82571EB)",
   1186 	  WM_T_82571,		WMP_F_COPPER },
   1187 
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1189 	  "Intel PRO/1000 PF (82571EB)",
   1190 	  WM_T_82571,		WMP_F_FIBER },
   1191 
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1193 	  "Intel PRO/1000 PB (82571EB)",
   1194 	  WM_T_82571,		WMP_F_SERDES },
   1195 
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1197 	  "Intel PRO/1000 QT (82571EB)",
   1198 	  WM_T_82571,		WMP_F_COPPER },
   1199 
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1201 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1202 	  WM_T_82571,		WMP_F_COPPER },
   1203 
   1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1205 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1206 	  WM_T_82571,		WMP_F_COPPER },
   1207 
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1209 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1210 	  WM_T_82571,		WMP_F_SERDES },
   1211 
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1213 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1214 	  WM_T_82571,		WMP_F_SERDES },
   1215 
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1217 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1218 	  WM_T_82571,		WMP_F_FIBER },
   1219 
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1221 	  "Intel i82572EI 1000baseT Ethernet",
   1222 	  WM_T_82572,		WMP_F_COPPER },
   1223 
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1225 	  "Intel i82572EI 1000baseX Ethernet",
   1226 	  WM_T_82572,		WMP_F_FIBER },
   1227 
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1229 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1230 	  WM_T_82572,		WMP_F_SERDES },
   1231 
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1233 	  "Intel i82572EI 1000baseT Ethernet",
   1234 	  WM_T_82572,		WMP_F_COPPER },
   1235 
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1237 	  "Intel i82573E",
   1238 	  WM_T_82573,		WMP_F_COPPER },
   1239 
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1241 	  "Intel i82573E IAMT",
   1242 	  WM_T_82573,		WMP_F_COPPER },
   1243 
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1245 	  "Intel i82573L Gigabit Ethernet",
   1246 	  WM_T_82573,		WMP_F_COPPER },
   1247 
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1249 	  "Intel i82574L",
   1250 	  WM_T_82574,		WMP_F_COPPER },
   1251 
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1253 	  "Intel i82574L",
   1254 	  WM_T_82574,		WMP_F_COPPER },
   1255 
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1257 	  "Intel i82583V",
   1258 	  WM_T_82583,		WMP_F_COPPER },
   1259 
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
    1261 	  "Intel i80003 dual 1000baseT Ethernet",
   1262 	  WM_T_80003,		WMP_F_COPPER },
   1263 
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
    1265 	  "Intel i80003 dual 1000baseX Ethernet",
   1266 	  WM_T_80003,		WMP_F_COPPER },
   1267 
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1269 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1270 	  WM_T_80003,		WMP_F_SERDES },
   1271 
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1273 	  "Intel i80003 1000baseT Ethernet",
   1274 	  WM_T_80003,		WMP_F_COPPER },
   1275 
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1277 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1278 	  WM_T_80003,		WMP_F_SERDES },
   1279 
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1281 	  "Intel i82801H (M_AMT) LAN Controller",
   1282 	  WM_T_ICH8,		WMP_F_COPPER },
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1284 	  "Intel i82801H (AMT) LAN Controller",
   1285 	  WM_T_ICH8,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1287 	  "Intel i82801H LAN Controller",
   1288 	  WM_T_ICH8,		WMP_F_COPPER },
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1290 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1291 	  WM_T_ICH8,		WMP_F_COPPER },
   1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1293 	  "Intel i82801H (M) LAN Controller",
   1294 	  WM_T_ICH8,		WMP_F_COPPER },
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1296 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1297 	  WM_T_ICH8,		WMP_F_COPPER },
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1299 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1300 	  WM_T_ICH8,		WMP_F_COPPER },
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1302 	  "82567V-3 LAN Controller",
   1303 	  WM_T_ICH8,		WMP_F_COPPER },
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1305 	  "82801I (AMT) LAN Controller",
   1306 	  WM_T_ICH9,		WMP_F_COPPER },
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1308 	  "82801I 10/100 LAN Controller",
   1309 	  WM_T_ICH9,		WMP_F_COPPER },
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1311 	  "82801I (G) 10/100 LAN Controller",
   1312 	  WM_T_ICH9,		WMP_F_COPPER },
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1314 	  "82801I (GT) 10/100 LAN Controller",
   1315 	  WM_T_ICH9,		WMP_F_COPPER },
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1317 	  "82801I (C) LAN Controller",
   1318 	  WM_T_ICH9,		WMP_F_COPPER },
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1320 	  "82801I mobile LAN Controller",
   1321 	  WM_T_ICH9,		WMP_F_COPPER },
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1323 	  "82801I mobile (V) LAN Controller",
   1324 	  WM_T_ICH9,		WMP_F_COPPER },
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1326 	  "82801I mobile (AMT) LAN Controller",
   1327 	  WM_T_ICH9,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1329 	  "82567LM-4 LAN Controller",
   1330 	  WM_T_ICH9,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1332 	  "82567LM-2 LAN Controller",
   1333 	  WM_T_ICH10,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1335 	  "82567LF-2 LAN Controller",
   1336 	  WM_T_ICH10,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1338 	  "82567LM-3 LAN Controller",
   1339 	  WM_T_ICH10,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1341 	  "82567LF-3 LAN Controller",
   1342 	  WM_T_ICH10,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1344 	  "82567V-2 LAN Controller",
   1345 	  WM_T_ICH10,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1347 	  "82567V-3? LAN Controller",
   1348 	  WM_T_ICH10,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1350 	  "HANKSVILLE LAN Controller",
   1351 	  WM_T_ICH10,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1353 	  "PCH LAN (82577LM) Controller",
   1354 	  WM_T_PCH,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1356 	  "PCH LAN (82577LC) Controller",
   1357 	  WM_T_PCH,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1359 	  "PCH LAN (82578DM) Controller",
   1360 	  WM_T_PCH,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1362 	  "PCH LAN (82578DC) Controller",
   1363 	  WM_T_PCH,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1365 	  "PCH2 LAN (82579LM) Controller",
   1366 	  WM_T_PCH2,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1368 	  "PCH2 LAN (82579V) Controller",
   1369 	  WM_T_PCH2,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1371 	  "82575EB dual-1000baseT Ethernet",
   1372 	  WM_T_82575,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1374 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1375 	  WM_T_82575,		WMP_F_SERDES },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1377 	  "82575GB quad-1000baseT Ethernet",
   1378 	  WM_T_82575,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1380 	  "82575GB quad-1000baseT Ethernet (PM)",
   1381 	  WM_T_82575,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1383 	  "82576 1000BaseT Ethernet",
   1384 	  WM_T_82576,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1386 	  "82576 1000BaseX Ethernet",
   1387 	  WM_T_82576,		WMP_F_FIBER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1390 	  "82576 gigabit Ethernet (SERDES)",
   1391 	  WM_T_82576,		WMP_F_SERDES },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1394 	  "82576 quad-1000BaseT Ethernet",
   1395 	  WM_T_82576,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1398 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1399 	  WM_T_82576,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1402 	  "82576 gigabit Ethernet",
   1403 	  WM_T_82576,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1406 	  "82576 gigabit Ethernet (SERDES)",
   1407 	  WM_T_82576,		WMP_F_SERDES },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1409 	  "82576 quad-gigabit Ethernet (SERDES)",
   1410 	  WM_T_82576,		WMP_F_SERDES },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1413 	  "82580 1000BaseT Ethernet",
   1414 	  WM_T_82580,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1416 	  "82580 1000BaseX Ethernet",
   1417 	  WM_T_82580,		WMP_F_FIBER },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1420 	  "82580 1000BaseT Ethernet (SERDES)",
   1421 	  WM_T_82580,		WMP_F_SERDES },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1424 	  "82580 gigabit Ethernet (SGMII)",
   1425 	  WM_T_82580,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1427 	  "82580 dual-1000BaseT Ethernet",
   1428 	  WM_T_82580,		WMP_F_COPPER },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1431 	  "82580 quad-1000BaseX Ethernet",
   1432 	  WM_T_82580,		WMP_F_FIBER },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1435 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1436 	  WM_T_82580,		WMP_F_COPPER },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1439 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1440 	  WM_T_82580,		WMP_F_SERDES },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1443 	  "DH89XXCC 1000BASE-KX Ethernet",
   1444 	  WM_T_82580,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1447 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1448 	  WM_T_82580,		WMP_F_SERDES },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1451 	  "I350 Gigabit Network Connection",
   1452 	  WM_T_I350,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1455 	  "I350 Gigabit Fiber Network Connection",
   1456 	  WM_T_I350,		WMP_F_FIBER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1459 	  "I350 Gigabit Backplane Connection",
   1460 	  WM_T_I350,		WMP_F_SERDES },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1463 	  "I350 Quad Port Gigabit Ethernet",
   1464 	  WM_T_I350,		WMP_F_SERDES },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1467 	  "I350 Gigabit Connection",
   1468 	  WM_T_I350,		WMP_F_COPPER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1471 	  "I354 Gigabit Ethernet (KX)",
   1472 	  WM_T_I354,		WMP_F_SERDES },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1475 	  "I354 Gigabit Ethernet (SGMII)",
   1476 	  WM_T_I354,		WMP_F_COPPER },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1479 	  "I354 Gigabit Ethernet (2.5G)",
   1480 	  WM_T_I354,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1483 	  "I210-T1 Ethernet Server Adapter",
   1484 	  WM_T_I210,		WMP_F_COPPER },
   1485 
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1487 	  "I210 Ethernet (Copper OEM)",
   1488 	  WM_T_I210,		WMP_F_COPPER },
   1489 
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1491 	  "I210 Ethernet (Copper IT)",
   1492 	  WM_T_I210,		WMP_F_COPPER },
   1493 
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1495 	  "I210 Ethernet (Copper, FLASH less)",
   1496 	  WM_T_I210,		WMP_F_COPPER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1499 	  "I210 Gigabit Ethernet (Fiber)",
   1500 	  WM_T_I210,		WMP_F_FIBER },
   1501 
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1503 	  "I210 Gigabit Ethernet (SERDES)",
   1504 	  WM_T_I210,		WMP_F_SERDES },
   1505 
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1507 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1508 	  WM_T_I210,		WMP_F_SERDES },
   1509 
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1511 	  "I210 Gigabit Ethernet (SGMII)",
   1512 	  WM_T_I210,		WMP_F_COPPER },
   1513 
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1515 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1516 	  WM_T_I210,		WMP_F_COPPER },
   1517 
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1519 	  "I211 Ethernet (COPPER)",
   1520 	  WM_T_I211,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1522 	  "I217 V Ethernet Connection",
   1523 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1525 	  "I217 LM Ethernet Connection",
   1526 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1528 	  "I218 V Ethernet Connection",
   1529 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1531 	  "I218 V Ethernet Connection",
   1532 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1534 	  "I218 V Ethernet Connection",
   1535 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1537 	  "I218 LM Ethernet Connection",
   1538 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1540 	  "I218 LM Ethernet Connection",
   1541 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1543 	  "I218 LM Ethernet Connection",
   1544 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1546 	  "I219 LM Ethernet Connection",
   1547 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1549 	  "I219 LM Ethernet Connection",
   1550 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1552 	  "I219 LM Ethernet Connection",
   1553 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1555 	  "I219 LM Ethernet Connection",
   1556 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1558 	  "I219 LM Ethernet Connection",
   1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1561 	  "I219 LM Ethernet Connection",
   1562 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1564 	  "I219 LM Ethernet Connection",
   1565 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1567 	  "I219 LM Ethernet Connection",
   1568 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1570 	  "I219 LM Ethernet Connection",
   1571 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1573 	  "I219 LM Ethernet Connection",
   1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1576 	  "I219 LM Ethernet Connection",
   1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1579 	  "I219 LM Ethernet Connection",
   1580 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1581 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1582 	  "I219 LM Ethernet Connection",
   1583 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1585 	  "I219 LM Ethernet Connection",
   1586 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1588 	  "I219 LM Ethernet Connection",
   1589 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1591 	  "I219 V Ethernet Connection",
   1592 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1593 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1594 	  "I219 V Ethernet Connection",
   1595 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1597 	  "I219 V Ethernet Connection",
   1598 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1600 	  "I219 V Ethernet Connection",
   1601 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1603 	  "I219 V Ethernet Connection",
   1604 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1606 	  "I219 V Ethernet Connection",
   1607 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1609 	  "I219 V Ethernet Connection",
   1610 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1612 	  "I219 V Ethernet Connection",
   1613 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1615 	  "I219 V Ethernet Connection",
   1616 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1618 	  "I219 V Ethernet Connection",
   1619 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1621 	  "I219 V Ethernet Connection",
   1622 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1624 	  "I219 V Ethernet Connection",
   1625 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1627 	  "I219 V Ethernet Connection",
   1628 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1629 	{ 0,			0,
   1630 	  NULL,
   1631 	  0,			0 },
   1632 };
   1633 
   1634 /*
    1635  * Register read/write functions
    1636  * other than CSR_{READ|WRITE}().
   1637  */
   1638 
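         /*
          * The I/O BAR provides indirect access to the device registers:
          * the register offset is written to the address window at BAR
          * offset 0, and the value is then transferred through the data
          * window at BAR offset 4.
          */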
   1639 #if 0 /* Not currently used */
   1640 static inline uint32_t
   1641 wm_io_read(struct wm_softc *sc, int reg)
   1642 {
   1643 
   1644 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1645 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1646 }
   1647 #endif
   1648 
   1649 static inline void
   1650 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1651 {
   1652 
   1653 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1654 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1655 }
   1656 
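         /*
          * Write an 8-bit value to one of the 82575's indirect 8-bit
          * controller registers: the data and the sub-register offset are
          * packed into a single register write, after which the register
          * is polled until the hardware sets the READY bit or the poll
          * times out with a warning.
          */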
   1657 static inline void
   1658 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1659     uint32_t data)
   1660 {
   1661 	uint32_t regval;
   1662 	int i;
   1663 
   1664 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1665 
   1666 	CSR_WRITE(sc, reg, regval);
   1667 
   1668 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1669 		delay(5);
   1670 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1671 			break;
   1672 	}
   1673 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1674 		aprint_error("%s: WARNING:"
   1675 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1676 		    device_xname(sc->sc_dev), reg);
   1677 	}
   1678 }
   1679 
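         /*
          * Split a DMA address into the two little-endian 32-bit halves of
          * a wiseman_addr_t.  On 32-bit platforms the high half is zero;
          * the sizeof() test is resolved at compile time.
          */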
   1680 static inline void
   1681 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1682 {
   1683 	wa->wa_low = htole32(v & 0xffffffffU);
   1684 	if (sizeof(bus_addr_t) == 8)
   1685 		wa->wa_high = htole32((uint64_t) v >> 32);
   1686 	else
   1687 		wa->wa_high = 0;
   1688 }
   1689 
   1690 /*
   1691  * Descriptor sync/init functions.
   1692  */
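         /*
          * Sync a contiguous range of transmit descriptors.  When the range
          * wraps past the end of the ring, the sync is split into two
          * bus_dmamap_sync() calls.
          */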
   1693 static inline void
   1694 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1695 {
   1696 	struct wm_softc *sc = txq->txq_sc;
   1697 
   1698 	/* If it will wrap around, sync to the end of the ring. */
   1699 	if ((start + num) > WM_NTXDESC(txq)) {
   1700 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1701 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1702 		    (WM_NTXDESC(txq) - start), ops);
   1703 		num -= (WM_NTXDESC(txq) - start);
   1704 		start = 0;
   1705 	}
   1706 
   1707 	/* Now sync whatever is left. */
   1708 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1709 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1710 }
   1711 
   1712 static inline void
   1713 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1714 {
   1715 	struct wm_softc *sc = rxq->rxq_sc;
   1716 
   1717 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1718 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1719 }
   1720 
   1721 static inline void
   1722 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1723 {
   1724 	struct wm_softc *sc = rxq->rxq_sc;
   1725 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1726 	struct mbuf *m = rxs->rxs_mbuf;
   1727 
   1728 	/*
   1729 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1730 	 * so that the payload after the Ethernet header is aligned
   1731 	 * to a 4-byte boundary.
    1732 	 *
   1733 	 * XXX BRAINDAMAGE ALERT!
   1734 	 * The stupid chip uses the same size for every buffer, which
   1735 	 * is set in the Receive Control register.  We are using the 2K
   1736 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1737 	 * reason, we can't "scoot" packets longer than the standard
   1738 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1739 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1740 	 * the upper layer copy the headers.
   1741 	 */
   1742 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1743 
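         	/*
         	 * Three receive descriptor formats are in use: the 82574's
         	 * extended descriptors, the "advanced" descriptors of the
         	 * NEWQUEUE (82575 and later) chips, and the legacy wiseman
         	 * layout everywhere else.
         	 */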
   1744 	if (sc->sc_type == WM_T_82574) {
   1745 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1746 		rxd->erx_data.erxd_addr =
   1747 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1748 		rxd->erx_data.erxd_dd = 0;
   1749 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1750 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1751 
   1752 		rxd->nqrx_data.nrxd_paddr =
   1753 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1754 		/* Currently, split header is not supported. */
   1755 		rxd->nqrx_data.nrxd_haddr = 0;
   1756 	} else {
   1757 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1758 
   1759 		wm_set_dma_addr(&rxd->wrx_addr,
   1760 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1761 		rxd->wrx_len = 0;
   1762 		rxd->wrx_cksum = 0;
   1763 		rxd->wrx_status = 0;
   1764 		rxd->wrx_errors = 0;
   1765 		rxd->wrx_special = 0;
   1766 	}
   1767 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1768 
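         	/* Hand the descriptor to the hardware by advancing the tail. */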
   1769 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1770 }
   1771 
   1772 /*
   1773  * Device driver interface functions and commonly used functions.
   1774  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1775  */
   1776 
   1777 /* Lookup supported device table */
   1778 static const struct wm_product *
   1779 wm_lookup(const struct pci_attach_args *pa)
   1780 {
   1781 	const struct wm_product *wmp;
   1782 
   1783 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1784 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1785 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1786 			return wmp;
   1787 	}
   1788 	return NULL;
   1789 }
   1790 
   1791 /* The match function (ca_match) */
   1792 static int
   1793 wm_match(device_t parent, cfdata_t cf, void *aux)
   1794 {
   1795 	struct pci_attach_args *pa = aux;
   1796 
   1797 	if (wm_lookup(pa) != NULL)
   1798 		return 1;
   1799 
   1800 	return 0;
   1801 }
   1802 
   1803 /* The attach function (ca_attach) */
   1804 static void
   1805 wm_attach(device_t parent, device_t self, void *aux)
   1806 {
   1807 	struct wm_softc *sc = device_private(self);
   1808 	struct pci_attach_args *pa = aux;
   1809 	prop_dictionary_t dict;
   1810 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1811 	pci_chipset_tag_t pc = pa->pa_pc;
   1812 	int counts[PCI_INTR_TYPE_SIZE];
   1813 	pci_intr_type_t max_type;
   1814 	const char *eetype, *xname;
   1815 	bus_space_tag_t memt;
   1816 	bus_space_handle_t memh;
   1817 	bus_size_t memsize;
   1818 	int memh_valid;
   1819 	int i, error;
   1820 	const struct wm_product *wmp;
   1821 	prop_data_t ea;
   1822 	prop_number_t pn;
   1823 	uint8_t enaddr[ETHER_ADDR_LEN];
   1824 	char buf[256];
   1825 	char wqname[MAXCOMLEN];
   1826 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1827 	pcireg_t preg, memtype;
   1828 	uint16_t eeprom_data, apme_mask;
   1829 	bool force_clear_smbi;
   1830 	uint32_t link_mode;
   1831 	uint32_t reg;
   1832 
   1833 	sc->sc_dev = self;
   1834 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1835 	sc->sc_core_stopping = false;
   1836 
   1837 	wmp = wm_lookup(pa);
   1838 #ifdef DIAGNOSTIC
   1839 	if (wmp == NULL) {
   1840 		printf("\n");
   1841 		panic("wm_attach: impossible");
   1842 	}
   1843 #endif
   1844 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1845 
   1846 	sc->sc_pc = pa->pa_pc;
   1847 	sc->sc_pcitag = pa->pa_tag;
   1848 
   1849 	if (pci_dma64_available(pa))
   1850 		sc->sc_dmat = pa->pa_dmat64;
   1851 	else
   1852 		sc->sc_dmat = pa->pa_dmat;
   1853 
   1854 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1855 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1856 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1857 
   1858 	sc->sc_type = wmp->wmp_type;
   1859 
   1860 	/* Set default function pointers */
   1861 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1862 	sc->phy.release = sc->nvm.release = wm_put_null;
   1863 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1864 
   1865 	if (sc->sc_type < WM_T_82543) {
   1866 		if (sc->sc_rev < 2) {
   1867 			aprint_error_dev(sc->sc_dev,
   1868 			    "i82542 must be at least rev. 2\n");
   1869 			return;
   1870 		}
   1871 		if (sc->sc_rev < 3)
   1872 			sc->sc_type = WM_T_82542_2_0;
   1873 	}
   1874 
   1875 	/*
   1876 	 * Disable MSI for Errata:
   1877 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1878 	 *
   1879 	 *  82544: Errata 25
   1880 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1881 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1882 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1883 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1884 	 *
   1885 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1886 	 *
   1887 	 *  82571 & 82572: Errata 63
   1888 	 */
   1889 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1890 	    || (sc->sc_type == WM_T_82572))
   1891 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1892 
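         	/*
         	 * The 82575 and its successors (82576, 82580, I35[04] and
         	 * I21[01]) use the new-style "advanced" descriptor queues.
         	 */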
   1893 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1894 	    || (sc->sc_type == WM_T_82580)
   1895 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1896 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1897 		sc->sc_flags |= WM_F_NEWQUEUE;
   1898 
   1899 	/* Set device properties (mactype) */
   1900 	dict = device_properties(sc->sc_dev);
   1901 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1902 
   1903 	/*
    1904 	 * Map the device.  All devices support memory-mapped access,
    1905 	 * which is required for normal operation.
   1906 	 */
   1907 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1908 	switch (memtype) {
   1909 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1910 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1911 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1912 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1913 		break;
   1914 	default:
   1915 		memh_valid = 0;
   1916 		break;
   1917 	}
   1918 
   1919 	if (memh_valid) {
   1920 		sc->sc_st = memt;
   1921 		sc->sc_sh = memh;
   1922 		sc->sc_ss = memsize;
   1923 	} else {
   1924 		aprint_error_dev(sc->sc_dev,
   1925 		    "unable to map device registers\n");
   1926 		return;
   1927 	}
   1928 
   1929 	/*
   1930 	 * In addition, i82544 and later support I/O mapped indirect
   1931 	 * register access.  It is not desirable (nor supported in
   1932 	 * this driver) to use it for normal operation, though it is
   1933 	 * required to work around bugs in some chip versions.
   1934 	 */
   1935 	if (sc->sc_type >= WM_T_82544) {
   1936 		/* First we have to find the I/O BAR. */
   1937 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1938 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1939 			if (memtype == PCI_MAPREG_TYPE_IO)
   1940 				break;
   1941 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1942 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1943 				i += 4;	/* skip high bits, too */
   1944 		}
   1945 		if (i < PCI_MAPREG_END) {
   1946 			/*
    1947 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1948 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    1949 			 * that's no problem because the newer chips don't
    1950 			 * have this bug.
    1951 			 *
    1952 			 * The i8254x apparently doesn't respond when the
    1953 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1954 			 * been configured.
   1955 			 */
   1956 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1957 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "WARNING: I/O BAR at zero.\n");
   1960 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1961 					0, &sc->sc_iot, &sc->sc_ioh,
   1962 					NULL, &sc->sc_ios) == 0) {
   1963 				sc->sc_flags |= WM_F_IOH_VALID;
   1964 			} else
   1965 				aprint_error_dev(sc->sc_dev,
   1966 				    "WARNING: unable to map I/O space\n");
   1967 		}
   1968 
   1969 	}
   1970 
   1971 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1972 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1973 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1974 	if (sc->sc_type < WM_T_82542_2_1)
   1975 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1976 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1977 
   1978 	/* Power up chip */
   1979 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1980 	    && error != EOPNOTSUPP) {
   1981 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1982 		return;
   1983 	}
   1984 
   1985 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1986 	/*
    1987 	 * If we can use only one queue, don't use MSI-X; this saves
    1988 	 * interrupt resources.
   1989 	 */
   1990 	if (sc->sc_nqueues > 1) {
   1991 		max_type = PCI_INTR_TYPE_MSIX;
   1992 		/*
    1993 		 * The 82583 has an MSI-X capability in its PCI configuration
    1994 		 * space, but it doesn't actually support MSI-X. At least the
    1995 		 * documentation doesn't say anything about it.
   1996 		 */
   1997 		counts[PCI_INTR_TYPE_MSIX]
   1998 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1999 	} else {
   2000 		max_type = PCI_INTR_TYPE_MSI;
   2001 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2002 	}
   2003 
   2004 	/* Allocation settings */
   2005 	counts[PCI_INTR_TYPE_MSI] = 1;
   2006 	counts[PCI_INTR_TYPE_INTX] = 1;
   2007 	/* overridden by disable flags */
   2008 	if (wm_disable_msi != 0) {
   2009 		counts[PCI_INTR_TYPE_MSI] = 0;
   2010 		if (wm_disable_msix != 0) {
   2011 			max_type = PCI_INTR_TYPE_INTX;
   2012 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2013 		}
   2014 	} else if (wm_disable_msix != 0) {
   2015 		max_type = PCI_INTR_TYPE_MSI;
   2016 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2017 	}
   2018 
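         	/*
         	 * Interrupt allocation falls back in steps: if MSI-X setup
         	 * fails, release it and retry with MSI; if MSI setup fails,
         	 * retry with INTx; if even INTx cannot be set up, give up.
         	 */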
   2019 alloc_retry:
   2020 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2021 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2022 		return;
   2023 	}
   2024 
   2025 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2026 		error = wm_setup_msix(sc);
   2027 		if (error) {
   2028 			pci_intr_release(pc, sc->sc_intrs,
   2029 			    counts[PCI_INTR_TYPE_MSIX]);
   2030 
   2031 			/* Setup for MSI: Disable MSI-X */
   2032 			max_type = PCI_INTR_TYPE_MSI;
   2033 			counts[PCI_INTR_TYPE_MSI] = 1;
   2034 			counts[PCI_INTR_TYPE_INTX] = 1;
   2035 			goto alloc_retry;
   2036 		}
   2037 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2038 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2039 		error = wm_setup_legacy(sc);
   2040 		if (error) {
   2041 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2042 			    counts[PCI_INTR_TYPE_MSI]);
   2043 
   2044 			/* The next try is for INTx: Disable MSI */
   2045 			max_type = PCI_INTR_TYPE_INTX;
   2046 			counts[PCI_INTR_TYPE_INTX] = 1;
   2047 			goto alloc_retry;
   2048 		}
   2049 	} else {
   2050 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2051 		error = wm_setup_legacy(sc);
   2052 		if (error) {
   2053 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2054 			    counts[PCI_INTR_TYPE_INTX]);
   2055 			return;
   2056 		}
   2057 	}
   2058 
   2059 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2060 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2061 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2062 	    WM_WORKQUEUE_FLAGS);
   2063 	if (error) {
   2064 		aprint_error_dev(sc->sc_dev,
   2065 		    "unable to create workqueue\n");
   2066 		goto out;
   2067 	}
   2068 
   2069 	/*
   2070 	 * Check the function ID (unit number of the chip).
   2071 	 */
   2072 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2073 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2074 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2075 	    || (sc->sc_type == WM_T_82580)
   2076 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2077 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2078 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2079 	else
   2080 		sc->sc_funcid = 0;
   2081 
   2082 	/*
   2083 	 * Determine a few things about the bus we're connected to.
   2084 	 */
   2085 	if (sc->sc_type < WM_T_82543) {
   2086 		/* We don't really know the bus characteristics here. */
   2087 		sc->sc_bus_speed = 33;
   2088 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2089 		/*
    2090 		 * CSA (Communication Streaming Architecture) is about as fast
    2091 		 * as a 32-bit 66MHz PCI bus.
   2092 		 */
   2093 		sc->sc_flags |= WM_F_CSA;
   2094 		sc->sc_bus_speed = 66;
   2095 		aprint_verbose_dev(sc->sc_dev,
   2096 		    "Communication Streaming Architecture\n");
   2097 		if (sc->sc_type == WM_T_82547) {
   2098 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2099 			callout_setfunc(&sc->sc_txfifo_ch,
   2100 			    wm_82547_txfifo_stall, sc);
   2101 			aprint_verbose_dev(sc->sc_dev,
   2102 			    "using 82547 Tx FIFO stall work-around\n");
   2103 		}
   2104 	} else if (sc->sc_type >= WM_T_82571) {
   2105 		sc->sc_flags |= WM_F_PCIE;
   2106 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2107 		    && (sc->sc_type != WM_T_ICH10)
   2108 		    && (sc->sc_type != WM_T_PCH)
   2109 		    && (sc->sc_type != WM_T_PCH2)
   2110 		    && (sc->sc_type != WM_T_PCH_LPT)
   2111 		    && (sc->sc_type != WM_T_PCH_SPT)
   2112 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2113 			/* ICH* and PCH* have no PCIe capability registers */
   2114 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2115 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2116 				NULL) == 0)
   2117 				aprint_error_dev(sc->sc_dev,
   2118 				    "unable to find PCIe capability\n");
   2119 		}
   2120 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2121 	} else {
   2122 		reg = CSR_READ(sc, WMREG_STATUS);
   2123 		if (reg & STATUS_BUS64)
   2124 			sc->sc_flags |= WM_F_BUS64;
   2125 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2126 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2127 
   2128 			sc->sc_flags |= WM_F_PCIX;
   2129 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2130 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2131 				aprint_error_dev(sc->sc_dev,
   2132 				    "unable to find PCIX capability\n");
   2133 			else if (sc->sc_type != WM_T_82545_3 &&
   2134 				 sc->sc_type != WM_T_82546_3) {
   2135 				/*
   2136 				 * Work around a problem caused by the BIOS
   2137 				 * setting the max memory read byte count
   2138 				 * incorrectly.
   2139 				 */
   2140 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2141 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2142 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2143 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2144 
   2145 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2146 				    PCIX_CMD_BYTECNT_SHIFT;
   2147 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2148 				    PCIX_STATUS_MAXB_SHIFT;
   2149 				if (bytecnt > maxb) {
   2150 					aprint_verbose_dev(sc->sc_dev,
   2151 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2152 					    512 << bytecnt, 512 << maxb);
   2153 					pcix_cmd = (pcix_cmd &
   2154 					    ~PCIX_CMD_BYTECNT_MASK) |
   2155 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2156 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2157 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2158 					    pcix_cmd);
   2159 				}
   2160 			}
   2161 		}
   2162 		/*
   2163 		 * The quad port adapter is special; it has a PCIX-PCIX
   2164 		 * bridge on the board, and can run the secondary bus at
   2165 		 * a higher speed.
   2166 		 */
   2167 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2168 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2169 								      : 66;
   2170 		} else if (sc->sc_flags & WM_F_PCIX) {
   2171 			switch (reg & STATUS_PCIXSPD_MASK) {
   2172 			case STATUS_PCIXSPD_50_66:
   2173 				sc->sc_bus_speed = 66;
   2174 				break;
   2175 			case STATUS_PCIXSPD_66_100:
   2176 				sc->sc_bus_speed = 100;
   2177 				break;
   2178 			case STATUS_PCIXSPD_100_133:
   2179 				sc->sc_bus_speed = 133;
   2180 				break;
   2181 			default:
   2182 				aprint_error_dev(sc->sc_dev,
   2183 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2184 				    reg & STATUS_PCIXSPD_MASK);
   2185 				sc->sc_bus_speed = 66;
   2186 				break;
   2187 			}
   2188 		} else
   2189 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2190 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2191 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2192 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2193 	}
   2194 
   2195 	/* clear interesting stat counters */
   2196 	CSR_READ(sc, WMREG_COLC);
   2197 	CSR_READ(sc, WMREG_RXERRC);
   2198 
   2199 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2200 	    || (sc->sc_type >= WM_T_ICH8))
   2201 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2202 	if (sc->sc_type >= WM_T_ICH8)
   2203 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2204 
   2205 	/* Set PHY, NVM mutex related stuff */
   2206 	switch (sc->sc_type) {
   2207 	case WM_T_82542_2_0:
   2208 	case WM_T_82542_2_1:
   2209 	case WM_T_82543:
   2210 	case WM_T_82544:
   2211 		/* Microwire */
   2212 		sc->nvm.read = wm_nvm_read_uwire;
   2213 		sc->sc_nvm_wordsize = 64;
   2214 		sc->sc_nvm_addrbits = 6;
   2215 		break;
   2216 	case WM_T_82540:
   2217 	case WM_T_82545:
   2218 	case WM_T_82545_3:
   2219 	case WM_T_82546:
   2220 	case WM_T_82546_3:
   2221 		/* Microwire */
   2222 		sc->nvm.read = wm_nvm_read_uwire;
   2223 		reg = CSR_READ(sc, WMREG_EECD);
   2224 		if (reg & EECD_EE_SIZE) {
   2225 			sc->sc_nvm_wordsize = 256;
   2226 			sc->sc_nvm_addrbits = 8;
   2227 		} else {
   2228 			sc->sc_nvm_wordsize = 64;
   2229 			sc->sc_nvm_addrbits = 6;
   2230 		}
   2231 		sc->sc_flags |= WM_F_LOCK_EECD;
   2232 		sc->nvm.acquire = wm_get_eecd;
   2233 		sc->nvm.release = wm_put_eecd;
   2234 		break;
   2235 	case WM_T_82541:
   2236 	case WM_T_82541_2:
   2237 	case WM_T_82547:
   2238 	case WM_T_82547_2:
   2239 		reg = CSR_READ(sc, WMREG_EECD);
   2240 		/*
    2241 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2242 		 * 8254[17], so set the flags and functions before calling it.
   2243 		 */
   2244 		sc->sc_flags |= WM_F_LOCK_EECD;
   2245 		sc->nvm.acquire = wm_get_eecd;
   2246 		sc->nvm.release = wm_put_eecd;
   2247 		if (reg & EECD_EE_TYPE) {
   2248 			/* SPI */
   2249 			sc->nvm.read = wm_nvm_read_spi;
   2250 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2251 			wm_nvm_set_addrbits_size_eecd(sc);
   2252 		} else {
   2253 			/* Microwire */
   2254 			sc->nvm.read = wm_nvm_read_uwire;
   2255 			if ((reg & EECD_EE_ABITS) != 0) {
   2256 				sc->sc_nvm_wordsize = 256;
   2257 				sc->sc_nvm_addrbits = 8;
   2258 			} else {
   2259 				sc->sc_nvm_wordsize = 64;
   2260 				sc->sc_nvm_addrbits = 6;
   2261 			}
   2262 		}
   2263 		break;
   2264 	case WM_T_82571:
   2265 	case WM_T_82572:
   2266 		/* SPI */
   2267 		sc->nvm.read = wm_nvm_read_eerd;
    2268 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2269 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2270 		wm_nvm_set_addrbits_size_eecd(sc);
   2271 		sc->phy.acquire = wm_get_swsm_semaphore;
   2272 		sc->phy.release = wm_put_swsm_semaphore;
   2273 		sc->nvm.acquire = wm_get_nvm_82571;
   2274 		sc->nvm.release = wm_put_nvm_82571;
   2275 		break;
   2276 	case WM_T_82573:
   2277 	case WM_T_82574:
   2278 	case WM_T_82583:
   2279 		sc->nvm.read = wm_nvm_read_eerd;
    2280 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2281 		if (sc->sc_type == WM_T_82573) {
   2282 			sc->phy.acquire = wm_get_swsm_semaphore;
   2283 			sc->phy.release = wm_put_swsm_semaphore;
   2284 			sc->nvm.acquire = wm_get_nvm_82571;
   2285 			sc->nvm.release = wm_put_nvm_82571;
   2286 		} else {
   2287 			/* Both PHY and NVM use the same semaphore. */
   2288 			sc->phy.acquire = sc->nvm.acquire
   2289 			    = wm_get_swfwhw_semaphore;
   2290 			sc->phy.release = sc->nvm.release
   2291 			    = wm_put_swfwhw_semaphore;
   2292 		}
   2293 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2294 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2295 			sc->sc_nvm_wordsize = 2048;
   2296 		} else {
   2297 			/* SPI */
   2298 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2299 			wm_nvm_set_addrbits_size_eecd(sc);
   2300 		}
   2301 		break;
   2302 	case WM_T_82575:
   2303 	case WM_T_82576:
   2304 	case WM_T_82580:
   2305 	case WM_T_I350:
   2306 	case WM_T_I354:
   2307 	case WM_T_80003:
   2308 		/* SPI */
   2309 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2310 		wm_nvm_set_addrbits_size_eecd(sc);
   2311 		if ((sc->sc_type == WM_T_80003)
   2312 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2313 			sc->nvm.read = wm_nvm_read_eerd;
   2314 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2315 		} else {
   2316 			sc->nvm.read = wm_nvm_read_spi;
   2317 			sc->sc_flags |= WM_F_LOCK_EECD;
   2318 		}
   2319 		sc->phy.acquire = wm_get_phy_82575;
   2320 		sc->phy.release = wm_put_phy_82575;
   2321 		sc->nvm.acquire = wm_get_nvm_80003;
   2322 		sc->nvm.release = wm_put_nvm_80003;
   2323 		break;
   2324 	case WM_T_ICH8:
   2325 	case WM_T_ICH9:
   2326 	case WM_T_ICH10:
   2327 	case WM_T_PCH:
   2328 	case WM_T_PCH2:
   2329 	case WM_T_PCH_LPT:
   2330 		sc->nvm.read = wm_nvm_read_ich8;
   2331 		/* FLASH */
   2332 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2333 		sc->sc_nvm_wordsize = 2048;
   2334 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2335 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2336 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2337 			aprint_error_dev(sc->sc_dev,
   2338 			    "can't map FLASH registers\n");
   2339 			goto out;
   2340 		}
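         		/*
         		 * GFPREG holds the base and the limit of the gigabit
         		 * flash region in units of flash sectors; convert the
         		 * difference to bytes and then to 16-bit words per bank
         		 * (the region holds two banks).
         		 */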
   2341 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2342 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2343 		    ICH_FLASH_SECTOR_SIZE;
   2344 		sc->sc_ich8_flash_bank_size =
   2345 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2346 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2347 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2348 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2349 		sc->sc_flashreg_offset = 0;
   2350 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2351 		sc->phy.release = wm_put_swflag_ich8lan;
   2352 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2353 		sc->nvm.release = wm_put_nvm_ich8lan;
   2354 		break;
   2355 	case WM_T_PCH_SPT:
   2356 	case WM_T_PCH_CNP:
   2357 		sc->nvm.read = wm_nvm_read_spt;
   2358 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2359 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2360 		sc->sc_flasht = sc->sc_st;
   2361 		sc->sc_flashh = sc->sc_sh;
   2362 		sc->sc_ich8_flash_base = 0;
   2363 		sc->sc_nvm_wordsize =
   2364 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2365 		    * NVM_SIZE_MULTIPLIER;
    2366 		/* It is the size in bytes; we want words */
   2367 		sc->sc_nvm_wordsize /= 2;
   2368 		/* Assume 2 banks */
   2369 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2370 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2371 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2372 		sc->phy.release = wm_put_swflag_ich8lan;
   2373 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2374 		sc->nvm.release = wm_put_nvm_ich8lan;
   2375 		break;
   2376 	case WM_T_I210:
   2377 	case WM_T_I211:
    2378 		/* Allow one clear of the SW semaphore on I210 and newer */
   2379 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2380 		if (wm_nvm_flash_presence_i210(sc)) {
   2381 			sc->nvm.read = wm_nvm_read_eerd;
   2382 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2383 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2384 			wm_nvm_set_addrbits_size_eecd(sc);
   2385 		} else {
   2386 			sc->nvm.read = wm_nvm_read_invm;
   2387 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2388 			sc->sc_nvm_wordsize = INVM_SIZE;
   2389 		}
   2390 		sc->phy.acquire = wm_get_phy_82575;
   2391 		sc->phy.release = wm_put_phy_82575;
   2392 		sc->nvm.acquire = wm_get_nvm_80003;
   2393 		sc->nvm.release = wm_put_nvm_80003;
   2394 		break;
   2395 	default:
   2396 		break;
   2397 	}
   2398 
   2399 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2400 	switch (sc->sc_type) {
   2401 	case WM_T_82571:
   2402 	case WM_T_82572:
   2403 		reg = CSR_READ(sc, WMREG_SWSM2);
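         		/*
         		 * If the SWSM2 lock bit was clear, this is the first
         		 * initialization of the chip since power-on, so it
         		 * should be safe to force the SMBI bit clear once.
         		 */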
   2404 		if ((reg & SWSM2_LOCK) == 0) {
   2405 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2406 			force_clear_smbi = true;
   2407 		} else
   2408 			force_clear_smbi = false;
   2409 		break;
   2410 	case WM_T_82573:
   2411 	case WM_T_82574:
   2412 	case WM_T_82583:
   2413 		force_clear_smbi = true;
   2414 		break;
   2415 	default:
   2416 		force_clear_smbi = false;
   2417 		break;
   2418 	}
   2419 	if (force_clear_smbi) {
   2420 		reg = CSR_READ(sc, WMREG_SWSM);
   2421 		if ((reg & SWSM_SMBI) != 0)
   2422 			aprint_error_dev(sc->sc_dev,
   2423 			    "Please update the Bootagent\n");
   2424 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2425 	}
   2426 
   2427 	/*
    2428 	 * Defer printing the EEPROM type until after verifying the checksum.
   2429 	 * This allows the EEPROM type to be printed correctly in the case
   2430 	 * that no EEPROM is attached.
   2431 	 */
   2432 	/*
   2433 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2434 	 * this for later, so we can fail future reads from the EEPROM.
   2435 	 */
   2436 	if (wm_nvm_validate_checksum(sc)) {
   2437 		/*
    2438 		 * Retry the checksum validation, because some PCI-e parts
    2439 		 * fail the first check due to the link being in a sleep state.
   2440 		 */
   2441 		if (wm_nvm_validate_checksum(sc))
   2442 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2443 	}
   2444 
   2445 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2446 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2447 	else {
   2448 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2449 		    sc->sc_nvm_wordsize);
   2450 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2451 			aprint_verbose("iNVM");
   2452 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2453 			aprint_verbose("FLASH(HW)");
   2454 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2455 			aprint_verbose("FLASH");
   2456 		else {
   2457 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2458 				eetype = "SPI";
   2459 			else
   2460 				eetype = "MicroWire";
   2461 			aprint_verbose("(%d address bits) %s EEPROM",
   2462 			    sc->sc_nvm_addrbits, eetype);
   2463 		}
   2464 	}
   2465 	wm_nvm_version(sc);
   2466 	aprint_verbose("\n");
   2467 
   2468 	/*
    2469 	 * XXX The first call to wm_gmii_setup_phytype(). The result might be
   2470 	 * incorrect.
   2471 	 */
   2472 	wm_gmii_setup_phytype(sc, 0, 0);
   2473 
   2474 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2475 	switch (sc->sc_type) {
   2476 	case WM_T_ICH8:
   2477 	case WM_T_ICH9:
   2478 	case WM_T_ICH10:
   2479 	case WM_T_PCH:
   2480 	case WM_T_PCH2:
   2481 	case WM_T_PCH_LPT:
   2482 	case WM_T_PCH_SPT:
   2483 	case WM_T_PCH_CNP:
   2484 		apme_mask = WUC_APME;
   2485 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2486 		if ((eeprom_data & apme_mask) != 0)
   2487 			sc->sc_flags |= WM_F_WOL;
   2488 		break;
   2489 	default:
   2490 		break;
   2491 	}
   2492 
   2493 	/* Reset the chip to a known state. */
   2494 	wm_reset(sc);
   2495 
   2496 	/*
   2497 	 * Check for I21[01] PLL workaround.
   2498 	 *
   2499 	 * Three cases:
   2500 	 * a) Chip is I211.
   2501 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2502 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2503 	 */
   2504 	if (sc->sc_type == WM_T_I211)
   2505 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2506 	if (sc->sc_type == WM_T_I210) {
   2507 		if (!wm_nvm_flash_presence_i210(sc))
   2508 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2509 		else if ((sc->sc_nvm_ver_major < 3)
   2510 		    || ((sc->sc_nvm_ver_major == 3)
   2511 			&& (sc->sc_nvm_ver_minor < 25))) {
   2512 			aprint_verbose_dev(sc->sc_dev,
   2513 			    "ROM image version %d.%d is older than 3.25\n",
   2514 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2515 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2516 		}
   2517 	}
   2518 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2519 		wm_pll_workaround_i210(sc);
   2520 
   2521 	wm_get_wakeup(sc);
   2522 
   2523 	/* Non-AMT based hardware can now take control from firmware */
   2524 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2525 		wm_get_hw_control(sc);
   2526 
   2527 	/*
   2528 	 * Read the Ethernet address from the EEPROM, if not first found
   2529 	 * in device properties.
   2530 	 */
   2531 	ea = prop_dictionary_get(dict, "mac-address");
   2532 	if (ea != NULL) {
   2533 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2534 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2535 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2536 	} else {
   2537 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2538 			aprint_error_dev(sc->sc_dev,
   2539 			    "unable to read Ethernet address\n");
   2540 			goto out;
   2541 		}
   2542 	}
   2543 
   2544 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2545 	    ether_sprintf(enaddr));
   2546 
   2547 	/*
   2548 	 * Read the config info from the EEPROM, and set up various
   2549 	 * bits in the control registers based on their contents.
   2550 	 */
   2551 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2552 	if (pn != NULL) {
   2553 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2554 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2555 	} else {
   2556 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2557 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2558 			goto out;
   2559 		}
   2560 	}
   2561 
   2562 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2563 	if (pn != NULL) {
   2564 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2565 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2566 	} else {
   2567 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2568 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2569 			goto out;
   2570 		}
   2571 	}
   2572 
   2573 	/* check for WM_F_WOL */
   2574 	switch (sc->sc_type) {
   2575 	case WM_T_82542_2_0:
   2576 	case WM_T_82542_2_1:
   2577 	case WM_T_82543:
   2578 		/* dummy? */
   2579 		eeprom_data = 0;
   2580 		apme_mask = NVM_CFG3_APME;
   2581 		break;
   2582 	case WM_T_82544:
   2583 		apme_mask = NVM_CFG2_82544_APM_EN;
   2584 		eeprom_data = cfg2;
   2585 		break;
   2586 	case WM_T_82546:
   2587 	case WM_T_82546_3:
   2588 	case WM_T_82571:
   2589 	case WM_T_82572:
   2590 	case WM_T_82573:
   2591 	case WM_T_82574:
   2592 	case WM_T_82583:
   2593 	case WM_T_80003:
   2594 	case WM_T_82575:
   2595 	case WM_T_82576:
   2596 		apme_mask = NVM_CFG3_APME;
   2597 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2598 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2599 		break;
   2600 	case WM_T_82580:
   2601 	case WM_T_I350:
   2602 	case WM_T_I354:
   2603 	case WM_T_I210:
   2604 	case WM_T_I211:
   2605 		apme_mask = NVM_CFG3_APME;
   2606 		wm_nvm_read(sc,
   2607 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2608 		    1, &eeprom_data);
   2609 		break;
   2610 	case WM_T_ICH8:
   2611 	case WM_T_ICH9:
   2612 	case WM_T_ICH10:
   2613 	case WM_T_PCH:
   2614 	case WM_T_PCH2:
   2615 	case WM_T_PCH_LPT:
   2616 	case WM_T_PCH_SPT:
   2617 	case WM_T_PCH_CNP:
    2618 		/* Already checked before wm_reset() */
   2619 		apme_mask = eeprom_data = 0;
   2620 		break;
   2621 	default: /* XXX 82540 */
   2622 		apme_mask = NVM_CFG3_APME;
   2623 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2624 		break;
   2625 	}
   2626 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2627 	if ((eeprom_data & apme_mask) != 0)
   2628 		sc->sc_flags |= WM_F_WOL;
   2629 
   2630 	/*
    2631 	 * We have the EEPROM settings; now apply the special cases
    2632 	 * where the EEPROM may be wrong or the board doesn't support
    2633 	 * wake-on-LAN on a particular port.
   2634 	 */
   2635 	switch (sc->sc_pcidevid) {
   2636 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2637 		sc->sc_flags &= ~WM_F_WOL;
   2638 		break;
   2639 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2640 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2641 		/* Wake events only supported on port A for dual fiber
   2642 		 * regardless of eeprom setting */
   2643 		if (sc->sc_funcid == 1)
   2644 			sc->sc_flags &= ~WM_F_WOL;
   2645 		break;
   2646 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2647 		/* If quad port adapter, disable WoL on all but port A */
   2648 		if (sc->sc_funcid != 0)
   2649 			sc->sc_flags &= ~WM_F_WOL;
   2650 		break;
   2651 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2652 		/* Wake events only supported on port A for dual fiber
   2653 		 * regardless of eeprom setting */
   2654 		if (sc->sc_funcid == 1)
   2655 			sc->sc_flags &= ~WM_F_WOL;
   2656 		break;
   2657 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2658 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2659 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2660 		/* If quad port adapter, disable WoL on all but port A */
   2661 		if (sc->sc_funcid != 0)
   2662 			sc->sc_flags &= ~WM_F_WOL;
   2663 		break;
   2664 	}
   2665 
   2666 	if (sc->sc_type >= WM_T_82575) {
   2667 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2668 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2669 			    nvmword);
   2670 			if ((sc->sc_type == WM_T_82575) ||
   2671 			    (sc->sc_type == WM_T_82576)) {
   2672 				/* Check NVM for autonegotiation */
   2673 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2674 				    != 0)
   2675 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2676 			}
   2677 			if ((sc->sc_type == WM_T_82575) ||
   2678 			    (sc->sc_type == WM_T_I350)) {
   2679 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2680 					sc->sc_flags |= WM_F_MAS;
   2681 			}
   2682 		}
   2683 	}
   2684 
   2685 	/*
    2686 	 * XXX need special handling for some multiple-port cards
    2687 	 * to disable a particular port.
   2688 	 */
   2689 
   2690 	if (sc->sc_type >= WM_T_82544) {
   2691 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2692 		if (pn != NULL) {
   2693 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2694 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2695 		} else {
   2696 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2697 				aprint_error_dev(sc->sc_dev,
   2698 				    "unable to read SWDPIN\n");
   2699 				goto out;
   2700 			}
   2701 		}
   2702 	}
   2703 
   2704 	if (cfg1 & NVM_CFG1_ILOS)
   2705 		sc->sc_ctrl |= CTRL_ILOS;
   2706 
   2707 	/*
   2708 	 * XXX
    2709 	 * This code isn't correct because pins 2 and 3 are located at
    2710 	 * different positions on newer chips. Check all the datasheets.
    2711 	 *
    2712 	 * Until this is resolved, apply it only to chips up to the 82580.
   2713 	 */
   2714 	if (sc->sc_type <= WM_T_82580) {
   2715 		if (sc->sc_type >= WM_T_82544) {
   2716 			sc->sc_ctrl |=
   2717 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2718 			    CTRL_SWDPIO_SHIFT;
   2719 			sc->sc_ctrl |=
   2720 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2721 			    CTRL_SWDPINS_SHIFT;
   2722 		} else {
   2723 			sc->sc_ctrl |=
   2724 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2725 			    CTRL_SWDPIO_SHIFT;
   2726 		}
   2727 	}
   2728 
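	/* On 82580..I211, the ILOS bit lives in the per-port CFG3 NVM word. */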
   2729 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2730 		wm_nvm_read(sc,
   2731 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2732 		    1, &nvmword);
   2733 		if (nvmword & NVM_CFG3_ILOS)
   2734 			sc->sc_ctrl |= CTRL_ILOS;
   2735 	}
   2736 
   2737 #if 0
   2738 	if (sc->sc_type >= WM_T_82544) {
   2739 		if (cfg1 & NVM_CFG1_IPS0)
   2740 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2741 		if (cfg1 & NVM_CFG1_IPS1)
   2742 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2743 		sc->sc_ctrl_ext |=
   2744 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2745 		    CTRL_EXT_SWDPIO_SHIFT;
   2746 		sc->sc_ctrl_ext |=
   2747 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2748 		    CTRL_EXT_SWDPINS_SHIFT;
   2749 	} else {
   2750 		sc->sc_ctrl_ext |=
   2751 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2752 		    CTRL_EXT_SWDPIO_SHIFT;
   2753 	}
   2754 #endif
   2755 
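	/* Write the accumulated CTRL word to the chip. */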
   2756 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2757 #if 0
   2758 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2759 #endif
   2760 
   2761 	if (sc->sc_type == WM_T_PCH) {
   2762 		uint16_t val;
   2763 
   2764 		/* Save the NVM K1 bit setting */
   2765 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2766 
   2767 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2768 			sc->sc_nvm_k1_enabled = 1;
   2769 		else
   2770 			sc->sc_nvm_k1_enabled = 0;
   2771 	}
   2772 
   2773 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2774 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2775 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2776 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2777 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2778 	    || sc->sc_type == WM_T_82573
   2779 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2780 		/* Copper only */
   2781 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2782 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2783 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2784 	    || (sc->sc_type == WM_T_I211)) {
   2785 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2786 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2787 		switch (link_mode) {
   2788 		case CTRL_EXT_LINK_MODE_1000KX:
   2789 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2790 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2791 			break;
   2792 		case CTRL_EXT_LINK_MODE_SGMII:
   2793 			if (wm_sgmii_uses_mdio(sc)) {
   2794 				aprint_normal_dev(sc->sc_dev,
   2795 				    "SGMII(MDIO)\n");
   2796 				sc->sc_flags |= WM_F_SGMII;
   2797 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2798 				break;
   2799 			}
   2800 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2801 			/*FALLTHROUGH*/
   2802 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2803 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2804 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2805 				if (link_mode
   2806 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2807 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2808 					sc->sc_flags |= WM_F_SGMII;
   2809 					aprint_verbose_dev(sc->sc_dev,
   2810 					    "SGMII\n");
   2811 				} else {
   2812 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2813 					aprint_verbose_dev(sc->sc_dev,
   2814 					    "SERDES\n");
   2815 				}
   2816 				break;
   2817 			}
   2818 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2819 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2820 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2821 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2822 				sc->sc_flags |= WM_F_SGMII;
   2823 			}
   2824 			/* Do not change link mode for 100BaseFX */
   2825 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2826 				break;
   2827 
   2828 			/* Change current link mode setting */
   2829 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2830 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2831 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2832 			else
   2833 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2834 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2835 			break;
   2836 		case CTRL_EXT_LINK_MODE_GMII:
   2837 		default:
   2838 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2839 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2840 			break;
   2841 		}
   2842 
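		/* Enable the I2C interface only when SGMII is in use. */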
    2843 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2844 			reg |= CTRL_EXT_I2C_ENA;
    2845 		else
    2846 			reg &= ~CTRL_EXT_I2C_ENA;
   2848 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2849 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2850 			wm_gmii_setup_phytype(sc, 0, 0);
   2851 			wm_reset_mdicnfg_82580(sc);
   2852 		}
   2853 	} else if (sc->sc_type < WM_T_82543 ||
   2854 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2855 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2856 			aprint_error_dev(sc->sc_dev,
   2857 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2858 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2859 		}
   2860 	} else {
   2861 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2862 			aprint_error_dev(sc->sc_dev,
   2863 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2864 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2865 		}
   2866 	}
   2867 
   2868 	if (sc->sc_type >= WM_T_PCH2)
   2869 		sc->sc_flags |= WM_F_EEE;
   2870 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2871 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2872 		/* XXX: Need special handling for I354. (not yet) */
   2873 		if (sc->sc_type != WM_T_I354)
   2874 			sc->sc_flags |= WM_F_EEE;
   2875 	}
   2876 
   2877 	/* Set device properties (macflags) */
   2878 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2879 
   2880 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2881 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2882 
   2883 	/* Initialize the media structures accordingly. */
   2884 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2885 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2886 	else
   2887 		wm_tbi_mediainit(sc); /* All others */
   2888 
   2889 	ifp = &sc->sc_ethercom.ec_if;
   2890 	xname = device_xname(sc->sc_dev);
   2891 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2892 	ifp->if_softc = sc;
   2893 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2894 #ifdef WM_MPSAFE
   2895 	ifp->if_extflags = IFEF_MPSAFE;
   2896 #endif
   2897 	ifp->if_ioctl = wm_ioctl;
   2898 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2899 		ifp->if_start = wm_nq_start;
   2900 		/*
   2901 		 * When the number of CPUs is one and the controller can use
    2902 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2903 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2904 		 * the other for link status changes.
   2905 		 * In this situation, wm_nq_transmit() is disadvantageous
   2906 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2907 		 */
   2908 		if (wm_is_using_multiqueue(sc))
   2909 			ifp->if_transmit = wm_nq_transmit;
   2910 	} else {
   2911 		ifp->if_start = wm_start;
   2912 		/*
    2913 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2914 		 */
   2915 		if (wm_is_using_multiqueue(sc))
   2916 			ifp->if_transmit = wm_transmit;
   2917 	}
    2918 	/* wm(4) does not use ifp->if_watchdog; wm_tick() serves as the watchdog. */
   2919 	ifp->if_init = wm_init;
   2920 	ifp->if_stop = wm_stop;
   2921 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2922 	IFQ_SET_READY(&ifp->if_snd);
   2923 
    2924 	/* Check for jumbo frame support */
   2925 	switch (sc->sc_type) {
   2926 	case WM_T_82573:
   2927 		/* XXX limited to 9234 if ASPM is disabled */
   2928 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2929 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2930 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2931 		break;
   2932 	case WM_T_82571:
   2933 	case WM_T_82572:
   2934 	case WM_T_82574:
   2935 	case WM_T_82583:
   2936 	case WM_T_82575:
   2937 	case WM_T_82576:
   2938 	case WM_T_82580:
   2939 	case WM_T_I350:
   2940 	case WM_T_I354:
   2941 	case WM_T_I210:
   2942 	case WM_T_I211:
   2943 	case WM_T_80003:
   2944 	case WM_T_ICH9:
   2945 	case WM_T_ICH10:
   2946 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2947 	case WM_T_PCH_LPT:
   2948 	case WM_T_PCH_SPT:
   2949 	case WM_T_PCH_CNP:
   2950 		/* XXX limited to 9234 */
   2951 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2952 		break;
   2953 	case WM_T_PCH:
   2954 		/* XXX limited to 4096 */
   2955 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2956 		break;
   2957 	case WM_T_82542_2_0:
   2958 	case WM_T_82542_2_1:
   2959 	case WM_T_ICH8:
    2960 		/* No support for jumbo frames */
   2961 		break;
   2962 	default:
   2963 		/* ETHER_MAX_LEN_JUMBO */
   2964 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2965 		break;
   2966 	}
   2967 
    2968 	/* If we're an i82543 or greater, we can support VLANs. */
   2969 	if (sc->sc_type >= WM_T_82543) {
   2970 		sc->sc_ethercom.ec_capabilities |=
   2971 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2972 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2973 	}
   2974 
   2975 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2976 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2977 
   2978 	/*
    2979 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2980 	 * on i82543 and later.
   2981 	 */
   2982 	if (sc->sc_type >= WM_T_82543) {
   2983 		ifp->if_capabilities |=
   2984 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2985 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2986 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2987 		    IFCAP_CSUM_TCPv6_Tx |
   2988 		    IFCAP_CSUM_UDPv6_Tx;
   2989 	}
   2990 
   2991 	/*
   2992 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2993 	 *
   2994 	 *	82541GI (8086:1076) ... no
   2995 	 *	82572EI (8086:10b9) ... yes
   2996 	 */
   2997 	if (sc->sc_type >= WM_T_82571) {
   2998 		ifp->if_capabilities |=
   2999 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3000 	}
   3001 
   3002 	/*
    3003 	 * If we're an i82544 or greater (except i82547), we can do
   3004 	 * TCP segmentation offload.
   3005 	 */
   3006 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3007 		ifp->if_capabilities |= IFCAP_TSOv4;
   3008 	}
   3009 
   3010 	if (sc->sc_type >= WM_T_82571) {
   3011 		ifp->if_capabilities |= IFCAP_TSOv6;
   3012 	}
   3013 
   3014 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3015 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3016 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3017 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3018 
   3019 #ifdef WM_MPSAFE
   3020 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3021 #else
   3022 	sc->sc_core_lock = NULL;
   3023 #endif
   3024 
   3025 	/* Attach the interface. */
   3026 	error = if_initialize(ifp);
   3027 	if (error != 0) {
   3028 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3029 		    error);
   3030 		return; /* Error */
   3031 	}
   3032 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3033 	ether_ifattach(ifp, enaddr);
   3034 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3035 	if_register(ifp);
   3036 
   3037 #ifdef WM_EVENT_COUNTERS
   3038 	/* Attach event counters. */
   3039 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3040 	    NULL, xname, "linkintr");
   3041 
   3042 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3043 	    NULL, xname, "tx_xoff");
   3044 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3045 	    NULL, xname, "tx_xon");
   3046 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3047 	    NULL, xname, "rx_xoff");
   3048 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3049 	    NULL, xname, "rx_xon");
   3050 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3051 	    NULL, xname, "rx_macctl");
   3052 #endif /* WM_EVENT_COUNTERS */
   3053 
   3054 	sc->sc_txrx_use_workqueue = false;
   3055 
   3056 	wm_init_sysctls(sc);
   3057 
   3058 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3059 		pmf_class_network_register(self, ifp);
   3060 	else
   3061 		aprint_error_dev(self, "couldn't establish power handler\n");
   3062 
   3063 	sc->sc_flags |= WM_F_ATTACHED;
   3064 out:
   3065 	return;
   3066 }
   3067 
   3068 /* The detach function (ca_detach) */
   3069 static int
   3070 wm_detach(device_t self, int flags __unused)
   3071 {
   3072 	struct wm_softc *sc = device_private(self);
   3073 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3074 	int i;
   3075 
   3076 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3077 		return 0;
   3078 
    3079 	/* Stop the interface. Callouts are stopped inside it. */
   3080 	wm_stop(ifp, 1);
   3081 
   3082 	pmf_device_deregister(self);
   3083 
   3084 	sysctl_teardown(&sc->sc_sysctllog);
   3085 
   3086 #ifdef WM_EVENT_COUNTERS
   3087 	evcnt_detach(&sc->sc_ev_linkintr);
   3088 
   3089 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3090 	evcnt_detach(&sc->sc_ev_tx_xon);
   3091 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3092 	evcnt_detach(&sc->sc_ev_rx_xon);
   3093 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3094 #endif /* WM_EVENT_COUNTERS */
   3095 
   3096 	/* Tell the firmware about the release */
   3097 	WM_CORE_LOCK(sc);
   3098 	wm_release_manageability(sc);
   3099 	wm_release_hw_control(sc);
   3100 	wm_enable_wakeup(sc);
   3101 	WM_CORE_UNLOCK(sc);
   3102 
   3103 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3104 
   3105 	/* Delete all remaining media. */
   3106 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3107 
   3108 	ether_ifdetach(ifp);
   3109 	if_detach(ifp);
   3110 	if_percpuq_destroy(sc->sc_ipq);
   3111 
   3112 	/* Unload RX dmamaps and free mbufs */
   3113 	for (i = 0; i < sc->sc_nqueues; i++) {
   3114 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3115 		mutex_enter(rxq->rxq_lock);
   3116 		wm_rxdrain(rxq);
   3117 		mutex_exit(rxq->rxq_lock);
   3118 	}
   3119 	/* Must unlock here */
   3120 
   3121 	/* Disestablish the interrupt handler */
   3122 	for (i = 0; i < sc->sc_nintrs; i++) {
   3123 		if (sc->sc_ihs[i] != NULL) {
   3124 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3125 			sc->sc_ihs[i] = NULL;
   3126 		}
   3127 	}
   3128 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3129 
   3130 	for (i = 0; i < sc->sc_nqueues; i++)
   3131 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3132 
   3133 	wm_free_txrx_queues(sc);
   3134 
   3135 	/* Unmap the registers */
   3136 	if (sc->sc_ss) {
   3137 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3138 		sc->sc_ss = 0;
   3139 	}
   3140 	if (sc->sc_ios) {
   3141 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3142 		sc->sc_ios = 0;
   3143 	}
   3144 	if (sc->sc_flashs) {
   3145 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3146 		sc->sc_flashs = 0;
   3147 	}
   3148 
   3149 	if (sc->sc_core_lock)
   3150 		mutex_obj_free(sc->sc_core_lock);
   3151 	if (sc->sc_ich_phymtx)
   3152 		mutex_obj_free(sc->sc_ich_phymtx);
   3153 	if (sc->sc_ich_nvmmtx)
   3154 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3155 
   3156 	return 0;
   3157 }
   3158 
   3159 static bool
   3160 wm_suspend(device_t self, const pmf_qual_t *qual)
   3161 {
   3162 	struct wm_softc *sc = device_private(self);
   3163 
   3164 	wm_release_manageability(sc);
   3165 	wm_release_hw_control(sc);
   3166 	wm_enable_wakeup(sc);
   3167 
   3168 	return true;
   3169 }
   3170 
   3171 static bool
   3172 wm_resume(device_t self, const pmf_qual_t *qual)
   3173 {
   3174 	struct wm_softc *sc = device_private(self);
   3175 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3176 	pcireg_t reg;
   3177 	char buf[256];
   3178 
   3179 	reg = CSR_READ(sc, WMREG_WUS);
   3180 	if (reg != 0) {
   3181 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3182 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3183 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3184 	}
   3185 
   3186 	if (sc->sc_type >= WM_T_PCH2)
   3187 		wm_resume_workarounds_pchlan(sc);
   3188 	if ((ifp->if_flags & IFF_UP) == 0) {
   3189 		wm_reset(sc);
   3190 		/* Non-AMT based hardware can now take control from firmware */
   3191 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3192 			wm_get_hw_control(sc);
   3193 		wm_init_manageability(sc);
   3194 	} else {
   3195 		/*
   3196 		 * We called pmf_class_network_register(), so if_init() is
   3197 		 * automatically called when IFF_UP. wm_reset(),
   3198 		 * wm_get_hw_control() and wm_init_manageability() are called
   3199 		 * via wm_init().
   3200 		 */
   3201 	}
   3202 
   3203 	return true;
   3204 }
   3205 
   3206 /*
   3207  * wm_watchdog:		[ifnet interface function]
   3208  *
   3209  *	Watchdog timer handler.
   3210  */
   3211 static void
   3212 wm_watchdog(struct ifnet *ifp)
   3213 {
   3214 	int qid;
   3215 	struct wm_softc *sc = ifp->if_softc;
    3216 	uint16_t hang_queue = 0; /* Bitmap of hung queues; wm(4)'s max is the 82576's 16 queues. */
   3217 
   3218 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3219 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3220 
   3221 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3222 	}
   3223 
    3224 	/* If any of the queues hung up, reset the interface. */
   3225 	if (hang_queue != 0) {
   3226 		(void)wm_init(ifp);
   3227 
   3228 		/*
    3229 		 * There is still some upper-layer processing which calls
    3230 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3231 		 */
   3232 		/* Try to get more packets going. */
   3233 		ifp->if_start(ifp);
   3234 	}
   3235 }
   3236 
   3237 
   3238 static void
   3239 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3240 {
   3241 
   3242 	mutex_enter(txq->txq_lock);
   3243 	if (txq->txq_sending &&
   3244 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3245 		wm_watchdog_txq_locked(ifp, txq, hang);
   3246 
   3247 	mutex_exit(txq->txq_lock);
   3248 }
   3249 
   3250 static void
   3251 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3252     uint16_t *hang)
   3253 {
   3254 	struct wm_softc *sc = ifp->if_softc;
   3255 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3256 
   3257 	KASSERT(mutex_owned(txq->txq_lock));
   3258 
   3259 	/*
   3260 	 * Since we're using delayed interrupts, sweep up
   3261 	 * before we report an error.
   3262 	 */
   3263 	wm_txeof(txq, UINT_MAX);
   3264 
   3265 	if (txq->txq_sending)
   3266 		*hang |= __BIT(wmq->wmq_id);
   3267 
   3268 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3269 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3270 		    device_xname(sc->sc_dev));
   3271 	} else {
   3272 #ifdef WM_DEBUG
   3273 		int i, j;
   3274 		struct wm_txsoft *txs;
   3275 #endif
   3276 		log(LOG_ERR,
   3277 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3278 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3279 		    txq->txq_next);
   3280 		if_statinc(ifp, if_oerrors);
   3281 #ifdef WM_DEBUG
   3282 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3283 		    i = WM_NEXTTXS(txq, i)) {
   3284 			txs = &txq->txq_soft[i];
   3285 			printf("txs %d tx %d -> %d\n",
   3286 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3287 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3288 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3289 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3290 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3291 					printf("\t %#08x%08x\n",
   3292 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3293 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3294 				} else {
   3295 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3296 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3297 					    txq->txq_descs[j].wtx_addr.wa_low);
   3298 					printf("\t %#04x%02x%02x%08x\n",
   3299 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3300 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3301 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3302 					    txq->txq_descs[j].wtx_cmdlen);
   3303 				}
   3304 				if (j == txs->txs_lastdesc)
   3305 					break;
   3306 			}
   3307 		}
   3308 #endif
   3309 	}
   3310 }
   3311 
   3312 /*
   3313  * wm_tick:
   3314  *
   3315  *	One second timer, used to check link status, sweep up
   3316  *	completed transmit jobs, etc.
   3317  */
   3318 static void
   3319 wm_tick(void *arg)
   3320 {
   3321 	struct wm_softc *sc = arg;
   3322 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3323 #ifndef WM_MPSAFE
   3324 	int s = splnet();
   3325 #endif
   3326 
   3327 	WM_CORE_LOCK(sc);
   3328 
   3329 	if (sc->sc_core_stopping) {
   3330 		WM_CORE_UNLOCK(sc);
   3331 #ifndef WM_MPSAFE
   3332 		splx(s);
   3333 #endif
   3334 		return;
   3335 	}
   3336 
   3337 	if (sc->sc_type >= WM_T_82542_2_1) {
   3338 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3339 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3340 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3341 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3342 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3343 	}
   3344 
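	/* Fold the hardware error counters into the interface statistics. */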
   3345 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3346 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3347 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3348 	    + CSR_READ(sc, WMREG_CRCERRS)
   3349 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3350 	    + CSR_READ(sc, WMREG_SYMERRC)
   3351 	    + CSR_READ(sc, WMREG_RXERRC)
   3352 	    + CSR_READ(sc, WMREG_SEC)
   3353 	    + CSR_READ(sc, WMREG_CEXTERR)
   3354 	    + CSR_READ(sc, WMREG_RLEC));
   3355 	/*
    3356 	 * WMREG_RNBC is incremented when no buffers are available in host
    3357 	 * memory. It is not the number of dropped packets, because the
    3358 	 * ethernet controller can still receive packets in that case as
    3359 	 * long as there is space in the PHY's FIFO.
    3360 	 *
    3361 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT
    3362 	 * instead of if_iqdrops.
   3363 	 */
   3364 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3365 	IF_STAT_PUTREF(ifp);
   3366 
   3367 	if (sc->sc_flags & WM_F_HAS_MII)
   3368 		mii_tick(&sc->sc_mii);
   3369 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3370 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3371 		wm_serdes_tick(sc);
   3372 	else
   3373 		wm_tbi_tick(sc);
   3374 
   3375 	WM_CORE_UNLOCK(sc);
   3376 
   3377 	wm_watchdog(ifp);
   3378 
   3379 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3380 }
   3381 
   3382 static int
   3383 wm_ifflags_cb(struct ethercom *ec)
   3384 {
   3385 	struct ifnet *ifp = &ec->ec_if;
   3386 	struct wm_softc *sc = ifp->if_softc;
   3387 	u_short iffchange;
   3388 	int ecchange;
   3389 	bool needreset = false;
   3390 	int rc = 0;
   3391 
   3392 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3393 		device_xname(sc->sc_dev), __func__));
   3394 
   3395 	WM_CORE_LOCK(sc);
   3396 
   3397 	/*
   3398 	 * Check for if_flags.
    3399 	 * The main usage is to prevent link down when opening bpf.
   3400 	 */
   3401 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3402 	sc->sc_if_flags = ifp->if_flags;
   3403 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3404 		needreset = true;
   3405 		goto ec;
   3406 	}
   3407 
   3408 	/* iff related updates */
   3409 	if ((iffchange & IFF_PROMISC) != 0)
   3410 		wm_set_filter(sc);
   3411 
   3412 	wm_set_vlan(sc);
   3413 
   3414 ec:
   3415 	/* Check for ec_capenable. */
   3416 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3417 	sc->sc_ec_capenable = ec->ec_capenable;
   3418 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3419 		needreset = true;
   3420 		goto out;
   3421 	}
   3422 
   3423 	/* ec related updates */
   3424 	wm_set_eee(sc);
   3425 
   3426 out:
   3427 	if (needreset)
   3428 		rc = ENETRESET;
   3429 	WM_CORE_UNLOCK(sc);
   3430 
   3431 	return rc;
   3432 }
   3433 
   3434 /*
   3435  * wm_ioctl:		[ifnet interface function]
   3436  *
   3437  *	Handle control requests from the operator.
   3438  */
   3439 static int
   3440 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3441 {
   3442 	struct wm_softc *sc = ifp->if_softc;
   3443 	struct ifreq *ifr = (struct ifreq *)data;
   3444 	struct ifaddr *ifa = (struct ifaddr *)data;
   3445 	struct sockaddr_dl *sdl;
   3446 	int s, error;
   3447 
   3448 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3449 		device_xname(sc->sc_dev), __func__));
   3450 
   3451 #ifndef WM_MPSAFE
   3452 	s = splnet();
   3453 #endif
   3454 	switch (cmd) {
   3455 	case SIOCSIFMEDIA:
   3456 		WM_CORE_LOCK(sc);
   3457 		/* Flow control requires full-duplex mode. */
   3458 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3459 		    (ifr->ifr_media & IFM_FDX) == 0)
   3460 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3461 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3462 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3463 				/* We can do both TXPAUSE and RXPAUSE. */
   3464 				ifr->ifr_media |=
   3465 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3466 			}
   3467 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3468 		}
   3469 		WM_CORE_UNLOCK(sc);
   3470 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3471 		break;
   3472 	case SIOCINITIFADDR:
   3473 		WM_CORE_LOCK(sc);
   3474 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3475 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3476 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3477 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3478 			/* Unicast address is the first multicast entry */
   3479 			wm_set_filter(sc);
   3480 			error = 0;
   3481 			WM_CORE_UNLOCK(sc);
   3482 			break;
   3483 		}
   3484 		WM_CORE_UNLOCK(sc);
   3485 		/*FALLTHROUGH*/
   3486 	default:
   3487 #ifdef WM_MPSAFE
   3488 		s = splnet();
   3489 #endif
   3490 		/* It may call wm_start, so unlock here */
   3491 		error = ether_ioctl(ifp, cmd, data);
   3492 #ifdef WM_MPSAFE
   3493 		splx(s);
   3494 #endif
   3495 		if (error != ENETRESET)
   3496 			break;
   3497 
   3498 		error = 0;
   3499 
   3500 		if (cmd == SIOCSIFCAP)
   3501 			error = (*ifp->if_init)(ifp);
   3502 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3503 			;
   3504 		else if (ifp->if_flags & IFF_RUNNING) {
   3505 			/*
   3506 			 * Multicast list has changed; set the hardware filter
   3507 			 * accordingly.
   3508 			 */
   3509 			WM_CORE_LOCK(sc);
   3510 			wm_set_filter(sc);
   3511 			WM_CORE_UNLOCK(sc);
   3512 		}
   3513 		break;
   3514 	}
   3515 
   3516 #ifndef WM_MPSAFE
   3517 	splx(s);
   3518 #endif
   3519 	return error;
   3520 }
   3521 
   3522 /* MAC address related */
   3523 
   3524 /*
    3525  * Get the offset of the MAC address and return it.
    3526  * If an error occurred, use offset 0.
   3527  */
   3528 static uint16_t
   3529 wm_check_alt_mac_addr(struct wm_softc *sc)
   3530 {
   3531 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3532 	uint16_t offset = NVM_OFF_MACADDR;
   3533 
   3534 	/* Try to read alternative MAC address pointer */
   3535 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3536 		return 0;
   3537 
    3538 	/* Check whether the pointer is valid or not. */
   3539 	if ((offset == 0x0000) || (offset == 0xffff))
   3540 		return 0;
   3541 
   3542 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3543 	/*
    3544 	 * Check whether the alternative MAC address is valid or not.
    3545 	 * Some cards have a non-0xffff pointer but don't actually
    3546 	 * use an alternative MAC address.
   3547 	 *
   3548 	 * Check whether the broadcast bit is set or not.
   3549 	 */
   3550 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3551 		if (((myea[0] & 0xff) & 0x01) == 0)
   3552 			return offset; /* Found */
   3553 
   3554 	/* Not found */
   3555 	return 0;
   3556 }
   3557 
   3558 static int
   3559 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3560 {
   3561 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3562 	uint16_t offset = NVM_OFF_MACADDR;
   3563 	int do_invert = 0;
   3564 
   3565 	switch (sc->sc_type) {
   3566 	case WM_T_82580:
   3567 	case WM_T_I350:
   3568 	case WM_T_I354:
   3569 		/* EEPROM Top Level Partitioning */
   3570 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3571 		break;
   3572 	case WM_T_82571:
   3573 	case WM_T_82575:
   3574 	case WM_T_82576:
   3575 	case WM_T_80003:
   3576 	case WM_T_I210:
   3577 	case WM_T_I211:
   3578 		offset = wm_check_alt_mac_addr(sc);
   3579 		if (offset == 0)
   3580 			if ((sc->sc_funcid & 0x01) == 1)
   3581 				do_invert = 1;
   3582 		break;
   3583 	default:
   3584 		if ((sc->sc_funcid & 0x01) == 1)
   3585 			do_invert = 1;
   3586 		break;
   3587 	}
   3588 
   3589 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3590 		goto bad;
   3591 
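	/* Each 16-bit NVM word holds two address octets, low byte first. */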
   3592 	enaddr[0] = myea[0] & 0xff;
   3593 	enaddr[1] = myea[0] >> 8;
   3594 	enaddr[2] = myea[1] & 0xff;
   3595 	enaddr[3] = myea[1] >> 8;
   3596 	enaddr[4] = myea[2] & 0xff;
   3597 	enaddr[5] = myea[2] >> 8;
   3598 
   3599 	/*
   3600 	 * Toggle the LSB of the MAC address on the second port
   3601 	 * of some dual port cards.
   3602 	 */
   3603 	if (do_invert != 0)
   3604 		enaddr[5] ^= 1;
   3605 
   3606 	return 0;
   3607 
   3608  bad:
   3609 	return -1;
   3610 }
   3611 
   3612 /*
   3613  * wm_set_ral:
   3614  *
    3615  *	Set an entry in the receive address list.
   3616  */
   3617 static void
   3618 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3619 {
   3620 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3621 	uint32_t wlock_mac;
   3622 	int rv;
   3623 
   3624 	if (enaddr != NULL) {
   3625 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3626 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3627 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3628 		ral_hi |= RAL_AV;
   3629 	} else {
   3630 		ral_lo = 0;
   3631 		ral_hi = 0;
   3632 	}
   3633 
   3634 	switch (sc->sc_type) {
   3635 	case WM_T_82542_2_0:
   3636 	case WM_T_82542_2_1:
   3637 	case WM_T_82543:
   3638 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3639 		CSR_WRITE_FLUSH(sc);
   3640 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3641 		CSR_WRITE_FLUSH(sc);
   3642 		break;
   3643 	case WM_T_PCH2:
   3644 	case WM_T_PCH_LPT:
   3645 	case WM_T_PCH_SPT:
   3646 	case WM_T_PCH_CNP:
   3647 		if (idx == 0) {
   3648 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3649 			CSR_WRITE_FLUSH(sc);
   3650 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3651 			CSR_WRITE_FLUSH(sc);
   3652 			return;
   3653 		}
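		/*
		 * Entries other than RAR[0] go into the shared receive
		 * address (SHRA) registers; FWSM_WLOCK_MAC may limit how
		 * many of them software is allowed to write.
		 */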
   3654 		if (sc->sc_type != WM_T_PCH2) {
   3655 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3656 			    FWSM_WLOCK_MAC);
   3657 			addrl = WMREG_SHRAL(idx - 1);
   3658 			addrh = WMREG_SHRAH(idx - 1);
   3659 		} else {
   3660 			wlock_mac = 0;
   3661 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3662 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3663 		}
   3664 
   3665 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3666 			rv = wm_get_swflag_ich8lan(sc);
   3667 			if (rv != 0)
   3668 				return;
   3669 			CSR_WRITE(sc, addrl, ral_lo);
   3670 			CSR_WRITE_FLUSH(sc);
   3671 			CSR_WRITE(sc, addrh, ral_hi);
   3672 			CSR_WRITE_FLUSH(sc);
   3673 			wm_put_swflag_ich8lan(sc);
   3674 		}
   3675 
   3676 		break;
   3677 	default:
   3678 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3679 		CSR_WRITE_FLUSH(sc);
   3680 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3681 		CSR_WRITE_FLUSH(sc);
   3682 		break;
   3683 	}
   3684 }
   3685 
   3686 /*
   3687  * wm_mchash:
   3688  *
   3689  *	Compute the hash of the multicast address for the 4096-bit
   3690  *	multicast filter.
   3691  */
   3692 static uint32_t
   3693 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3694 {
   3695 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3696 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3697 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3698 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3699 	uint32_t hash;
   3700 
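	/*
	 * The hash is built from the last two octets of the address;
	 * sc_mchash_type selects which bits are used.  ICH/PCH chips
	 * use a 10-bit hash (1024-bin filter), the others a 12-bit
	 * hash (4096-bin filter).
	 */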
   3701 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3702 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3703 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3704 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3705 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3706 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3707 		return (hash & 0x3ff);
   3708 	}
   3709 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3710 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3711 
   3712 	return (hash & 0xfff);
   3713 }
   3714 
   3715 /*
    3716  * wm_rar_count:
    3717  *	Return the number of entries in the receive address list.
   3718  */
   3719 static int
   3720 wm_rar_count(struct wm_softc *sc)
   3721 {
   3722 	int size;
   3723 
   3724 	switch (sc->sc_type) {
   3725 	case WM_T_ICH8:
    3726 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3727 		break;
   3728 	case WM_T_ICH9:
   3729 	case WM_T_ICH10:
   3730 	case WM_T_PCH:
   3731 		size = WM_RAL_TABSIZE_ICH8;
   3732 		break;
   3733 	case WM_T_PCH2:
   3734 		size = WM_RAL_TABSIZE_PCH2;
   3735 		break;
   3736 	case WM_T_PCH_LPT:
   3737 	case WM_T_PCH_SPT:
   3738 	case WM_T_PCH_CNP:
   3739 		size = WM_RAL_TABSIZE_PCH_LPT;
   3740 		break;
   3741 	case WM_T_82575:
   3742 	case WM_T_I210:
   3743 	case WM_T_I211:
   3744 		size = WM_RAL_TABSIZE_82575;
   3745 		break;
   3746 	case WM_T_82576:
   3747 	case WM_T_82580:
   3748 		size = WM_RAL_TABSIZE_82576;
   3749 		break;
   3750 	case WM_T_I350:
   3751 	case WM_T_I354:
   3752 		size = WM_RAL_TABSIZE_I350;
   3753 		break;
   3754 	default:
   3755 		size = WM_RAL_TABSIZE;
   3756 	}
   3757 
   3758 	return size;
   3759 }
   3760 
   3761 /*
   3762  * wm_set_filter:
   3763  *
   3764  *	Set up the receive filter.
   3765  */
   3766 static void
   3767 wm_set_filter(struct wm_softc *sc)
   3768 {
   3769 	struct ethercom *ec = &sc->sc_ethercom;
   3770 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3771 	struct ether_multi *enm;
   3772 	struct ether_multistep step;
   3773 	bus_addr_t mta_reg;
   3774 	uint32_t hash, reg, bit;
   3775 	int i, size, ralmax;
   3776 
   3777 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3778 		device_xname(sc->sc_dev), __func__));
   3779 
   3780 	if (sc->sc_type >= WM_T_82544)
   3781 		mta_reg = WMREG_CORDOVA_MTA;
   3782 	else
   3783 		mta_reg = WMREG_MTA;
   3784 
   3785 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3786 
   3787 	if (ifp->if_flags & IFF_BROADCAST)
   3788 		sc->sc_rctl |= RCTL_BAM;
   3789 	if (ifp->if_flags & IFF_PROMISC) {
   3790 		sc->sc_rctl |= RCTL_UPE;
   3791 		ETHER_LOCK(ec);
   3792 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3793 		ETHER_UNLOCK(ec);
   3794 		goto allmulti;
   3795 	}
   3796 
   3797 	/*
   3798 	 * Set the station address in the first RAL slot, and
   3799 	 * clear the remaining slots.
   3800 	 */
   3801 	size = wm_rar_count(sc);
   3802 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3803 
   3804 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3805 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3806 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3807 		switch (i) {
   3808 		case 0:
   3809 			/* We can use all entries */
   3810 			ralmax = size;
   3811 			break;
   3812 		case 1:
   3813 			/* Only RAR[0] */
   3814 			ralmax = 1;
   3815 			break;
   3816 		default:
   3817 			/* Available SHRA + RAR[0] */
   3818 			ralmax = i + 1;
   3819 		}
   3820 	} else
   3821 		ralmax = size;
   3822 	for (i = 1; i < size; i++) {
   3823 		if (i < ralmax)
   3824 			wm_set_ral(sc, NULL, i);
   3825 	}
   3826 
   3827 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3828 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3829 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3830 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3831 		size = WM_ICH8_MC_TABSIZE;
   3832 	else
   3833 		size = WM_MC_TABSIZE;
   3834 	/* Clear out the multicast table. */
   3835 	for (i = 0; i < size; i++) {
   3836 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3837 		CSR_WRITE_FLUSH(sc);
   3838 	}
   3839 
   3840 	ETHER_LOCK(ec);
   3841 	ETHER_FIRST_MULTI(step, ec, enm);
   3842 	while (enm != NULL) {
   3843 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3844 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3845 			ETHER_UNLOCK(ec);
   3846 			/*
   3847 			 * We must listen to a range of multicast addresses.
   3848 			 * For now, just accept all multicasts, rather than
   3849 			 * trying to set only those filter bits needed to match
   3850 			 * the range.  (At this time, the only use of address
   3851 			 * ranges is for IP multicast routing, for which the
   3852 			 * range is big enough to require all bits set.)
   3853 			 */
   3854 			goto allmulti;
   3855 		}
   3856 
   3857 		hash = wm_mchash(sc, enm->enm_addrlo);
   3858 
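		/*
		 * The upper hash bits select one of the 32-bit MTA
		 * registers; the low five bits select the bit within it.
		 */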
   3859 		reg = (hash >> 5);
   3860 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3861 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3862 		    || (sc->sc_type == WM_T_PCH2)
   3863 		    || (sc->sc_type == WM_T_PCH_LPT)
   3864 		    || (sc->sc_type == WM_T_PCH_SPT)
   3865 		    || (sc->sc_type == WM_T_PCH_CNP))
   3866 			reg &= 0x1f;
   3867 		else
   3868 			reg &= 0x7f;
   3869 		bit = hash & 0x1f;
   3870 
   3871 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3872 		hash |= 1U << bit;
   3873 
   3874 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3875 			/*
    3876 			 * 82544 Errata 9: Certain registers cannot be written
   3877 			 * with particular alignments in PCI-X bus operation
   3878 			 * (FCAH, MTA and VFTA).
   3879 			 */
   3880 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3881 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3882 			CSR_WRITE_FLUSH(sc);
   3883 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3884 			CSR_WRITE_FLUSH(sc);
   3885 		} else {
   3886 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3887 			CSR_WRITE_FLUSH(sc);
   3888 		}
   3889 
   3890 		ETHER_NEXT_MULTI(step, enm);
   3891 	}
   3892 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3893 	ETHER_UNLOCK(ec);
   3894 
   3895 	goto setit;
   3896 
   3897  allmulti:
   3898 	sc->sc_rctl |= RCTL_MPE;
   3899 
   3900  setit:
   3901 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3902 }
   3903 
   3904 /* Reset and init related */
   3905 
   3906 static void
   3907 wm_set_vlan(struct wm_softc *sc)
   3908 {
   3909 
   3910 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3911 		device_xname(sc->sc_dev), __func__));
   3912 
   3913 	/* Deal with VLAN enables. */
   3914 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3915 		sc->sc_ctrl |= CTRL_VME;
   3916 	else
   3917 		sc->sc_ctrl &= ~CTRL_VME;
   3918 
   3919 	/* Write the control registers. */
   3920 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3921 }
   3922 
   3923 static void
   3924 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3925 {
   3926 	uint32_t gcr;
   3927 	pcireg_t ctrl2;
   3928 
   3929 	gcr = CSR_READ(sc, WMREG_GCR);
   3930 
   3931 	/* Only take action if timeout value is defaulted to 0 */
   3932 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3933 		goto out;
   3934 
   3935 	if ((gcr & GCR_CAP_VER2) == 0) {
   3936 		gcr |= GCR_CMPL_TMOUT_10MS;
   3937 		goto out;
   3938 	}
   3939 
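	/*
	 * For capability version 2 devices, set the completion timeout
	 * through the PCIe Device Control 2 configuration register.
	 */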
   3940 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3941 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3942 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3943 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3944 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3945 
   3946 out:
   3947 	/* Disable completion timeout resend */
   3948 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3949 
   3950 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3951 }
   3952 
   3953 void
   3954 wm_get_auto_rd_done(struct wm_softc *sc)
   3955 {
   3956 	int i;
   3957 
    3958 	/* Wait for eeprom to reload */
   3959 	switch (sc->sc_type) {
   3960 	case WM_T_82571:
   3961 	case WM_T_82572:
   3962 	case WM_T_82573:
   3963 	case WM_T_82574:
   3964 	case WM_T_82583:
   3965 	case WM_T_82575:
   3966 	case WM_T_82576:
   3967 	case WM_T_82580:
   3968 	case WM_T_I350:
   3969 	case WM_T_I354:
   3970 	case WM_T_I210:
   3971 	case WM_T_I211:
   3972 	case WM_T_80003:
   3973 	case WM_T_ICH8:
   3974 	case WM_T_ICH9:
   3975 		for (i = 0; i < 10; i++) {
   3976 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3977 				break;
   3978 			delay(1000);
   3979 		}
   3980 		if (i == 10) {
   3981 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3982 			    "complete\n", device_xname(sc->sc_dev));
   3983 		}
   3984 		break;
   3985 	default:
   3986 		break;
   3987 	}
   3988 }
   3989 
   3990 void
   3991 wm_lan_init_done(struct wm_softc *sc)
   3992 {
   3993 	uint32_t reg = 0;
   3994 	int i;
   3995 
   3996 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3997 		device_xname(sc->sc_dev), __func__));
   3998 
   3999 	/* Wait for eeprom to reload */
   4000 	switch (sc->sc_type) {
   4001 	case WM_T_ICH10:
   4002 	case WM_T_PCH:
   4003 	case WM_T_PCH2:
   4004 	case WM_T_PCH_LPT:
   4005 	case WM_T_PCH_SPT:
   4006 	case WM_T_PCH_CNP:
   4007 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4008 			reg = CSR_READ(sc, WMREG_STATUS);
   4009 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4010 				break;
   4011 			delay(100);
   4012 		}
   4013 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4014 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4015 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4016 		}
   4017 		break;
   4018 	default:
   4019 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4020 		    __func__);
   4021 		break;
   4022 	}
   4023 
   4024 	reg &= ~STATUS_LAN_INIT_DONE;
   4025 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4026 }
   4027 
   4028 void
   4029 wm_get_cfg_done(struct wm_softc *sc)
   4030 {
   4031 	int mask;
   4032 	uint32_t reg;
   4033 	int i;
   4034 
   4035 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4036 		device_xname(sc->sc_dev), __func__));
   4037 
   4038 	/* Wait for eeprom to reload */
   4039 	switch (sc->sc_type) {
   4040 	case WM_T_82542_2_0:
   4041 	case WM_T_82542_2_1:
   4042 		/* null */
   4043 		break;
   4044 	case WM_T_82543:
   4045 	case WM_T_82544:
   4046 	case WM_T_82540:
   4047 	case WM_T_82545:
   4048 	case WM_T_82545_3:
   4049 	case WM_T_82546:
   4050 	case WM_T_82546_3:
   4051 	case WM_T_82541:
   4052 	case WM_T_82541_2:
   4053 	case WM_T_82547:
   4054 	case WM_T_82547_2:
   4055 	case WM_T_82573:
   4056 	case WM_T_82574:
   4057 	case WM_T_82583:
   4058 		/* generic */
   4059 		delay(10*1000);
   4060 		break;
   4061 	case WM_T_80003:
   4062 	case WM_T_82571:
   4063 	case WM_T_82572:
   4064 	case WM_T_82575:
   4065 	case WM_T_82576:
   4066 	case WM_T_82580:
   4067 	case WM_T_I350:
   4068 	case WM_T_I354:
   4069 	case WM_T_I210:
   4070 	case WM_T_I211:
   4071 		if (sc->sc_type == WM_T_82571) {
   4072 			/* Only 82571 shares port 0 */
   4073 			mask = EEMNGCTL_CFGDONE_0;
   4074 		} else
   4075 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4076 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4077 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4078 				break;
   4079 			delay(1000);
   4080 		}
   4081 		if (i >= WM_PHY_CFG_TIMEOUT)
   4082 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4083 				device_xname(sc->sc_dev), __func__));
   4084 		break;
   4085 	case WM_T_ICH8:
   4086 	case WM_T_ICH9:
   4087 	case WM_T_ICH10:
   4088 	case WM_T_PCH:
   4089 	case WM_T_PCH2:
   4090 	case WM_T_PCH_LPT:
   4091 	case WM_T_PCH_SPT:
   4092 	case WM_T_PCH_CNP:
   4093 		delay(10*1000);
   4094 		if (sc->sc_type >= WM_T_ICH10)
   4095 			wm_lan_init_done(sc);
   4096 		else
   4097 			wm_get_auto_rd_done(sc);
   4098 
   4099 		/* Clear PHY Reset Asserted bit */
   4100 		reg = CSR_READ(sc, WMREG_STATUS);
   4101 		if ((reg & STATUS_PHYRA) != 0)
   4102 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4103 		break;
   4104 	default:
   4105 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4106 		    __func__);
   4107 		break;
   4108 	}
   4109 }
   4110 
   4111 int
   4112 wm_phy_post_reset(struct wm_softc *sc)
   4113 {
   4114 	device_t dev = sc->sc_dev;
   4115 	uint16_t reg;
   4116 	int rv = 0;
   4117 
   4118 	/* This function is only for ICH8 and newer. */
   4119 	if (sc->sc_type < WM_T_ICH8)
   4120 		return 0;
   4121 
   4122 	if (wm_phy_resetisblocked(sc)) {
   4123 		/* XXX */
   4124 		device_printf(dev, "PHY is blocked\n");
   4125 		return -1;
   4126 	}
   4127 
   4128 	/* Allow time for h/w to get to quiescent state after reset */
   4129 	delay(10*1000);
   4130 
   4131 	/* Perform any necessary post-reset workarounds */
   4132 	if (sc->sc_type == WM_T_PCH)
   4133 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4134 	else if (sc->sc_type == WM_T_PCH2)
   4135 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4136 	if (rv != 0)
   4137 		return rv;
   4138 
   4139 	/* Clear the host wakeup bit after lcd reset */
   4140 	if (sc->sc_type >= WM_T_PCH) {
   4141 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4142 		reg &= ~BM_WUC_HOST_WU_BIT;
   4143 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4144 	}
   4145 
   4146 	/* Configure the LCD with the extended configuration region in NVM */
   4147 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4148 		return rv;
   4149 
   4150 	/* Configure the LCD with the OEM bits in NVM */
   4151 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4152 
   4153 	if (sc->sc_type == WM_T_PCH2) {
   4154 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4155 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4156 			delay(10 * 1000);
   4157 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4158 		}
   4159 		/* Set EEE LPI Update Timer to 200usec */
   4160 		rv = sc->phy.acquire(sc);
   4161 		if (rv)
   4162 			return rv;
   4163 		rv = wm_write_emi_reg_locked(dev,
   4164 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4165 		sc->phy.release(sc);
   4166 	}
   4167 
   4168 	return rv;
   4169 }
   4170 
   4171 /* Only for PCH and newer */
   4172 static int
   4173 wm_write_smbus_addr(struct wm_softc *sc)
   4174 {
   4175 	uint32_t strap, freq;
   4176 	uint16_t phy_data;
   4177 	int rv;
   4178 
   4179 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4180 		device_xname(sc->sc_dev), __func__));
   4181 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4182 
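	/*
	 * The STRAP register carries the SMBus address and frequency
	 * straps; mirror them into the PHY's HV_SMB_ADDR register.
	 */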
   4183 	strap = CSR_READ(sc, WMREG_STRAP);
   4184 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4185 
   4186 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4187 	if (rv != 0)
   4188 		return -1;
   4189 
   4190 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4191 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4192 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4193 
   4194 	if (sc->sc_phytype == WMPHY_I217) {
   4195 		/* Restore SMBus frequency */
    4196 		if (freq--) {
   4197 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4198 			    | HV_SMB_ADDR_FREQ_HIGH);
   4199 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4200 			    HV_SMB_ADDR_FREQ_LOW);
   4201 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4202 			    HV_SMB_ADDR_FREQ_HIGH);
   4203 		} else
   4204 			DPRINTF(WM_DEBUG_INIT,
   4205 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4206 				device_xname(sc->sc_dev), __func__));
   4207 	}
   4208 
   4209 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4210 	    phy_data);
   4211 }
   4212 
   4213 static int
   4214 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4215 {
   4216 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4217 	uint16_t phy_page = 0;
   4218 	int rv = 0;
   4219 
   4220 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4221 		device_xname(sc->sc_dev), __func__));
   4222 
   4223 	switch (sc->sc_type) {
   4224 	case WM_T_ICH8:
   4225 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4226 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4227 			return 0;
   4228 
   4229 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4230 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4231 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4232 			break;
   4233 		}
   4234 		/* FALLTHROUGH */
   4235 	case WM_T_PCH:
   4236 	case WM_T_PCH2:
   4237 	case WM_T_PCH_LPT:
   4238 	case WM_T_PCH_SPT:
   4239 	case WM_T_PCH_CNP:
   4240 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4241 		break;
   4242 	default:
   4243 		return 0;
   4244 	}
   4245 
   4246 	if ((rv = sc->phy.acquire(sc)) != 0)
   4247 		return rv;
   4248 
   4249 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4250 	if ((reg & sw_cfg_mask) == 0)
   4251 		goto release;
   4252 
   4253 	/*
   4254 	 * Make sure HW does not configure LCD from PHY extended configuration
   4255 	 * before SW configuration
   4256 	 */
   4257 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4258 	if ((sc->sc_type < WM_T_PCH2)
   4259 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4260 		goto release;
   4261 
   4262 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4263 		device_xname(sc->sc_dev), __func__));
   4264 	/* word_addr is in DWORD */
   4265 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4266 
   4267 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4268 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4269 	if (cnf_size == 0)
   4270 		goto release;
   4271 
   4272 	if (((sc->sc_type == WM_T_PCH)
   4273 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4274 	    || (sc->sc_type > WM_T_PCH)) {
   4275 		/*
   4276 		 * HW configures the SMBus address and LEDs when the OEM and
   4277 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4278 		 * are cleared, SW will configure them instead.
   4279 		 */
   4280 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4281 			device_xname(sc->sc_dev), __func__));
   4282 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4283 			goto release;
   4284 
   4285 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4286 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4287 		    (uint16_t)reg);
   4288 		if (rv != 0)
   4289 			goto release;
   4290 	}
   4291 
   4292 	/* Configure LCD from extended configuration region. */
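	/*
	 * Each entry is a (data, address) word pair.  A write to the
	 * page-select register updates the sticky PHY page applied to
	 * the subsequent entries.
	 */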
   4293 	for (i = 0; i < cnf_size; i++) {
   4294 		uint16_t reg_data, reg_addr;
   4295 
   4296 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4297 			goto release;
   4298 
    4299 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4300 			goto release;
   4301 
   4302 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4303 			phy_page = reg_data;
   4304 
   4305 		reg_addr &= IGPHY_MAXREGADDR;
   4306 		reg_addr |= phy_page;
   4307 
   4308 		KASSERT(sc->phy.writereg_locked != NULL);
   4309 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4310 		    reg_data);
   4311 	}
   4312 
   4313 release:
   4314 	sc->phy.release(sc);
   4315 	return rv;
   4316 }
   4317 
   4318 /*
   4319  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4320  *  @sc:       pointer to the HW structure
   4321  *  @d0_state: boolean if entering d0 or d3 device state
   4322  *
   4323  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4324  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4325  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4326  */
   4327 int
   4328 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4329 {
   4330 	uint32_t mac_reg;
   4331 	uint16_t oem_reg;
   4332 	int rv;
   4333 
   4334 	if (sc->sc_type < WM_T_PCH)
   4335 		return 0;
   4336 
   4337 	rv = sc->phy.acquire(sc);
   4338 	if (rv != 0)
   4339 		return rv;
   4340 
   4341 	if (sc->sc_type == WM_T_PCH) {
   4342 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4343 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4344 			goto release;
   4345 	}
   4346 
   4347 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4348 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4349 		goto release;
   4350 
   4351 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4352 
   4353 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4354 	if (rv != 0)
   4355 		goto release;
   4356 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4357 
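	/*
	 * Translate the MAC's PHY_CTRL gigabit-disable and LPLU bits
	 * into the PHY's OEM bits for the requested power state.
	 */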
   4358 	if (d0_state) {
   4359 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4360 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4361 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4362 			oem_reg |= HV_OEM_BITS_LPLU;
   4363 	} else {
   4364 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4365 		    != 0)
   4366 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4367 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4368 		    != 0)
   4369 			oem_reg |= HV_OEM_BITS_LPLU;
   4370 	}
   4371 
   4372 	/* Set Restart auto-neg to activate the bits */
   4373 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4374 	    && (wm_phy_resetisblocked(sc) == false))
   4375 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4376 
   4377 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4378 
   4379 release:
   4380 	sc->phy.release(sc);
   4381 
   4382 	return rv;
   4383 }
   4384 
   4385 /* Init hardware bits */
   4386 void
   4387 wm_initialize_hardware_bits(struct wm_softc *sc)
   4388 {
   4389 	uint32_t tarc0, tarc1, reg;
   4390 
   4391 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4392 		device_xname(sc->sc_dev), __func__));
   4393 
   4394 	/* For 82571 variant, 80003 and ICHs */
   4395 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4396 	    || (sc->sc_type >= WM_T_80003)) {
   4397 
   4398 		/* Transmit Descriptor Control 0 */
   4399 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4400 		reg |= TXDCTL_COUNT_DESC;
   4401 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4402 
   4403 		/* Transmit Descriptor Control 1 */
   4404 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4405 		reg |= TXDCTL_COUNT_DESC;
   4406 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4407 
   4408 		/* TARC0 */
   4409 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4410 		switch (sc->sc_type) {
   4411 		case WM_T_82571:
   4412 		case WM_T_82572:
   4413 		case WM_T_82573:
   4414 		case WM_T_82574:
   4415 		case WM_T_82583:
   4416 		case WM_T_80003:
   4417 			/* Clear bits 30..27 */
   4418 			tarc0 &= ~__BITS(30, 27);
   4419 			break;
   4420 		default:
   4421 			break;
   4422 		}
   4423 
   4424 		switch (sc->sc_type) {
   4425 		case WM_T_82571:
   4426 		case WM_T_82572:
   4427 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4428 
   4429 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4430 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4431 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4432 			/* 8257[12] Errata No.7 */
    4433 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4434 
   4435 			/* TARC1 bit 28 */
   4436 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4437 				tarc1 &= ~__BIT(28);
   4438 			else
   4439 				tarc1 |= __BIT(28);
   4440 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4441 
   4442 			/*
   4443 			 * 8257[12] Errata No.13
    4444 			 * Disable Dynamic Clock Gating.
   4445 			 */
   4446 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4447 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4448 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4449 			break;
   4450 		case WM_T_82573:
   4451 		case WM_T_82574:
   4452 		case WM_T_82583:
   4453 			if ((sc->sc_type == WM_T_82574)
   4454 			    || (sc->sc_type == WM_T_82583))
   4455 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4456 
   4457 			/* Extended Device Control */
   4458 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4459 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4460 			reg |= __BIT(22);	/* Set bit 22 */
   4461 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4462 
   4463 			/* Device Control */
   4464 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4465 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4466 
   4467 			/* PCIe Control Register */
   4468 			/*
   4469 			 * 82573 Errata (unknown).
   4470 			 *
   4471 			 * 82574 Errata 25 and 82583 Errata 12
   4472 			 * "Dropped Rx Packets":
    4473 			 *   Fixed in NVM Image Version 2.1.4 and newer.
   4474 			 */
   4475 			reg = CSR_READ(sc, WMREG_GCR);
   4476 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4477 			CSR_WRITE(sc, WMREG_GCR, reg);
   4478 
   4479 			if ((sc->sc_type == WM_T_82574)
   4480 			    || (sc->sc_type == WM_T_82583)) {
   4481 				/*
   4482 				 * Document says this bit must be set for
   4483 				 * proper operation.
   4484 				 */
   4485 				reg = CSR_READ(sc, WMREG_GCR);
   4486 				reg |= __BIT(22);
   4487 				CSR_WRITE(sc, WMREG_GCR, reg);
   4488 
   4489 				/*
    4490 				 * Apply a workaround for the hardware errata
    4491 				 * documented in the errata docs. It fixes an
    4492 				 * issue where error-prone or unreliable PCIe
    4493 				 * completions occur, particularly with ASPM
    4494 				 * enabled. Without the fix, the issue can
    4495 				 * cause Tx timeouts.
   4496 				 */
   4497 				reg = CSR_READ(sc, WMREG_GCR2);
   4498 				reg |= __BIT(0);
   4499 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4500 			}
   4501 			break;
   4502 		case WM_T_80003:
   4503 			/* TARC0 */
   4504 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4505 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4506 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4507 
   4508 			/* TARC1 bit 28 */
   4509 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4510 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4511 				tarc1 &= ~__BIT(28);
   4512 			else
   4513 				tarc1 |= __BIT(28);
   4514 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4515 			break;
   4516 		case WM_T_ICH8:
   4517 		case WM_T_ICH9:
   4518 		case WM_T_ICH10:
   4519 		case WM_T_PCH:
   4520 		case WM_T_PCH2:
   4521 		case WM_T_PCH_LPT:
   4522 		case WM_T_PCH_SPT:
   4523 		case WM_T_PCH_CNP:
   4524 			/* TARC0 */
   4525 			if (sc->sc_type == WM_T_ICH8) {
   4526 				/* Set TARC0 bits 29 and 28 */
   4527 				tarc0 |= __BITS(29, 28);
   4528 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4529 				tarc0 |= __BIT(29);
   4530 				/*
    4531 				 * Drop bit 28. From Linux.
   4532 				 * See I218/I219 spec update
   4533 				 * "5. Buffer Overrun While the I219 is
   4534 				 * Processing DMA Transactions"
   4535 				 */
   4536 				tarc0 &= ~__BIT(28);
   4537 			}
   4538 			/* Set TARC0 bits 23,24,26,27 */
   4539 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4540 
   4541 			/* CTRL_EXT */
   4542 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4543 			reg |= __BIT(22);	/* Set bit 22 */
   4544 			/*
   4545 			 * Enable PHY low-power state when MAC is at D3
   4546 			 * w/o WoL
   4547 			 */
   4548 			if (sc->sc_type >= WM_T_PCH)
   4549 				reg |= CTRL_EXT_PHYPDEN;
   4550 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4551 
   4552 			/* TARC1 */
   4553 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4554 			/* bit 28 */
   4555 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4556 				tarc1 &= ~__BIT(28);
   4557 			else
   4558 				tarc1 |= __BIT(28);
   4559 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4560 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4561 
   4562 			/* Device Status */
   4563 			if (sc->sc_type == WM_T_ICH8) {
   4564 				reg = CSR_READ(sc, WMREG_STATUS);
   4565 				reg &= ~__BIT(31);
   4566 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4567 
   4568 			}
   4569 
   4570 			/* IOSFPC */
   4571 			if (sc->sc_type == WM_T_PCH_SPT) {
   4572 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4573 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4574 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4575 			}
   4576 			/*
    4577 			 * To work around a descriptor data corruption issue
    4578 			 * during NFS v2 UDP traffic, simply disable the NFS
    4579 			 * filtering capability.
   4580 			 */
   4581 			reg = CSR_READ(sc, WMREG_RFCTL);
   4582 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4583 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4584 			break;
   4585 		default:
   4586 			break;
   4587 		}
   4588 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4589 
   4590 		switch (sc->sc_type) {
   4591 		/*
   4592 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4593 		 * Avoid RSS Hash Value bug.
   4594 		 */
   4595 		case WM_T_82571:
   4596 		case WM_T_82572:
   4597 		case WM_T_82573:
   4598 		case WM_T_80003:
   4599 		case WM_T_ICH8:
   4600 			reg = CSR_READ(sc, WMREG_RFCTL);
   4601 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4602 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4603 			break;
   4604 		case WM_T_82574:
    4605 			/* Use extended Rx descriptors. */
   4606 			reg = CSR_READ(sc, WMREG_RFCTL);
   4607 			reg |= WMREG_RFCTL_EXSTEN;
   4608 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4609 			break;
   4610 		default:
   4611 			break;
   4612 		}
   4613 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4614 		/*
   4615 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4616 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4617 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4618 		 * Correctly by the Device"
   4619 		 *
   4620 		 * I354(C2000) Errata AVR53:
   4621 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4622 		 * Hang"
   4623 		 */
   4624 		reg = CSR_READ(sc, WMREG_RFCTL);
   4625 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4626 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4627 	}
   4628 }
   4629 
   4630 static uint32_t
   4631 wm_rxpbs_adjust_82580(uint32_t val)
   4632 {
   4633 	uint32_t rv = 0;
   4634 
   4635 	if (val < __arraycount(wm_82580_rxpbs_table))
   4636 		rv = wm_82580_rxpbs_table[val];
   4637 
   4638 	return rv;
   4639 }
   4640 
   4641 /*
   4642  * wm_reset_phy:
   4643  *
   4644  *	generic PHY reset function.
   4645  *	Same as e1000_phy_hw_reset_generic()
   4646  */
   4647 static int
   4648 wm_reset_phy(struct wm_softc *sc)
   4649 {
   4650 	uint32_t reg;
   4651 
   4652 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4653 		device_xname(sc->sc_dev), __func__));
   4654 	if (wm_phy_resetisblocked(sc))
   4655 		return -1;
   4656 
   4657 	sc->phy.acquire(sc);
   4658 
   4659 	reg = CSR_READ(sc, WMREG_CTRL);
   4660 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4661 	CSR_WRITE_FLUSH(sc);
   4662 
   4663 	delay(sc->phy.reset_delay_us);
   4664 
   4665 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4666 	CSR_WRITE_FLUSH(sc);
   4667 
   4668 	delay(150);
   4669 
   4670 	sc->phy.release(sc);
   4671 
   4672 	wm_get_cfg_done(sc);
   4673 	wm_phy_post_reset(sc);
   4674 
   4675 	return 0;
   4676 }
   4677 
   4678 /*
    4679  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4680  * so it is enough to check sc->sc_queue[0] only.
   4681  */
   4682 static void
   4683 wm_flush_desc_rings(struct wm_softc *sc)
   4684 {
   4685 	pcireg_t preg;
   4686 	uint32_t reg;
   4687 	struct wm_txqueue *txq;
   4688 	wiseman_txdesc_t *txd;
   4689 	int nexttx;
   4690 	uint32_t rctl;
   4691 
   4692 	/* First, disable MULR fix in FEXTNVM11 */
   4693 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4694 	reg |= FEXTNVM11_DIS_MULRFIX;
   4695 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4696 
   4697 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4698 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4699 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4700 		return;
   4701 
   4702 	/* TX */
   4703 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4704 	    preg, reg);
   4705 	reg = CSR_READ(sc, WMREG_TCTL);
   4706 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4707 
   4708 	txq = &sc->sc_queue[0].wmq_txq;
   4709 	nexttx = txq->txq_next;
   4710 	txd = &txq->txq_descs[nexttx];
   4711 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4712 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4713 	txd->wtx_fields.wtxu_status = 0;
   4714 	txd->wtx_fields.wtxu_options = 0;
   4715 	txd->wtx_fields.wtxu_vlan = 0;
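         	/*
         	 * A sketch of what the code above arranges: a single dummy
         	 * 512-byte descriptor (with IFCS set so it is well-formed) is
         	 * queued; bumping TDT below makes the hardware fetch it and
         	 * thereby drain any stale descriptors from the Tx ring.
         	 */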
   4716 
   4717 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4718 	    BUS_SPACE_BARRIER_WRITE);
   4719 
   4720 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4721 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4722 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4723 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4724 	delay(250);
   4725 
   4726 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4727 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4728 		return;
   4729 
   4730 	/* RX */
   4731 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4732 	rctl = CSR_READ(sc, WMREG_RCTL);
   4733 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4734 	CSR_WRITE_FLUSH(sc);
   4735 	delay(150);
   4736 
   4737 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4738 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4739 	reg &= 0xffffc000;
   4740 	/*
   4741 	 * Update thresholds: prefetch threshold to 31, host threshold
   4742 	 * to 1 and make sure the granularity is "descriptors" and not
   4743 	 * "cache lines"
   4744 	 */
   4745 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
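         	/*
         	 * With the RXDCTL layout assumed above: 0x1f puts PTHRESH
         	 * (bits 5:0) at 31, (1 << 8) puts HTHRESH (bits 13:8) at 1,
         	 * and RXDCTL_GRAN selects descriptor granularity.
         	 */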
   4746 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4747 
   4748 	/* Momentarily enable the RX ring for the changes to take effect */
   4749 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4750 	CSR_WRITE_FLUSH(sc);
   4751 	delay(150);
   4752 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4753 }
   4754 
   4755 /*
   4756  * wm_reset:
   4757  *
   4758  *	Reset the i82542 chip.
   4759  */
   4760 static void
   4761 wm_reset(struct wm_softc *sc)
   4762 {
   4763 	int phy_reset = 0;
   4764 	int i, error = 0;
   4765 	uint32_t reg;
   4766 	uint16_t kmreg;
   4767 	int rv;
   4768 
   4769 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4770 		device_xname(sc->sc_dev), __func__));
   4771 	KASSERT(sc->sc_type != 0);
   4772 
   4773 	/*
   4774 	 * Allocate on-chip memory according to the MTU size.
   4775 	 * The Packet Buffer Allocation register must be written
   4776 	 * before the chip is reset.
   4777 	 */
   4778 	switch (sc->sc_type) {
   4779 	case WM_T_82547:
   4780 	case WM_T_82547_2:
   4781 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4782 		    PBA_22K : PBA_30K;
   4783 		for (i = 0; i < sc->sc_nqueues; i++) {
   4784 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4785 			txq->txq_fifo_head = 0;
   4786 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4787 			txq->txq_fifo_size =
   4788 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4789 			txq->txq_fifo_stall = 0;
   4790 		}
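         		/*
         		 * Worked example (assuming the PBA_* constants are in
         		 * 1KB units): with an MTU <= 8192, sc_pba = PBA_30K,
         		 * so the Rx FIFO gets 30KB and the Tx FIFO the
         		 * remaining (40 - 30)KB = 10KB of the 40KB packet
         		 * buffer.
         		 */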
   4791 		break;
   4792 	case WM_T_82571:
   4793 	case WM_T_82572:
   4794 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4795 	case WM_T_80003:
   4796 		sc->sc_pba = PBA_32K;
   4797 		break;
   4798 	case WM_T_82573:
   4799 		sc->sc_pba = PBA_12K;
   4800 		break;
   4801 	case WM_T_82574:
   4802 	case WM_T_82583:
   4803 		sc->sc_pba = PBA_20K;
   4804 		break;
   4805 	case WM_T_82576:
   4806 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4807 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4808 		break;
   4809 	case WM_T_82580:
   4810 	case WM_T_I350:
   4811 	case WM_T_I354:
   4812 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4813 		break;
   4814 	case WM_T_I210:
   4815 	case WM_T_I211:
   4816 		sc->sc_pba = PBA_34K;
   4817 		break;
   4818 	case WM_T_ICH8:
   4819 		/* Workaround for a bit corruption issue in FIFO memory */
   4820 		sc->sc_pba = PBA_8K;
   4821 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4822 		break;
   4823 	case WM_T_ICH9:
   4824 	case WM_T_ICH10:
   4825 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4826 		    PBA_14K : PBA_10K;
   4827 		break;
   4828 	case WM_T_PCH:
   4829 	case WM_T_PCH2:	/* XXX 14K? */
   4830 	case WM_T_PCH_LPT:
   4831 	case WM_T_PCH_SPT:
   4832 	case WM_T_PCH_CNP:
   4833 		sc->sc_pba = PBA_26K;
   4834 		break;
   4835 	default:
   4836 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4837 		    PBA_40K : PBA_48K;
   4838 		break;
   4839 	}
   4840 	/*
   4841 	 * Only old or non-multiqueue devices have the PBA register
   4842 	 * XXX Need special handling for 82575.
   4843 	 */
   4844 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4845 	    || (sc->sc_type == WM_T_82575))
   4846 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4847 
   4848 	/* Prevent the PCI-E bus from sticking */
   4849 	if (sc->sc_flags & WM_F_PCIE) {
   4850 		int timeout = 800;
   4851 
   4852 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4853 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4854 
   4855 		while (timeout--) {
   4856 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4857 			    == 0)
   4858 				break;
   4859 			delay(100);
   4860 		}
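         		/*
         		 * The poll above waits up to 800 * 100us = 80ms for
         		 * the GIO Master Enable status bit to clear.
         		 */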
   4861 		if (timeout == 0)
   4862 			device_printf(sc->sc_dev,
   4863 			    "failed to disable busmastering\n");
   4864 	}
   4865 
   4866 	/* Set the completion timeout for interface */
   4867 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4868 	    || (sc->sc_type == WM_T_82580)
   4869 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4870 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4871 		wm_set_pcie_completion_timeout(sc);
   4872 
   4873 	/* Clear interrupt */
   4874 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4875 	if (wm_is_using_msix(sc)) {
   4876 		if (sc->sc_type != WM_T_82574) {
   4877 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4878 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4879 		} else
   4880 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4881 	}
   4882 
   4883 	/* Stop the transmit and receive processes. */
   4884 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4885 	sc->sc_rctl &= ~RCTL_EN;
   4886 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4887 	CSR_WRITE_FLUSH(sc);
   4888 
   4889 	/* XXX set_tbi_sbp_82543() */
   4890 
   4891 	delay(10*1000);
   4892 
   4893 	/* Must acquire the MDIO ownership before MAC reset */
   4894 	switch (sc->sc_type) {
   4895 	case WM_T_82573:
   4896 	case WM_T_82574:
   4897 	case WM_T_82583:
   4898 		error = wm_get_hw_semaphore_82573(sc);
   4899 		break;
   4900 	default:
   4901 		break;
   4902 	}
   4903 
   4904 	/*
   4905 	 * 82541 Errata 29? & 82547 Errata 28?
   4906 	 * See also the description about PHY_RST bit in CTRL register
   4907 	 * in 8254x_GBe_SDM.pdf.
   4908 	 */
   4909 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4910 		CSR_WRITE(sc, WMREG_CTRL,
   4911 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4912 		CSR_WRITE_FLUSH(sc);
   4913 		delay(5000);
   4914 	}
   4915 
   4916 	switch (sc->sc_type) {
   4917 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4918 	case WM_T_82541:
   4919 	case WM_T_82541_2:
   4920 	case WM_T_82547:
   4921 	case WM_T_82547_2:
   4922 		/*
   4923 		 * On some chipsets, a reset through a memory-mapped write
   4924 		 * cycle can cause the chip to reset before completing the
   4925 		 * write cycle. This causes major headache that can be avoided
   4926 		 * by issuing the reset via indirect register writes through
   4927 		 * I/O space.
   4928 		 *
   4929 		 * So, if we successfully mapped the I/O BAR at attach time,
   4930 		 * use that. Otherwise, try our luck with a memory-mapped
   4931 		 * reset.
   4932 		 */
   4933 		if (sc->sc_flags & WM_F_IOH_VALID)
   4934 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4935 		else
   4936 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4937 		break;
   4938 	case WM_T_82545_3:
   4939 	case WM_T_82546_3:
   4940 		/* Use the shadow control register on these chips. */
   4941 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4942 		break;
   4943 	case WM_T_80003:
   4944 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4945 		sc->phy.acquire(sc);
   4946 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4947 		sc->phy.release(sc);
   4948 		break;
   4949 	case WM_T_ICH8:
   4950 	case WM_T_ICH9:
   4951 	case WM_T_ICH10:
   4952 	case WM_T_PCH:
   4953 	case WM_T_PCH2:
   4954 	case WM_T_PCH_LPT:
   4955 	case WM_T_PCH_SPT:
   4956 	case WM_T_PCH_CNP:
   4957 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4958 		if (wm_phy_resetisblocked(sc) == false) {
   4959 			/*
   4960 			 * Gate automatic PHY configuration by hardware on
   4961 			 * non-managed 82579
   4962 			 */
   4963 			if ((sc->sc_type == WM_T_PCH2)
   4964 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4965 				== 0))
   4966 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4967 
   4968 			reg |= CTRL_PHY_RESET;
   4969 			phy_reset = 1;
   4970 		} else
   4971 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4972 		sc->phy.acquire(sc);
   4973 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4974 		/* Don't insert a completion barrier when reset */
   4975 		delay(20*1000);
   4976 		mutex_exit(sc->sc_ich_phymtx);
   4977 		break;
   4978 	case WM_T_82580:
   4979 	case WM_T_I350:
   4980 	case WM_T_I354:
   4981 	case WM_T_I210:
   4982 	case WM_T_I211:
   4983 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4984 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4985 			CSR_WRITE_FLUSH(sc);
   4986 		delay(5000);
   4987 		break;
   4988 	case WM_T_82542_2_0:
   4989 	case WM_T_82542_2_1:
   4990 	case WM_T_82543:
   4991 	case WM_T_82540:
   4992 	case WM_T_82545:
   4993 	case WM_T_82546:
   4994 	case WM_T_82571:
   4995 	case WM_T_82572:
   4996 	case WM_T_82573:
   4997 	case WM_T_82574:
   4998 	case WM_T_82575:
   4999 	case WM_T_82576:
   5000 	case WM_T_82583:
   5001 	default:
   5002 		/* Everything else can safely use the documented method. */
   5003 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5004 		break;
   5005 	}
   5006 
   5007 	/* Must release the MDIO ownership after MAC reset */
   5008 	switch (sc->sc_type) {
   5009 	case WM_T_82573:
   5010 	case WM_T_82574:
   5011 	case WM_T_82583:
   5012 		if (error == 0)
   5013 			wm_put_hw_semaphore_82573(sc);
   5014 		break;
   5015 	default:
   5016 		break;
   5017 	}
   5018 
   5019 	/* Set Phy Config Counter to 50msec */
   5020 	if (sc->sc_type == WM_T_PCH2) {
   5021 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5022 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5023 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5024 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5025 	}
   5026 
   5027 	if (phy_reset != 0)
   5028 		wm_get_cfg_done(sc);
   5029 
   5030 	/* Reload EEPROM */
   5031 	switch (sc->sc_type) {
   5032 	case WM_T_82542_2_0:
   5033 	case WM_T_82542_2_1:
   5034 	case WM_T_82543:
   5035 	case WM_T_82544:
   5036 		delay(10);
   5037 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5038 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5039 		CSR_WRITE_FLUSH(sc);
   5040 		delay(2000);
   5041 		break;
   5042 	case WM_T_82540:
   5043 	case WM_T_82545:
   5044 	case WM_T_82545_3:
   5045 	case WM_T_82546:
   5046 	case WM_T_82546_3:
   5047 		delay(5*1000);
   5048 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5049 		break;
   5050 	case WM_T_82541:
   5051 	case WM_T_82541_2:
   5052 	case WM_T_82547:
   5053 	case WM_T_82547_2:
   5054 		delay(20000);
   5055 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5056 		break;
   5057 	case WM_T_82571:
   5058 	case WM_T_82572:
   5059 	case WM_T_82573:
   5060 	case WM_T_82574:
   5061 	case WM_T_82583:
   5062 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5063 			delay(10);
   5064 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5065 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5066 			CSR_WRITE_FLUSH(sc);
   5067 		}
   5068 		/* check EECD_EE_AUTORD */
   5069 		wm_get_auto_rd_done(sc);
   5070 		/*
   5071 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5072 		 * is set.
   5073 		 */
   5074 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5075 		    || (sc->sc_type == WM_T_82583))
   5076 			delay(25*1000);
   5077 		break;
   5078 	case WM_T_82575:
   5079 	case WM_T_82576:
   5080 	case WM_T_82580:
   5081 	case WM_T_I350:
   5082 	case WM_T_I354:
   5083 	case WM_T_I210:
   5084 	case WM_T_I211:
   5085 	case WM_T_80003:
   5086 		/* check EECD_EE_AUTORD */
   5087 		wm_get_auto_rd_done(sc);
   5088 		break;
   5089 	case WM_T_ICH8:
   5090 	case WM_T_ICH9:
   5091 	case WM_T_ICH10:
   5092 	case WM_T_PCH:
   5093 	case WM_T_PCH2:
   5094 	case WM_T_PCH_LPT:
   5095 	case WM_T_PCH_SPT:
   5096 	case WM_T_PCH_CNP:
   5097 		break;
   5098 	default:
   5099 		panic("%s: unknown type\n", __func__);
   5100 	}
   5101 
   5102 	/* Check whether EEPROM is present or not */
   5103 	switch (sc->sc_type) {
   5104 	case WM_T_82575:
   5105 	case WM_T_82576:
   5106 	case WM_T_82580:
   5107 	case WM_T_I350:
   5108 	case WM_T_I354:
   5109 	case WM_T_ICH8:
   5110 	case WM_T_ICH9:
   5111 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5112 			/* Not found */
   5113 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5114 			if (sc->sc_type == WM_T_82575)
   5115 				wm_reset_init_script_82575(sc);
   5116 		}
   5117 		break;
   5118 	default:
   5119 		break;
   5120 	}
   5121 
   5122 	if (phy_reset != 0)
   5123 		wm_phy_post_reset(sc);
   5124 
   5125 	if ((sc->sc_type == WM_T_82580)
   5126 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5127 		/* Clear global device reset status bit */
   5128 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5129 	}
   5130 
   5131 	/* Clear any pending interrupt events. */
   5132 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5133 	reg = CSR_READ(sc, WMREG_ICR);
   5134 	if (wm_is_using_msix(sc)) {
   5135 		if (sc->sc_type != WM_T_82574) {
   5136 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5137 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5138 		} else
   5139 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5140 	}
   5141 
   5142 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5143 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5144 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5145 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5146 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5147 		reg |= KABGTXD_BGSQLBIAS;
   5148 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5149 	}
   5150 
   5151 	/* Reload sc_ctrl */
   5152 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5153 
   5154 	wm_set_eee(sc);
   5155 
   5156 	/*
   5157 	 * For PCH, this write will make sure that any noise will be detected
   5158 	 * as a CRC error and be dropped rather than show up as a bad packet
   5159 	 * to the DMA engine
   5160 	 */
   5161 	if (sc->sc_type == WM_T_PCH)
   5162 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5163 
   5164 	if (sc->sc_type >= WM_T_82544)
   5165 		CSR_WRITE(sc, WMREG_WUC, 0);
   5166 
   5167 	if (sc->sc_type < WM_T_82575)
   5168 		wm_disable_aspm(sc); /* Workaround for some chips */
   5169 
   5170 	wm_reset_mdicnfg_82580(sc);
   5171 
   5172 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5173 		wm_pll_workaround_i210(sc);
   5174 
   5175 	if (sc->sc_type == WM_T_80003) {
   5176 		/* Default to TRUE to enable the MDIC W/A */
   5177 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5178 
   5179 		rv = wm_kmrn_readreg(sc,
   5180 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5181 		if (rv == 0) {
   5182 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5183 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5184 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5185 			else
   5186 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5187 		}
   5188 	}
   5189 }
   5190 
   5191 /*
   5192  * wm_add_rxbuf:
   5193  *
    5194  *	Add a receive buffer to the indicated descriptor.
   5195  */
   5196 static int
   5197 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5198 {
   5199 	struct wm_softc *sc = rxq->rxq_sc;
   5200 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5201 	struct mbuf *m;
   5202 	int error;
   5203 
   5204 	KASSERT(mutex_owned(rxq->rxq_lock));
   5205 
   5206 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5207 	if (m == NULL)
   5208 		return ENOBUFS;
   5209 
   5210 	MCLGET(m, M_DONTWAIT);
   5211 	if ((m->m_flags & M_EXT) == 0) {
   5212 		m_freem(m);
   5213 		return ENOBUFS;
   5214 	}
   5215 
   5216 	if (rxs->rxs_mbuf != NULL)
   5217 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5218 
   5219 	rxs->rxs_mbuf = m;
   5220 
   5221 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5222 	/*
   5223 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5224 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5225 	 */
   5226 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5227 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5228 	if (error) {
   5229 		/* XXX XXX XXX */
   5230 		aprint_error_dev(sc->sc_dev,
   5231 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5232 		panic("wm_add_rxbuf");
   5233 	}
   5234 
   5235 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5236 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5237 
   5238 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5239 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5240 			wm_init_rxdesc(rxq, idx);
   5241 	} else
   5242 		wm_init_rxdesc(rxq, idx);
   5243 
   5244 	return 0;
   5245 }
   5246 
   5247 /*
   5248  * wm_rxdrain:
   5249  *
   5250  *	Drain the receive queue.
   5251  */
   5252 static void
   5253 wm_rxdrain(struct wm_rxqueue *rxq)
   5254 {
   5255 	struct wm_softc *sc = rxq->rxq_sc;
   5256 	struct wm_rxsoft *rxs;
   5257 	int i;
   5258 
   5259 	KASSERT(mutex_owned(rxq->rxq_lock));
   5260 
   5261 	for (i = 0; i < WM_NRXDESC; i++) {
   5262 		rxs = &rxq->rxq_soft[i];
   5263 		if (rxs->rxs_mbuf != NULL) {
   5264 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5265 			m_freem(rxs->rxs_mbuf);
   5266 			rxs->rxs_mbuf = NULL;
   5267 		}
   5268 	}
   5269 }
   5270 
   5271 /*
   5272  * Setup registers for RSS.
   5273  *
    5274  * XXX no VMDq support yet
   5275  */
   5276 static void
   5277 wm_init_rss(struct wm_softc *sc)
   5278 {
   5279 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5280 	int i;
   5281 
   5282 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5283 
   5284 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5285 		unsigned int qid, reta_ent;
   5286 
   5287 		qid  = i % sc->sc_nqueues;
   5288 		switch (sc->sc_type) {
   5289 		case WM_T_82574:
   5290 			reta_ent = __SHIFTIN(qid,
   5291 			    RETA_ENT_QINDEX_MASK_82574);
   5292 			break;
   5293 		case WM_T_82575:
   5294 			reta_ent = __SHIFTIN(qid,
   5295 			    RETA_ENT_QINDEX1_MASK_82575);
   5296 			break;
   5297 		default:
   5298 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5299 			break;
   5300 		}
   5301 
   5302 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5303 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5304 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5305 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5306 	}
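         	/*
         	 * Example: with sc_nqueues == 4, the loop above fills the
         	 * redirection table round-robin as 0, 1, 2, 3, 0, 1, ... so
         	 * the low bits of the RSS hash spread flows evenly across
         	 * the four queues.
         	 */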
   5307 
   5308 	rss_getkey((uint8_t *)rss_key);
   5309 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5310 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5311 
   5312 	if (sc->sc_type == WM_T_82574)
   5313 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5314 	else
   5315 		mrqc = MRQC_ENABLE_RSS_MQ;
   5316 
   5317 	/*
   5318 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5319 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5320 	 */
   5321 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5322 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5323 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5324 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5325 
   5326 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5327 }
   5328 
   5329 /*
    5330  * Adjust the TX and RX queue numbers which the system actually uses.
    5331  *
    5332  * The numbers are affected by the parameters below.
    5333  *     - The number of hardware queues
   5334  *     - The number of MSI-X vectors (= "nvectors" argument)
   5335  *     - ncpu
   5336  */
   5337 static void
   5338 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5339 {
   5340 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5341 
   5342 	if (nvectors < 2) {
   5343 		sc->sc_nqueues = 1;
   5344 		return;
   5345 	}
   5346 
   5347 	switch (sc->sc_type) {
   5348 	case WM_T_82572:
   5349 		hw_ntxqueues = 2;
   5350 		hw_nrxqueues = 2;
   5351 		break;
   5352 	case WM_T_82574:
   5353 		hw_ntxqueues = 2;
   5354 		hw_nrxqueues = 2;
   5355 		break;
   5356 	case WM_T_82575:
   5357 		hw_ntxqueues = 4;
   5358 		hw_nrxqueues = 4;
   5359 		break;
   5360 	case WM_T_82576:
   5361 		hw_ntxqueues = 16;
   5362 		hw_nrxqueues = 16;
   5363 		break;
   5364 	case WM_T_82580:
   5365 	case WM_T_I350:
   5366 	case WM_T_I354:
   5367 		hw_ntxqueues = 8;
   5368 		hw_nrxqueues = 8;
   5369 		break;
   5370 	case WM_T_I210:
   5371 		hw_ntxqueues = 4;
   5372 		hw_nrxqueues = 4;
   5373 		break;
   5374 	case WM_T_I211:
   5375 		hw_ntxqueues = 2;
   5376 		hw_nrxqueues = 2;
   5377 		break;
   5378 		/*
    5379 		 * As the ethernet controllers below do not support MSI-X,
    5380 		 * this driver does not use multiqueue on them.
   5381 		 *     - WM_T_80003
   5382 		 *     - WM_T_ICH8
   5383 		 *     - WM_T_ICH9
   5384 		 *     - WM_T_ICH10
   5385 		 *     - WM_T_PCH
   5386 		 *     - WM_T_PCH2
   5387 		 *     - WM_T_PCH_LPT
   5388 		 */
   5389 	default:
   5390 		hw_ntxqueues = 1;
   5391 		hw_nrxqueues = 1;
   5392 		break;
   5393 	}
   5394 
   5395 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5396 
   5397 	/*
    5398 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    5399 	 * the number of queues actually used.
   5400 	 */
   5401 	if (nvectors < hw_nqueues + 1)
   5402 		sc->sc_nqueues = nvectors - 1;
   5403 	else
   5404 		sc->sc_nqueues = hw_nqueues;
   5405 
   5406 	/*
    5407 	 * As more queues than CPUs cannot improve scaling, we limit
    5408 	 * the number of queues actually used.
   5409 	 */
   5410 	if (ncpu < sc->sc_nqueues)
   5411 		sc->sc_nqueues = ncpu;
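         	/*
         	 * Worked example: an 82576 (16 hardware queue pairs) given
         	 * nvectors == 5 on an 8-CPU machine ends up with
         	 * sc_nqueues == 4 (= nvectors - 1); one vector is reserved
         	 * for the link interrupt.
         	 */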
   5412 }
   5413 
   5414 static inline bool
   5415 wm_is_using_msix(struct wm_softc *sc)
   5416 {
   5417 
   5418 	return (sc->sc_nintrs > 1);
   5419 }
   5420 
   5421 static inline bool
   5422 wm_is_using_multiqueue(struct wm_softc *sc)
   5423 {
   5424 
   5425 	return (sc->sc_nqueues > 1);
   5426 }
   5427 
   5428 static int
   5429 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5430 {
   5431 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5432 
   5433 	wmq->wmq_id = qidx;
   5434 	wmq->wmq_intr_idx = intr_idx;
   5435 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5436 #ifdef WM_MPSAFE
   5437 	    | SOFTINT_MPSAFE
   5438 #endif
   5439 	    , wm_handle_queue, wmq);
   5440 	if (wmq->wmq_si != NULL)
   5441 		return 0;
   5442 
   5443 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5444 	    wmq->wmq_id);
   5445 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5446 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5447 	return ENOMEM;
   5448 }
   5449 
   5450 /*
   5451  * Both single interrupt MSI and INTx can use this function.
   5452  */
   5453 static int
   5454 wm_setup_legacy(struct wm_softc *sc)
   5455 {
   5456 	pci_chipset_tag_t pc = sc->sc_pc;
   5457 	const char *intrstr = NULL;
   5458 	char intrbuf[PCI_INTRSTR_LEN];
   5459 	int error;
   5460 
   5461 	error = wm_alloc_txrx_queues(sc);
   5462 	if (error) {
   5463 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5464 		    error);
   5465 		return ENOMEM;
   5466 	}
   5467 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5468 	    sizeof(intrbuf));
   5469 #ifdef WM_MPSAFE
   5470 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5471 #endif
   5472 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5473 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5474 	if (sc->sc_ihs[0] == NULL) {
   5475 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5476 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5477 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5478 		return ENOMEM;
   5479 	}
   5480 
   5481 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5482 	sc->sc_nintrs = 1;
   5483 
   5484 	return wm_softint_establish(sc, 0, 0);
   5485 }
   5486 
   5487 static int
   5488 wm_setup_msix(struct wm_softc *sc)
   5489 {
   5490 	void *vih;
   5491 	kcpuset_t *affinity;
   5492 	int qidx, error, intr_idx, txrx_established;
   5493 	pci_chipset_tag_t pc = sc->sc_pc;
   5494 	const char *intrstr = NULL;
   5495 	char intrbuf[PCI_INTRSTR_LEN];
   5496 	char intr_xname[INTRDEVNAMEBUF];
   5497 
   5498 	if (sc->sc_nqueues < ncpu) {
   5499 		/*
    5500 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5501 		 * interrupts starts from CPU#1.
   5502 		 */
   5503 		sc->sc_affinity_offset = 1;
   5504 	} else {
   5505 		/*
    5506 		 * In this case, this device uses all CPUs, so for readability
    5507 		 * we match the affinity cpu_index to the MSI-X vector number.
   5508 		 */
   5509 		sc->sc_affinity_offset = 0;
   5510 	}
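         	/*
         	 * For example (a sketch): with 4 queues on an 8-CPU machine,
         	 * the Tx/Rx vectors below are pinned round-robin to
         	 * CPU#1..CPU#4 while the link interrupt keeps the default
         	 * affinity.
         	 */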
   5511 
   5512 	error = wm_alloc_txrx_queues(sc);
   5513 	if (error) {
   5514 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5515 		    error);
   5516 		return ENOMEM;
   5517 	}
   5518 
   5519 	kcpuset_create(&affinity, false);
   5520 	intr_idx = 0;
   5521 
   5522 	/*
   5523 	 * TX and RX
   5524 	 */
   5525 	txrx_established = 0;
   5526 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5527 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5528 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5529 
   5530 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5531 		    sizeof(intrbuf));
   5532 #ifdef WM_MPSAFE
   5533 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5534 		    PCI_INTR_MPSAFE, true);
   5535 #endif
   5536 		memset(intr_xname, 0, sizeof(intr_xname));
   5537 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5538 		    device_xname(sc->sc_dev), qidx);
   5539 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5540 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5541 		if (vih == NULL) {
   5542 			aprint_error_dev(sc->sc_dev,
   5543 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5544 			    intrstr ? " at " : "",
   5545 			    intrstr ? intrstr : "");
   5546 
   5547 			goto fail;
   5548 		}
   5549 		kcpuset_zero(affinity);
   5550 		/* Round-robin affinity */
   5551 		kcpuset_set(affinity, affinity_to);
   5552 		error = interrupt_distribute(vih, affinity, NULL);
   5553 		if (error == 0) {
   5554 			aprint_normal_dev(sc->sc_dev,
   5555 			    "for TX and RX interrupting at %s affinity to %u\n",
   5556 			    intrstr, affinity_to);
   5557 		} else {
   5558 			aprint_normal_dev(sc->sc_dev,
   5559 			    "for TX and RX interrupting at %s\n", intrstr);
   5560 		}
   5561 		sc->sc_ihs[intr_idx] = vih;
   5562 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5563 			goto fail;
   5564 		txrx_established++;
   5565 		intr_idx++;
   5566 	}
   5567 
   5568 	/* LINK */
   5569 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5570 	    sizeof(intrbuf));
   5571 #ifdef WM_MPSAFE
   5572 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5573 #endif
   5574 	memset(intr_xname, 0, sizeof(intr_xname));
   5575 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5576 	    device_xname(sc->sc_dev));
   5577 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5578 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5579 	if (vih == NULL) {
   5580 		aprint_error_dev(sc->sc_dev,
   5581 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5582 		    intrstr ? " at " : "",
   5583 		    intrstr ? intrstr : "");
   5584 
   5585 		goto fail;
   5586 	}
   5587 	/* Keep default affinity to LINK interrupt */
   5588 	aprint_normal_dev(sc->sc_dev,
   5589 	    "for LINK interrupting at %s\n", intrstr);
   5590 	sc->sc_ihs[intr_idx] = vih;
   5591 	sc->sc_link_intr_idx = intr_idx;
   5592 
   5593 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5594 	kcpuset_destroy(affinity);
   5595 	return 0;
   5596 
   5597  fail:
   5598 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5599 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5600 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5601 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5602 	}
   5603 
   5604 	kcpuset_destroy(affinity);
   5605 	return ENOMEM;
   5606 }
   5607 
   5608 static void
   5609 wm_unset_stopping_flags(struct wm_softc *sc)
   5610 {
   5611 	int i;
   5612 
   5613 	KASSERT(WM_CORE_LOCKED(sc));
   5614 
   5615 	/* Must unset stopping flags in ascending order. */
   5616 	for (i = 0; i < sc->sc_nqueues; i++) {
   5617 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5618 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5619 
   5620 		mutex_enter(txq->txq_lock);
   5621 		txq->txq_stopping = false;
   5622 		mutex_exit(txq->txq_lock);
   5623 
   5624 		mutex_enter(rxq->rxq_lock);
   5625 		rxq->rxq_stopping = false;
   5626 		mutex_exit(rxq->rxq_lock);
   5627 	}
   5628 
   5629 	sc->sc_core_stopping = false;
   5630 }
   5631 
   5632 static void
   5633 wm_set_stopping_flags(struct wm_softc *sc)
   5634 {
   5635 	int i;
   5636 
   5637 	KASSERT(WM_CORE_LOCKED(sc));
   5638 
   5639 	sc->sc_core_stopping = true;
   5640 
   5641 	/* Must set stopping flags in ascending order. */
   5642 	for (i = 0; i < sc->sc_nqueues; i++) {
   5643 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5644 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5645 
   5646 		mutex_enter(rxq->rxq_lock);
   5647 		rxq->rxq_stopping = true;
   5648 		mutex_exit(rxq->rxq_lock);
   5649 
   5650 		mutex_enter(txq->txq_lock);
   5651 		txq->txq_stopping = true;
   5652 		mutex_exit(txq->txq_lock);
   5653 	}
   5654 }
   5655 
   5656 /*
   5657  * Write interrupt interval value to ITR or EITR
   5658  */
   5659 static void
   5660 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5661 {
   5662 
   5663 	if (!wmq->wmq_set_itr)
   5664 		return;
   5665 
   5666 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5667 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5668 
   5669 		/*
    5670 		 * The 82575 doesn't have the CNT_INGR field,
    5671 		 * so overwrite the counter field in software.
   5672 		 */
   5673 		if (sc->sc_type == WM_T_82575)
   5674 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5675 		else
   5676 			eitr |= EITR_CNT_INGR;
   5677 
   5678 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5679 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5680 		/*
    5681 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5682 		 * the multiqueue function with MSI-X.
   5683 		 */
   5684 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5685 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5686 	} else {
   5687 		KASSERT(wmq->wmq_id == 0);
   5688 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5689 	}
   5690 
   5691 	wmq->wmq_set_itr = false;
   5692 }
   5693 
   5694 /*
   5695  * TODO
    5696  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5697  * however, it does not fit wm(4), so AIM stays disabled until we find
    5698  * an appropriate ITR calculation.
   5699  */
   5700 /*
    5701  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5702  * write to the register. This function does not write the ITR/EITR register.
   5703  */
   5704 static void
   5705 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5706 {
   5707 #ifdef NOTYET
   5708 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5709 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5710 	uint32_t avg_size = 0;
   5711 	uint32_t new_itr;
   5712 
   5713 	if (rxq->rxq_packets)
    5714 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   5715 	if (txq->txq_packets)
   5716 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5717 
   5718 	if (avg_size == 0) {
   5719 		new_itr = 450; /* restore default value */
   5720 		goto out;
   5721 	}
   5722 
   5723 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5724 	avg_size += 24;
   5725 
   5726 	/* Don't starve jumbo frames */
   5727 	avg_size = uimin(avg_size, 3000);
   5728 
   5729 	/* Give a little boost to mid-size frames */
   5730 	if ((avg_size > 300) && (avg_size < 1200))
   5731 		new_itr = avg_size / 3;
   5732 	else
   5733 		new_itr = avg_size / 2;
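         	/*
         	 * Example: an 800-byte average frame becomes 824 after the
         	 * 24-byte adjustment; that falls in the mid-size band, so
         	 * new_itr = 824 / 3 = 274 (before the * 4 scaling below).
         	 */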
   5734 
   5735 out:
   5736 	/*
    5737 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5738 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5739 	 */
   5740 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5741 		new_itr *= 4;
   5742 
   5743 	if (new_itr != wmq->wmq_itr) {
   5744 		wmq->wmq_itr = new_itr;
   5745 		wmq->wmq_set_itr = true;
   5746 	} else
   5747 		wmq->wmq_set_itr = false;
   5748 
   5749 	rxq->rxq_packets = 0;
   5750 	rxq->rxq_bytes = 0;
   5751 	txq->txq_packets = 0;
   5752 	txq->txq_bytes = 0;
   5753 #endif
   5754 }
   5755 
   5756 static void
   5757 wm_init_sysctls(struct wm_softc *sc)
   5758 {
   5759 	struct sysctllog **log;
   5760 	const struct sysctlnode *rnode, *cnode;
   5761 	int rv;
   5762 	const char *dvname;
   5763 
   5764 	log = &sc->sc_sysctllog;
   5765 	dvname = device_xname(sc->sc_dev);
   5766 
   5767 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5768 	    0, CTLTYPE_NODE, dvname,
   5769 	    SYSCTL_DESCR("wm information and settings"),
   5770 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5771 	if (rv != 0)
   5772 		goto err;
   5773 
   5774 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
    5775 	    CTLTYPE_BOOL, "txrx_workqueue",
         	    SYSCTL_DESCR("Use workqueue for packet processing"),
   5776 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5777 	if (rv != 0)
   5778 		goto teardown;
   5779 
   5780 	return;
   5781 
   5782 teardown:
   5783 	sysctl_teardown(log);
   5784 err:
   5785 	sc->sc_sysctllog = NULL;
   5786 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5787 	    __func__, rv);
   5788 }
   5789 
   5790 /*
   5791  * wm_init:		[ifnet interface function]
   5792  *
   5793  *	Initialize the interface.
   5794  */
   5795 static int
   5796 wm_init(struct ifnet *ifp)
   5797 {
   5798 	struct wm_softc *sc = ifp->if_softc;
   5799 	int ret;
   5800 
   5801 	WM_CORE_LOCK(sc);
   5802 	ret = wm_init_locked(ifp);
   5803 	WM_CORE_UNLOCK(sc);
   5804 
   5805 	return ret;
   5806 }
   5807 
   5808 static int
   5809 wm_init_locked(struct ifnet *ifp)
   5810 {
   5811 	struct wm_softc *sc = ifp->if_softc;
   5812 	struct ethercom *ec = &sc->sc_ethercom;
   5813 	int i, j, trynum, error = 0;
   5814 	uint32_t reg, sfp_mask = 0;
   5815 
   5816 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5817 		device_xname(sc->sc_dev), __func__));
   5818 	KASSERT(WM_CORE_LOCKED(sc));
   5819 
   5820 	/*
    5821 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5822 	 * There is a small but measurable benefit to avoiding the adjustment
   5823 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5824 	 * on such platforms.  One possibility is that the DMA itself is
   5825 	 * slightly more efficient if the front of the entire packet (instead
   5826 	 * of the front of the headers) is aligned.
   5827 	 *
   5828 	 * Note we must always set align_tweak to 0 if we are using
   5829 	 * jumbo frames.
   5830 	 */
   5831 #ifdef __NO_STRICT_ALIGNMENT
   5832 	sc->sc_align_tweak = 0;
   5833 #else
   5834 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5835 		sc->sc_align_tweak = 0;
   5836 	else
   5837 		sc->sc_align_tweak = 2;
   5838 #endif /* __NO_STRICT_ALIGNMENT */
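         	/*
         	 * Worked example (assuming 2KB mbuf clusters): for a
         	 * 1500-byte MTU, 1500 + 14 (header) + 4 (CRC) = 1518 fits in
         	 * MCLBYTES - 2 = 2046, so align_tweak = 2. Offsetting the
         	 * 14-byte Ethernet header by two bytes leaves the IP header
         	 * 4-byte aligned.
         	 */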
   5839 
   5840 	/* Cancel any pending I/O. */
   5841 	wm_stop_locked(ifp, 0);
   5842 
   5843 	/* Update statistics before reset */
   5844 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   5845 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   5846 
   5847 	/* PCH_SPT hardware workaround */
   5848 	if (sc->sc_type == WM_T_PCH_SPT)
   5849 		wm_flush_desc_rings(sc);
   5850 
   5851 	/* Reset the chip to a known state. */
   5852 	wm_reset(sc);
   5853 
   5854 	/*
   5855 	 * AMT based hardware can now take control from firmware
   5856 	 * Do this after reset.
   5857 	 */
   5858 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5859 		wm_get_hw_control(sc);
   5860 
   5861 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5862 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5863 		wm_legacy_irq_quirk_spt(sc);
   5864 
   5865 	/* Init hardware bits */
   5866 	wm_initialize_hardware_bits(sc);
   5867 
   5868 	/* Reset the PHY. */
   5869 	if (sc->sc_flags & WM_F_HAS_MII)
   5870 		wm_gmii_reset(sc);
   5871 
   5872 	if (sc->sc_type >= WM_T_ICH8) {
   5873 		reg = CSR_READ(sc, WMREG_GCR);
   5874 		/*
   5875 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5876 		 * default after reset.
   5877 		 */
   5878 		if (sc->sc_type == WM_T_ICH8)
   5879 			reg |= GCR_NO_SNOOP_ALL;
   5880 		else
   5881 			reg &= ~GCR_NO_SNOOP_ALL;
   5882 		CSR_WRITE(sc, WMREG_GCR, reg);
   5883 	}
   5884 	if ((sc->sc_type >= WM_T_ICH8)
   5885 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5886 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5887 
   5888 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5889 		reg |= CTRL_EXT_RO_DIS;
   5890 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5891 	}
   5892 
   5893 	/* Calculate (E)ITR value */
   5894 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5895 		/*
   5896 		 * For NEWQUEUE's EITR (except for 82575).
    5897 		 * 82575's EITR should use the same throttling value as other
    5898 		 * old controllers' ITR because the interrupt/sec calculation
    5899 		 * is the same, that is, 1,000,000,000 / (N * 256).
    5900 		 *
    5901 		 * 82574's EITR should use the same throttling value as ITR.
    5902 		 *
    5903 		 * For N interrupts/sec, set this value to:
    5904 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5905 		 */
   5906 		sc->sc_itr_init = 450;
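         		/*
         		 * By the formula above, a value of 450 corresponds to
         		 * roughly 1,000,000 / 450 ~= 2222 interrupts/sec.
         		 */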
   5907 	} else if (sc->sc_type >= WM_T_82543) {
   5908 		/*
   5909 		 * Set up the interrupt throttling register (units of 256ns)
   5910 		 * Note that a footnote in Intel's documentation says this
   5911 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5912 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5913 		 * that that is also true for the 1024ns units of the other
   5914 		 * interrupt-related timer registers -- so, really, we ought
   5915 		 * to divide this value by 4 when the link speed is low.
   5916 		 *
   5917 		 * XXX implement this division at link speed change!
   5918 		 */
   5919 
   5920 		/*
   5921 		 * For N interrupts/sec, set this value to:
   5922 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5923 		 * absolute and packet timer values to this value
   5924 		 * divided by 4 to get "simple timer" behavior.
   5925 		 */
   5926 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
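         		/*
         		 * Check: 1,000,000,000 / (1500 * 256) ~= 2604
         		 * interrupts/sec, as noted above.
         		 */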
   5927 	}
   5928 
   5929 	error = wm_init_txrx_queues(sc);
   5930 	if (error)
   5931 		goto out;
   5932 
   5933 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   5934 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   5935 	    (sc->sc_type >= WM_T_82575))
   5936 		wm_serdes_power_up_link_82575(sc);
   5937 
   5938 	/* Clear out the VLAN table -- we don't use it (yet). */
   5939 	CSR_WRITE(sc, WMREG_VET, 0);
   5940 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5941 		trynum = 10; /* Due to hw errata */
   5942 	else
   5943 		trynum = 1;
   5944 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5945 		for (j = 0; j < trynum; j++)
   5946 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5947 
   5948 	/*
   5949 	 * Set up flow-control parameters.
   5950 	 *
   5951 	 * XXX Values could probably stand some tuning.
   5952 	 */
   5953 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5954 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5955 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5956 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5957 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5958 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5959 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5960 	}
   5961 
   5962 	sc->sc_fcrtl = FCRTL_DFLT;
   5963 	if (sc->sc_type < WM_T_82543) {
   5964 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5965 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5966 	} else {
   5967 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5968 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5969 	}
   5970 
   5971 	if (sc->sc_type == WM_T_80003)
   5972 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5973 	else
   5974 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5975 
   5976 	/* Writes the control register. */
   5977 	wm_set_vlan(sc);
   5978 
   5979 	if (sc->sc_flags & WM_F_HAS_MII) {
   5980 		uint16_t kmreg;
   5981 
   5982 		switch (sc->sc_type) {
   5983 		case WM_T_80003:
   5984 		case WM_T_ICH8:
   5985 		case WM_T_ICH9:
   5986 		case WM_T_ICH10:
   5987 		case WM_T_PCH:
   5988 		case WM_T_PCH2:
   5989 		case WM_T_PCH_LPT:
   5990 		case WM_T_PCH_SPT:
   5991 		case WM_T_PCH_CNP:
   5992 			/*
   5993 			 * Set the mac to wait the maximum time between each
   5994 			 * iteration and increase the max iterations when
   5995 			 * polling the phy; this fixes erroneous timeouts at
   5996 			 * 10Mbps.
   5997 			 */
   5998 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5999 			    0xFFFF);
   6000 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6001 			    &kmreg);
   6002 			kmreg |= 0x3F;
   6003 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6004 			    kmreg);
   6005 			break;
   6006 		default:
   6007 			break;
   6008 		}
   6009 
   6010 		if (sc->sc_type == WM_T_80003) {
   6011 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6012 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6013 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6014 
   6015 			/* Bypass RX and TX FIFO's */
   6016 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6017 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6018 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6019 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6020 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6021 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6022 		}
   6023 	}
   6024 #if 0
   6025 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6026 #endif
   6027 
   6028 	/* Set up checksum offload parameters. */
   6029 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6030 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6031 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6032 		reg |= RXCSUM_IPOFL;
   6033 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6034 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6035 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6036 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6037 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6038 
   6039 	/* Set registers about MSI-X */
   6040 	if (wm_is_using_msix(sc)) {
   6041 		uint32_t ivar, qintr_idx;
   6042 		struct wm_queue *wmq;
   6043 		unsigned int qid;
   6044 
   6045 		if (sc->sc_type == WM_T_82575) {
   6046 			/* Interrupt control */
   6047 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6048 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6049 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6050 
   6051 			/* TX and RX */
   6052 			for (i = 0; i < sc->sc_nqueues; i++) {
   6053 				wmq = &sc->sc_queue[i];
   6054 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6055 				    EITR_TX_QUEUE(wmq->wmq_id)
   6056 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6057 			}
   6058 			/* Link status */
   6059 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6060 			    EITR_OTHER);
   6061 		} else if (sc->sc_type == WM_T_82574) {
   6062 			/* Interrupt control */
   6063 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6064 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6065 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6066 
   6067 			/*
    6068 			 * Work around an issue with spurious interrupts
    6069 			 * in MSI-X mode.
    6070 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6071 			 * been set yet, so re-initialize WMREG_RFCTL here.
   6072 			 */
   6073 			reg = CSR_READ(sc, WMREG_RFCTL);
   6074 			reg |= WMREG_RFCTL_ACKDIS;
   6075 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6076 
   6077 			ivar = 0;
   6078 			/* TX and RX */
   6079 			for (i = 0; i < sc->sc_nqueues; i++) {
   6080 				wmq = &sc->sc_queue[i];
   6081 				qid = wmq->wmq_id;
   6082 				qintr_idx = wmq->wmq_intr_idx;
   6083 
   6084 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6085 				    IVAR_TX_MASK_Q_82574(qid));
   6086 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6087 				    IVAR_RX_MASK_Q_82574(qid));
   6088 			}
   6089 			/* Link status */
   6090 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6091 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6092 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6093 		} else {
   6094 			/* Interrupt control */
   6095 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6096 			    | GPIE_EIAME | GPIE_PBA);
   6097 
   6098 			switch (sc->sc_type) {
   6099 			case WM_T_82580:
   6100 			case WM_T_I350:
   6101 			case WM_T_I354:
   6102 			case WM_T_I210:
   6103 			case WM_T_I211:
   6104 				/* TX and RX */
   6105 				for (i = 0; i < sc->sc_nqueues; i++) {
   6106 					wmq = &sc->sc_queue[i];
   6107 					qid = wmq->wmq_id;
   6108 					qintr_idx = wmq->wmq_intr_idx;
   6109 
   6110 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6111 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6112 					ivar |= __SHIFTIN((qintr_idx
   6113 						| IVAR_VALID),
   6114 					    IVAR_TX_MASK_Q(qid));
   6115 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6116 					ivar |= __SHIFTIN((qintr_idx
   6117 						| IVAR_VALID),
   6118 					    IVAR_RX_MASK_Q(qid));
   6119 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6120 				}
   6121 				break;
   6122 			case WM_T_82576:
   6123 				/* TX and RX */
   6124 				for (i = 0; i < sc->sc_nqueues; i++) {
   6125 					wmq = &sc->sc_queue[i];
   6126 					qid = wmq->wmq_id;
   6127 					qintr_idx = wmq->wmq_intr_idx;
   6128 
   6129 					ivar = CSR_READ(sc,
   6130 					    WMREG_IVAR_Q_82576(qid));
   6131 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6132 					ivar |= __SHIFTIN((qintr_idx
   6133 						| IVAR_VALID),
   6134 					    IVAR_TX_MASK_Q_82576(qid));
   6135 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6136 					ivar |= __SHIFTIN((qintr_idx
   6137 						| IVAR_VALID),
   6138 					    IVAR_RX_MASK_Q_82576(qid));
   6139 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6140 					    ivar);
   6141 				}
   6142 				break;
   6143 			default:
   6144 				break;
   6145 			}
   6146 
   6147 			/* Link status */
   6148 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6149 			    IVAR_MISC_OTHER);
   6150 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6151 		}
   6152 
   6153 		if (wm_is_using_multiqueue(sc)) {
   6154 			wm_init_rss(sc);
   6155 
    6156 			/*
    6157 			 * NOTE: Receive Full-Packet Checksum Offload is
    6158 			 * mutually exclusive with Multiqueue. However, this
    6159 			 * is not the same as TCP/IP checksums, which still
    6160 			 * work.
    6161 			 */
   6162 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6163 			reg |= RXCSUM_PCSD;
   6164 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6165 		}
   6166 	}
   6167 
   6168 	/* Set up the interrupt registers. */
   6169 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6170 
   6171 	/* Enable SFP module insertion interrupt if it's required */
   6172 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6173 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6174 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6175 		sfp_mask = ICR_GPI(0);
   6176 	}
   6177 
   6178 	if (wm_is_using_msix(sc)) {
   6179 		uint32_t mask;
   6180 		struct wm_queue *wmq;
   6181 
   6182 		switch (sc->sc_type) {
   6183 		case WM_T_82574:
   6184 			mask = 0;
   6185 			for (i = 0; i < sc->sc_nqueues; i++) {
   6186 				wmq = &sc->sc_queue[i];
   6187 				mask |= ICR_TXQ(wmq->wmq_id);
   6188 				mask |= ICR_RXQ(wmq->wmq_id);
   6189 			}
   6190 			mask |= ICR_OTHER;
   6191 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6192 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6193 			break;
   6194 		default:
   6195 			if (sc->sc_type == WM_T_82575) {
   6196 				mask = 0;
   6197 				for (i = 0; i < sc->sc_nqueues; i++) {
   6198 					wmq = &sc->sc_queue[i];
   6199 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6200 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6201 				}
   6202 				mask |= EITR_OTHER;
   6203 			} else {
   6204 				mask = 0;
   6205 				for (i = 0; i < sc->sc_nqueues; i++) {
   6206 					wmq = &sc->sc_queue[i];
   6207 					mask |= 1 << wmq->wmq_intr_idx;
   6208 				}
   6209 				mask |= 1 << sc->sc_link_intr_idx;
   6210 			}
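         			/*
         			 * EIAC selects causes that auto-clear when the
         			 * MSI-X vector fires, EIAM selects causes that are
         			 * automatically masked/unmasked, and EIMS enables
         			 * the selected causes.
         			 */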
   6211 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6212 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6213 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6214 
   6215 			/* For other interrupts */
   6216 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6217 			break;
   6218 		}
   6219 	} else {
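         		/*
         		 * Legacy/MSI mode: enable Tx descriptor written back
         		 * (TXDW), link status change (LSC), Rx sequence error
         		 * (RXSEQ), Rx descriptor minimum threshold (RXDMT0),
         		 * Rx overrun (RXO) and Rx timer (RXT0) interrupts,
         		 * plus the SFP GPI bit when present.
         		 */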
   6220 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6221 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6222 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6223 	}
   6224 
   6225 	/* Set up the inter-packet gap. */
   6226 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6227 
   6228 	if (sc->sc_type >= WM_T_82543) {
   6229 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6230 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6231 			wm_itrs_writereg(sc, wmq);
   6232 		}
    6233 		/*
    6234 		 * Link interrupts occur much less often than TX and RX
    6235 		 * interrupts, so we don't tune the
    6236 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6237 		 * if_igb does.
    6238 		 */
   6239 	}
   6240 
   6241 	/* Set the VLAN ethernetype. */
   6242 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6243 
    6244 	/*
    6245 	 * Set up the transmit control register; we start out with
    6246 	 * a collision distance suitable for FDX, but update it when
    6247 	 * we resolve the media type.
    6248 	 */
   6249 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6250 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6251 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6252 	if (sc->sc_type >= WM_T_82571)
   6253 		sc->sc_tctl |= TCTL_MULR;
   6254 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6255 
   6256 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6257 		/* Write TDT after TCTL.EN is set. See the datasheet. */
   6258 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6259 	}
   6260 
   6261 	if (sc->sc_type == WM_T_80003) {
   6262 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6263 		reg &= ~TCTL_EXT_GCEX_MASK;
   6264 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6265 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6266 	}
   6267 
   6268 	/* Set the media. */
   6269 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6270 		goto out;
   6271 
   6272 	/* Configure for OS presence */
   6273 	wm_init_manageability(sc);
   6274 
   6275 	/*
   6276 	 * Set up the receive control register; we actually program the
   6277 	 * register when we set the receive filter. Use multicast address
   6278 	 * offset type 0.
   6279 	 *
   6280 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6281 	 * don't enable that feature.
   6282 	 */
   6283 	sc->sc_mchash_type = 0;
   6284 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6285 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6286 
    6287 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6288 	if (sc->sc_type == WM_T_82574)
   6289 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6290 
    6291 	/*
    6292 	 * The I350 has a bug where it always strips the CRC whether asked
    6293 	 * to or not, so ask for a stripped CRC here and cope in rxeof().
    6294 	 */
   6295 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6296 	    || (sc->sc_type == WM_T_I210))
   6297 		sc->sc_rctl |= RCTL_SECRC;
   6298 
   6299 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6300 	    && (ifp->if_mtu > ETHERMTU)) {
   6301 		sc->sc_rctl |= RCTL_LPE;
   6302 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6303 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6304 	}
   6305 
   6306 	if (MCLBYTES == 2048)
   6307 		sc->sc_rctl |= RCTL_2k;
   6308 	else {
   6309 		if (sc->sc_type >= WM_T_82543) {
   6310 			switch (MCLBYTES) {
   6311 			case 4096:
   6312 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6313 				break;
   6314 			case 8192:
   6315 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6316 				break;
   6317 			case 16384:
   6318 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6319 				break;
   6320 			default:
   6321 				panic("wm_init: MCLBYTES %d unsupported",
   6322 				    MCLBYTES);
   6323 				break;
   6324 			}
   6325 		} else
   6326 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6327 	}
   6328 
   6329 	/* Enable ECC */
   6330 	switch (sc->sc_type) {
   6331 	case WM_T_82571:
   6332 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6333 		reg |= PBA_ECC_CORR_EN;
   6334 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6335 		break;
   6336 	case WM_T_PCH_LPT:
   6337 	case WM_T_PCH_SPT:
   6338 	case WM_T_PCH_CNP:
   6339 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6340 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6341 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6342 
   6343 		sc->sc_ctrl |= CTRL_MEHE;
   6344 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6345 		break;
   6346 	default:
   6347 		break;
   6348 	}
   6349 
   6350 	/*
   6351 	 * Set the receive filter.
   6352 	 *
   6353 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6354 	 * the setting of RCTL.EN in wm_set_filter()
   6355 	 */
   6356 	wm_set_filter(sc);
   6357 
    6358 	/* On 82575 and later, set RDT only if RX is enabled */
   6359 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6360 		int qidx;
   6361 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6362 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6363 			for (i = 0; i < WM_NRXDESC; i++) {
   6364 				mutex_enter(rxq->rxq_lock);
   6365 				wm_init_rxdesc(rxq, i);
   6366 				mutex_exit(rxq->rxq_lock);
    6368 			}
   6369 		}
   6370 	}
   6371 
   6372 	wm_unset_stopping_flags(sc);
   6373 
   6374 	/* Start the one second link check clock. */
   6375 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6376 
   6377 	/* ...all done! */
   6378 	ifp->if_flags |= IFF_RUNNING;
   6379 	ifp->if_flags &= ~IFF_OACTIVE;
   6380 
   6381  out:
   6382 	/* Save last flags for the callback */
   6383 	sc->sc_if_flags = ifp->if_flags;
   6384 	sc->sc_ec_capenable = ec->ec_capenable;
   6385 	if (error)
   6386 		log(LOG_ERR, "%s: interface not running\n",
   6387 		    device_xname(sc->sc_dev));
   6388 	return error;
   6389 }
   6390 
   6391 /*
   6392  * wm_stop:		[ifnet interface function]
   6393  *
   6394  *	Stop transmission on the interface.
   6395  */
   6396 static void
   6397 wm_stop(struct ifnet *ifp, int disable)
   6398 {
   6399 	struct wm_softc *sc = ifp->if_softc;
   6400 
   6401 	WM_CORE_LOCK(sc);
   6402 	wm_stop_locked(ifp, disable);
   6403 	WM_CORE_UNLOCK(sc);
   6404 }
   6405 
   6406 static void
   6407 wm_stop_locked(struct ifnet *ifp, int disable)
   6408 {
   6409 	struct wm_softc *sc = ifp->if_softc;
   6410 	struct wm_txsoft *txs;
   6411 	int i, qidx;
   6412 
   6413 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6414 		device_xname(sc->sc_dev), __func__));
   6415 	KASSERT(WM_CORE_LOCKED(sc));
   6416 
   6417 	wm_set_stopping_flags(sc);
   6418 
   6419 	/* Stop the one second clock. */
   6420 	callout_stop(&sc->sc_tick_ch);
   6421 
   6422 	/* Stop the 82547 Tx FIFO stall check timer. */
   6423 	if (sc->sc_type == WM_T_82547)
   6424 		callout_stop(&sc->sc_txfifo_ch);
   6425 
   6426 	if (sc->sc_flags & WM_F_HAS_MII) {
   6427 		/* Down the MII. */
   6428 		mii_down(&sc->sc_mii);
   6429 	} else {
   6430 #if 0
   6431 		/* Should we clear PHY's status properly? */
   6432 		wm_reset(sc);
   6433 #endif
   6434 	}
   6435 
   6436 	/* Stop the transmit and receive processes. */
   6437 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6438 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6439 	sc->sc_rctl &= ~RCTL_EN;
   6440 
   6441 	/*
   6442 	 * Clear the interrupt mask to ensure the device cannot assert its
   6443 	 * interrupt line.
   6444 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6445 	 * service any currently pending or shared interrupt.
   6446 	 */
   6447 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6448 	sc->sc_icr = 0;
   6449 	if (wm_is_using_msix(sc)) {
   6450 		if (sc->sc_type != WM_T_82574) {
   6451 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6452 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6453 		} else
   6454 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6455 	}
   6456 
   6457 	/* Release any queued transmit buffers. */
   6458 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6459 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6460 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6461 		mutex_enter(txq->txq_lock);
   6462 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6463 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6464 			txs = &txq->txq_soft[i];
   6465 			if (txs->txs_mbuf != NULL) {
    6466 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6467 				m_freem(txs->txs_mbuf);
   6468 				txs->txs_mbuf = NULL;
   6469 			}
   6470 		}
   6471 		mutex_exit(txq->txq_lock);
   6472 	}
   6473 
   6474 	/* Mark the interface as down and cancel the watchdog timer. */
   6475 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6476 
   6477 	if (disable) {
   6478 		for (i = 0; i < sc->sc_nqueues; i++) {
   6479 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6480 			mutex_enter(rxq->rxq_lock);
   6481 			wm_rxdrain(rxq);
   6482 			mutex_exit(rxq->rxq_lock);
   6483 		}
   6484 	}
   6485 
   6486 #if 0 /* notyet */
   6487 	if (sc->sc_type >= WM_T_82544)
   6488 		CSR_WRITE(sc, WMREG_WUC, 0);
   6489 #endif
   6490 }
   6491 
   6492 static void
   6493 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6494 {
   6495 	struct mbuf *m;
   6496 	int i;
   6497 
   6498 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6499 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6500 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6501 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6502 		    m->m_data, m->m_len, m->m_flags);
   6503 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6504 	    i, i == 1 ? "" : "s");
   6505 }
   6506 
   6507 /*
   6508  * wm_82547_txfifo_stall:
   6509  *
   6510  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6511  *	reset the FIFO pointers, and restart packet transmission.
   6512  */
   6513 static void
   6514 wm_82547_txfifo_stall(void *arg)
   6515 {
   6516 	struct wm_softc *sc = arg;
   6517 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6518 
   6519 	mutex_enter(txq->txq_lock);
   6520 
   6521 	if (txq->txq_stopping)
   6522 		goto out;
   6523 
   6524 	if (txq->txq_fifo_stall) {
   6525 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6526 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6527 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6528 			/*
   6529 			 * Packets have drained.  Stop transmitter, reset
   6530 			 * FIFO pointers, restart transmitter, and kick
   6531 			 * the packet queue.
   6532 			 */
   6533 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6534 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6535 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6536 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6537 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6538 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6539 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6540 			CSR_WRITE_FLUSH(sc);
   6541 
   6542 			txq->txq_fifo_head = 0;
   6543 			txq->txq_fifo_stall = 0;
   6544 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6545 		} else {
   6546 			/*
   6547 			 * Still waiting for packets to drain; try again in
   6548 			 * another tick.
   6549 			 */
   6550 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6551 		}
   6552 	}
   6553 
   6554 out:
   6555 	mutex_exit(txq->txq_lock);
   6556 }
   6557 
   6558 /*
   6559  * wm_82547_txfifo_bugchk:
   6560  *
   6561  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6562  *	prevent enqueueing a packet that would wrap around the end
    6563  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6564  *
   6565  *	We do this by checking the amount of space before the end
   6566  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6567  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6568  *	the internal FIFO pointers to the beginning, and restart
   6569  *	transmission on the interface.
   6570  */
   6571 #define	WM_FIFO_HDR		0x10
   6572 #define	WM_82547_PAD_LEN	0x3e0
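         /*
          * Illustrative numbers (not from any datasheet): with txq_fifo_size
          * 0x4000 and txq_fifo_head 0x3f00, space is 0x100.  A 1514-byte packet
          * rounds up to len 0x600, and 0x600 >= WM_82547_PAD_LEN + 0x100, so
          * the packet would land too close to the end of the FIFO and we stall.
          */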
   6573 static int
   6574 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6575 {
   6576 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6577 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6578 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6579 
   6580 	/* Just return if already stalled. */
   6581 	if (txq->txq_fifo_stall)
   6582 		return 1;
   6583 
   6584 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6585 		/* Stall only occurs in half-duplex mode. */
   6586 		goto send_packet;
   6587 	}
   6588 
   6589 	if (len >= WM_82547_PAD_LEN + space) {
   6590 		txq->txq_fifo_stall = 1;
   6591 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6592 		return 1;
   6593 	}
   6594 
   6595  send_packet:
   6596 	txq->txq_fifo_head += len;
   6597 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6598 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6599 
   6600 	return 0;
   6601 }
   6602 
   6603 static int
   6604 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6605 {
   6606 	int error;
   6607 
   6608 	/*
   6609 	 * Allocate the control data structures, and create and load the
   6610 	 * DMA map for it.
   6611 	 *
   6612 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6613 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6614 	 * both sets within the same 4G segment.
   6615 	 */
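         	/*
         	 * The 4G constraint is enforced by passing 0x100000000 as the
         	 * boundary argument to bus_dmamem_alloc() below.
         	 */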
   6616 	if (sc->sc_type < WM_T_82544)
   6617 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6618 	else
   6619 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6620 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6621 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6622 	else
   6623 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6624 
   6625 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6626 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6627 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6628 		aprint_error_dev(sc->sc_dev,
   6629 		    "unable to allocate TX control data, error = %d\n",
   6630 		    error);
   6631 		goto fail_0;
   6632 	}
   6633 
   6634 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6635 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6636 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6637 		aprint_error_dev(sc->sc_dev,
   6638 		    "unable to map TX control data, error = %d\n", error);
   6639 		goto fail_1;
   6640 	}
   6641 
   6642 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6643 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6644 		aprint_error_dev(sc->sc_dev,
   6645 		    "unable to create TX control data DMA map, error = %d\n",
   6646 		    error);
   6647 		goto fail_2;
   6648 	}
   6649 
   6650 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6651 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6652 		aprint_error_dev(sc->sc_dev,
   6653 		    "unable to load TX control data DMA map, error = %d\n",
   6654 		    error);
   6655 		goto fail_3;
   6656 	}
   6657 
   6658 	return 0;
   6659 
   6660  fail_3:
   6661 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6662  fail_2:
   6663 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6664 	    WM_TXDESCS_SIZE(txq));
   6665  fail_1:
   6666 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6667  fail_0:
   6668 	return error;
   6669 }
   6670 
   6671 static void
   6672 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6673 {
   6674 
   6675 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6676 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6677 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6678 	    WM_TXDESCS_SIZE(txq));
   6679 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6680 }
   6681 
   6682 static int
   6683 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6684 {
   6685 	int error;
   6686 	size_t rxq_descs_size;
   6687 
   6688 	/*
   6689 	 * Allocate the control data structures, and create and load the
   6690 	 * DMA map for it.
   6691 	 *
   6692 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6693 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6694 	 * both sets within the same 4G segment.
   6695 	 */
   6696 	rxq->rxq_ndesc = WM_NRXDESC;
   6697 	if (sc->sc_type == WM_T_82574)
   6698 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6699 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6700 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6701 	else
   6702 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6703 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6704 
   6705 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6706 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6707 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6708 		aprint_error_dev(sc->sc_dev,
   6709 		    "unable to allocate RX control data, error = %d\n",
   6710 		    error);
   6711 		goto fail_0;
   6712 	}
   6713 
   6714 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6715 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6716 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6717 		aprint_error_dev(sc->sc_dev,
   6718 		    "unable to map RX control data, error = %d\n", error);
   6719 		goto fail_1;
   6720 	}
   6721 
   6722 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6723 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6724 		aprint_error_dev(sc->sc_dev,
   6725 		    "unable to create RX control data DMA map, error = %d\n",
   6726 		    error);
   6727 		goto fail_2;
   6728 	}
   6729 
   6730 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6731 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6732 		aprint_error_dev(sc->sc_dev,
   6733 		    "unable to load RX control data DMA map, error = %d\n",
   6734 		    error);
   6735 		goto fail_3;
   6736 	}
   6737 
   6738 	return 0;
   6739 
   6740  fail_3:
   6741 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6742  fail_2:
   6743 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6744 	    rxq_descs_size);
   6745  fail_1:
   6746 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6747  fail_0:
   6748 	return error;
   6749 }
   6750 
   6751 static void
   6752 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6753 {
   6754 
   6755 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6756 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6757 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6758 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6759 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6760 }
   6761 
   6762 
   6763 static int
   6764 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6765 {
   6766 	int i, error;
   6767 
   6768 	/* Create the transmit buffer DMA maps. */
   6769 	WM_TXQUEUELEN(txq) =
   6770 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6771 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6772 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6773 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6774 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6775 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6776 			aprint_error_dev(sc->sc_dev,
   6777 			    "unable to create Tx DMA map %d, error = %d\n",
   6778 			    i, error);
   6779 			goto fail;
   6780 		}
   6781 	}
   6782 
   6783 	return 0;
   6784 
   6785  fail:
   6786 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6787 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6788 			bus_dmamap_destroy(sc->sc_dmat,
   6789 			    txq->txq_soft[i].txs_dmamap);
   6790 	}
   6791 	return error;
   6792 }
   6793 
   6794 static void
   6795 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6796 {
   6797 	int i;
   6798 
   6799 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6800 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6801 			bus_dmamap_destroy(sc->sc_dmat,
   6802 			    txq->txq_soft[i].txs_dmamap);
   6803 	}
   6804 }
   6805 
   6806 static int
   6807 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6808 {
   6809 	int i, error;
   6810 
   6811 	/* Create the receive buffer DMA maps. */
   6812 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6813 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6814 			    MCLBYTES, 0, 0,
   6815 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6816 			aprint_error_dev(sc->sc_dev,
   6817 			    "unable to create Rx DMA map %d error = %d\n",
   6818 			    i, error);
   6819 			goto fail;
   6820 		}
   6821 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6822 	}
   6823 
   6824 	return 0;
   6825 
   6826  fail:
   6827 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6828 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6829 			bus_dmamap_destroy(sc->sc_dmat,
   6830 			    rxq->rxq_soft[i].rxs_dmamap);
   6831 	}
   6832 	return error;
   6833 }
   6834 
   6835 static void
   6836 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6837 {
   6838 	int i;
   6839 
   6840 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6841 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6842 			bus_dmamap_destroy(sc->sc_dmat,
   6843 			    rxq->rxq_soft[i].rxs_dmamap);
   6844 	}
   6845 }
   6846 
   6847 /*
    6848  * wm_alloc_txrx_queues:
   6849  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6850  */
   6851 static int
   6852 wm_alloc_txrx_queues(struct wm_softc *sc)
   6853 {
   6854 	int i, error, tx_done, rx_done;
   6855 
   6856 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6857 	    KM_SLEEP);
   6858 	if (sc->sc_queue == NULL) {
    6859 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6860 		error = ENOMEM;
   6861 		goto fail_0;
   6862 	}
   6863 
   6864 	/* For transmission */
   6865 	error = 0;
   6866 	tx_done = 0;
   6867 	for (i = 0; i < sc->sc_nqueues; i++) {
   6868 #ifdef WM_EVENT_COUNTERS
   6869 		int j;
   6870 		const char *xname;
   6871 #endif
   6872 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6873 		txq->txq_sc = sc;
   6874 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6875 
   6876 		error = wm_alloc_tx_descs(sc, txq);
   6877 		if (error)
   6878 			break;
   6879 		error = wm_alloc_tx_buffer(sc, txq);
   6880 		if (error) {
   6881 			wm_free_tx_descs(sc, txq);
   6882 			break;
   6883 		}
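         		/*
         		 * txq_interq is the per-queue transmit backlog used by
         		 * wm_transmit(); pcq(9) is a lockless producer/consumer
         		 * queue, so packets can be enqueued without holding
         		 * txq_lock.
         		 */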
   6884 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6885 		if (txq->txq_interq == NULL) {
   6886 			wm_free_tx_descs(sc, txq);
   6887 			wm_free_tx_buffer(sc, txq);
   6888 			error = ENOMEM;
   6889 			break;
   6890 		}
   6891 
   6892 #ifdef WM_EVENT_COUNTERS
   6893 		xname = device_xname(sc->sc_dev);
   6894 
   6895 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6896 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6897 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6898 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6899 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6900 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6901 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6902 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6903 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6904 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6905 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6906 
   6907 		for (j = 0; j < WM_NTXSEGS; j++) {
   6908 			snprintf(txq->txq_txseg_evcnt_names[j],
   6909 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6910 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6911 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6912 		}
   6913 
   6914 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6915 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6916 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6917 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6918 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6919 #endif /* WM_EVENT_COUNTERS */
   6920 
   6921 		tx_done++;
   6922 	}
   6923 	if (error)
   6924 		goto fail_1;
   6925 
   6926 	/* For receive */
   6927 	error = 0;
   6928 	rx_done = 0;
   6929 	for (i = 0; i < sc->sc_nqueues; i++) {
   6930 #ifdef WM_EVENT_COUNTERS
   6931 		const char *xname;
   6932 #endif
   6933 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6934 		rxq->rxq_sc = sc;
   6935 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6936 
   6937 		error = wm_alloc_rx_descs(sc, rxq);
   6938 		if (error)
   6939 			break;
   6940 
   6941 		error = wm_alloc_rx_buffer(sc, rxq);
   6942 		if (error) {
   6943 			wm_free_rx_descs(sc, rxq);
   6944 			break;
   6945 		}
   6946 
   6947 #ifdef WM_EVENT_COUNTERS
   6948 		xname = device_xname(sc->sc_dev);
   6949 
   6950 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6951 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6952 
   6953 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6954 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6955 #endif /* WM_EVENT_COUNTERS */
   6956 
   6957 		rx_done++;
   6958 	}
   6959 	if (error)
   6960 		goto fail_2;
   6961 
   6962 	for (i = 0; i < sc->sc_nqueues; i++) {
   6963 		char rndname[16];
   6964 
   6965 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
   6966 		    device_xname(sc->sc_dev), i);
   6967 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
   6968 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
   6969 	}
   6970 
   6971 	return 0;
   6972 
   6973  fail_2:
   6974 	for (i = 0; i < rx_done; i++) {
   6975 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6976 		wm_free_rx_buffer(sc, rxq);
   6977 		wm_free_rx_descs(sc, rxq);
   6978 		if (rxq->rxq_lock)
   6979 			mutex_obj_free(rxq->rxq_lock);
   6980 	}
   6981  fail_1:
   6982 	for (i = 0; i < tx_done; i++) {
   6983 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6984 		pcq_destroy(txq->txq_interq);
   6985 		wm_free_tx_buffer(sc, txq);
   6986 		wm_free_tx_descs(sc, txq);
   6987 		if (txq->txq_lock)
   6988 			mutex_obj_free(txq->txq_lock);
   6989 	}
   6990 
   6991 	kmem_free(sc->sc_queue,
   6992 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6993  fail_0:
   6994 	return error;
   6995 }
   6996 
   6997 /*
    6998  * wm_free_txrx_queues:
   6999  *	Free {tx,rx}descs and {tx,rx} buffers
   7000  */
   7001 static void
   7002 wm_free_txrx_queues(struct wm_softc *sc)
   7003 {
   7004 	int i;
   7005 
   7006 	for (i = 0; i < sc->sc_nqueues; i++)
   7007 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
   7008 
   7009 	for (i = 0; i < sc->sc_nqueues; i++) {
   7010 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7011 
   7012 #ifdef WM_EVENT_COUNTERS
   7013 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7014 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7015 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7016 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7017 #endif /* WM_EVENT_COUNTERS */
   7018 
   7019 		wm_free_rx_buffer(sc, rxq);
   7020 		wm_free_rx_descs(sc, rxq);
   7021 		if (rxq->rxq_lock)
   7022 			mutex_obj_free(rxq->rxq_lock);
   7023 	}
   7024 
   7025 	for (i = 0; i < sc->sc_nqueues; i++) {
   7026 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7027 		struct mbuf *m;
   7028 #ifdef WM_EVENT_COUNTERS
   7029 		int j;
   7030 
   7031 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7032 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7033 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7034 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7035 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7036 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7037 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7038 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7039 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7040 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7041 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7042 
   7043 		for (j = 0; j < WM_NTXSEGS; j++)
   7044 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7045 
   7046 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7047 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7048 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7049 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7050 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7051 #endif /* WM_EVENT_COUNTERS */
   7052 
   7053 		/* Drain txq_interq */
   7054 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7055 			m_freem(m);
   7056 		pcq_destroy(txq->txq_interq);
   7057 
   7058 		wm_free_tx_buffer(sc, txq);
   7059 		wm_free_tx_descs(sc, txq);
   7060 		if (txq->txq_lock)
   7061 			mutex_obj_free(txq->txq_lock);
   7062 	}
   7063 
   7064 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7065 }
   7066 
   7067 static void
   7068 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7069 {
   7070 
   7071 	KASSERT(mutex_owned(txq->txq_lock));
   7072 
   7073 	/* Initialize the transmit descriptor ring. */
   7074 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7075 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7076 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7077 	txq->txq_free = WM_NTXDESC(txq);
   7078 	txq->txq_next = 0;
   7079 }
   7080 
   7081 static void
   7082 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7083     struct wm_txqueue *txq)
   7084 {
   7085 
   7086 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7087 		device_xname(sc->sc_dev), __func__));
   7088 	KASSERT(mutex_owned(txq->txq_lock));
   7089 
   7090 	if (sc->sc_type < WM_T_82543) {
   7091 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7092 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7093 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7094 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7095 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7096 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7097 	} else {
   7098 		int qid = wmq->wmq_id;
   7099 
   7100 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7101 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7102 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7103 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7104 
   7105 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7106 			/*
   7107 			 * Don't write TDT before TCTL.EN is set.
    7108 			 * See the datasheet.
   7109 			 */
   7110 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7111 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7112 			    | TXDCTL_WTHRESH(0));
   7113 		else {
   7114 			/* XXX should update with AIM? */
   7115 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7116 			if (sc->sc_type >= WM_T_82540) {
    7117 				/* TADV should be the same value as TIDV */
   7118 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7119 			}
   7120 
   7121 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7122 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7123 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7124 		}
   7125 	}
   7126 }
   7127 
   7128 static void
   7129 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7130 {
   7131 	int i;
   7132 
   7133 	KASSERT(mutex_owned(txq->txq_lock));
   7134 
   7135 	/* Initialize the transmit job descriptors. */
   7136 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7137 		txq->txq_soft[i].txs_mbuf = NULL;
   7138 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7139 	txq->txq_snext = 0;
   7140 	txq->txq_sdirty = 0;
   7141 }
   7142 
   7143 static void
   7144 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7145     struct wm_txqueue *txq)
   7146 {
   7147 
   7148 	KASSERT(mutex_owned(txq->txq_lock));
   7149 
   7150 	/*
   7151 	 * Set up some register offsets that are different between
   7152 	 * the i82542 and the i82543 and later chips.
   7153 	 */
   7154 	if (sc->sc_type < WM_T_82543)
   7155 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7156 	else
   7157 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7158 
   7159 	wm_init_tx_descs(sc, txq);
   7160 	wm_init_tx_regs(sc, wmq, txq);
   7161 	wm_init_tx_buffer(sc, txq);
   7162 
   7163 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7164 	txq->txq_sending = false;
   7165 }
   7166 
   7167 static void
   7168 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7169     struct wm_rxqueue *rxq)
   7170 {
   7171 
   7172 	KASSERT(mutex_owned(rxq->rxq_lock));
   7173 
   7174 	/*
   7175 	 * Initialize the receive descriptor and receive job
   7176 	 * descriptor rings.
   7177 	 */
   7178 	if (sc->sc_type < WM_T_82543) {
   7179 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7180 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7181 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7182 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7183 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7184 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7185 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7186 
   7187 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7188 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7189 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7190 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7191 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7192 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7193 	} else {
   7194 		int qid = wmq->wmq_id;
   7195 
   7196 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7197 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7198 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7199 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7200 
   7201 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7202 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7203 				panic("%s: MCLBYTES %d unsupported for "
    7204 				    "82575 or higher\n", __func__, MCLBYTES);
    7205 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
    7206 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
    7207 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
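         			/*
         			 * E.g. MCLBYTES 2048 programs a BSIZEPKT field of
         			 * 2, assuming SRRCTL_BSIZEPKT_SHIFT is 10 (1 KB
         			 * units).
         			 */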
   7208 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7209 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7210 			    | RXDCTL_WTHRESH(1));
   7211 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7212 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7213 		} else {
   7214 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7215 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7216 			/* XXX should update with AIM? */
   7217 			CSR_WRITE(sc, WMREG_RDTR,
   7218 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7219 			/* RADV MUST be the same value as RDTR */
   7220 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7221 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7222 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7223 		}
   7224 	}
   7225 }
   7226 
   7227 static int
   7228 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7229 {
   7230 	struct wm_rxsoft *rxs;
   7231 	int error, i;
   7232 
   7233 	KASSERT(mutex_owned(rxq->rxq_lock));
   7234 
   7235 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7236 		rxs = &rxq->rxq_soft[i];
   7237 		if (rxs->rxs_mbuf == NULL) {
   7238 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7239 				log(LOG_ERR, "%s: unable to allocate or map "
   7240 				    "rx buffer %d, error = %d\n",
   7241 				    device_xname(sc->sc_dev), i, error);
   7242 				/*
   7243 				 * XXX Should attempt to run with fewer receive
   7244 				 * XXX buffers instead of just failing.
   7245 				 */
   7246 				wm_rxdrain(rxq);
   7247 				return ENOMEM;
   7248 			}
   7249 		} else {
   7250 			/*
   7251 			 * For 82575 and 82576, the RX descriptors must be
   7252 			 * initialized after the setting of RCTL.EN in
   7253 			 * wm_set_filter()
   7254 			 */
   7255 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7256 				wm_init_rxdesc(rxq, i);
   7257 		}
   7258 	}
   7259 	rxq->rxq_ptr = 0;
   7260 	rxq->rxq_discard = 0;
   7261 	WM_RXCHAIN_RESET(rxq);
   7262 
   7263 	return 0;
   7264 }
   7265 
   7266 static int
   7267 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7268     struct wm_rxqueue *rxq)
   7269 {
   7270 
   7271 	KASSERT(mutex_owned(rxq->rxq_lock));
   7272 
   7273 	/*
   7274 	 * Set up some register offsets that are different between
   7275 	 * the i82542 and the i82543 and later chips.
   7276 	 */
   7277 	if (sc->sc_type < WM_T_82543)
   7278 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7279 	else
   7280 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7281 
   7282 	wm_init_rx_regs(sc, wmq, rxq);
   7283 	return wm_init_rx_buffer(sc, rxq);
   7284 }
   7285 
   7286 /*
    7287  * wm_init_txrx_queues:
   7288  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7289  */
   7290 static int
   7291 wm_init_txrx_queues(struct wm_softc *sc)
   7292 {
   7293 	int i, error = 0;
   7294 
   7295 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7296 		device_xname(sc->sc_dev), __func__));
   7297 
   7298 	for (i = 0; i < sc->sc_nqueues; i++) {
   7299 		struct wm_queue *wmq = &sc->sc_queue[i];
   7300 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7301 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7302 
    7303 		/*
    7304 		 * TODO
    7305 		 * Currently, a constant value is used instead of AIM.
    7306 		 * Furthermore, the interrupt interval used for multiqueue
    7307 		 * (which uses polling mode) is lower than the default
    7308 		 * value.  More tuning and AIM are required.
    7309 		 */
   7310 		if (wm_is_using_multiqueue(sc))
   7311 			wmq->wmq_itr = 50;
   7312 		else
   7313 			wmq->wmq_itr = sc->sc_itr_init;
   7314 		wmq->wmq_set_itr = true;
   7315 
   7316 		mutex_enter(txq->txq_lock);
   7317 		wm_init_tx_queue(sc, wmq, txq);
   7318 		mutex_exit(txq->txq_lock);
   7319 
   7320 		mutex_enter(rxq->rxq_lock);
   7321 		error = wm_init_rx_queue(sc, wmq, rxq);
   7322 		mutex_exit(rxq->rxq_lock);
   7323 		if (error)
   7324 			break;
   7325 	}
   7326 
   7327 	return error;
   7328 }
   7329 
   7330 /*
   7331  * wm_tx_offload:
   7332  *
   7333  *	Set up TCP/IP checksumming parameters for the
   7334  *	specified packet.
   7335  */
   7336 static int
   7337 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7338     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7339 {
   7340 	struct mbuf *m0 = txs->txs_mbuf;
   7341 	struct livengood_tcpip_ctxdesc *t;
   7342 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7343 	uint32_t ipcse;
   7344 	struct ether_header *eh;
   7345 	int offset, iphl;
   7346 	uint8_t fields;
   7347 
   7348 	/*
   7349 	 * XXX It would be nice if the mbuf pkthdr had offset
   7350 	 * fields for the protocol headers.
   7351 	 */
   7352 
   7353 	eh = mtod(m0, struct ether_header *);
   7354 	switch (htons(eh->ether_type)) {
   7355 	case ETHERTYPE_IP:
   7356 	case ETHERTYPE_IPV6:
   7357 		offset = ETHER_HDR_LEN;
   7358 		break;
   7359 
   7360 	case ETHERTYPE_VLAN:
   7361 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7362 		break;
   7363 
   7364 	default:
   7365 		/* Don't support this protocol or encapsulation. */
   7366 		*fieldsp = 0;
   7367 		*cmdp = 0;
   7368 		return 0;
   7369 	}
   7370 
   7371 	if ((m0->m_pkthdr.csum_flags &
   7372 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7373 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7374 	} else
   7375 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7376 
   7377 	ipcse = offset + iphl - 1;
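         	/*
         	 * E.g. a plain Ethernet frame with a 20-byte IPv4 header gives
         	 * offset 14 and ipcse 33 (the last byte of the IP header).
         	 */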
   7378 
   7379 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7380 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7381 	seg = 0;
   7382 	fields = 0;
   7383 
   7384 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7385 		int hlen = offset + iphl;
   7386 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7387 
   7388 		if (__predict_false(m0->m_len <
   7389 				    (hlen + sizeof(struct tcphdr)))) {
   7390 			/*
   7391 			 * TCP/IP headers are not in the first mbuf; we need
   7392 			 * to do this the slow and painful way. Let's just
   7393 			 * hope this doesn't happen very often.
   7394 			 */
   7395 			struct tcphdr th;
   7396 
   7397 			WM_Q_EVCNT_INCR(txq, tsopain);
   7398 
   7399 			m_copydata(m0, hlen, sizeof(th), &th);
   7400 			if (v4) {
   7401 				struct ip ip;
   7402 
   7403 				m_copydata(m0, offset, sizeof(ip), &ip);
   7404 				ip.ip_len = 0;
   7405 				m_copyback(m0,
   7406 				    offset + offsetof(struct ip, ip_len),
   7407 				    sizeof(ip.ip_len), &ip.ip_len);
   7408 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7409 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7410 			} else {
   7411 				struct ip6_hdr ip6;
   7412 
   7413 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7414 				ip6.ip6_plen = 0;
   7415 				m_copyback(m0,
   7416 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7417 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7418 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7419 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7420 			}
   7421 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7422 			    sizeof(th.th_sum), &th.th_sum);
   7423 
   7424 			hlen += th.th_off << 2;
   7425 		} else {
   7426 			/*
   7427 			 * TCP/IP headers are in the first mbuf; we can do
   7428 			 * this the easy way.
   7429 			 */
   7430 			struct tcphdr *th;
   7431 
   7432 			if (v4) {
   7433 				struct ip *ip =
   7434 				    (void *)(mtod(m0, char *) + offset);
   7435 				th = (void *)(mtod(m0, char *) + hlen);
   7436 
   7437 				ip->ip_len = 0;
   7438 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7439 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7440 			} else {
   7441 				struct ip6_hdr *ip6 =
   7442 				    (void *)(mtod(m0, char *) + offset);
   7443 				th = (void *)(mtod(m0, char *) + hlen);
   7444 
   7445 				ip6->ip6_plen = 0;
   7446 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7447 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7448 			}
   7449 			hlen += th->th_off << 2;
   7450 		}
   7451 
   7452 		if (v4) {
   7453 			WM_Q_EVCNT_INCR(txq, tso);
   7454 			cmdlen |= WTX_TCPIP_CMD_IP;
   7455 		} else {
   7456 			WM_Q_EVCNT_INCR(txq, tso6);
   7457 			ipcse = 0;
   7458 		}
   7459 		cmd |= WTX_TCPIP_CMD_TSE;
   7460 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7461 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7462 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7463 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7464 	}
   7465 
   7466 	/*
   7467 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7468 	 * offload feature, if we load the context descriptor, we
   7469 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7470 	 */
   7471 
   7472 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7473 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7474 	    WTX_TCPIP_IPCSE(ipcse);
   7475 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7476 		WM_Q_EVCNT_INCR(txq, ipsum);
   7477 		fields |= WTX_IXSM;
   7478 	}
   7479 
   7480 	offset += iphl;
   7481 
   7482 	if (m0->m_pkthdr.csum_flags &
   7483 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7484 		WM_Q_EVCNT_INCR(txq, tusum);
   7485 		fields |= WTX_TXSM;
   7486 		tucs = WTX_TCPIP_TUCSS(offset) |
   7487 		    WTX_TCPIP_TUCSO(offset +
   7488 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7489 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7490 	} else if ((m0->m_pkthdr.csum_flags &
   7491 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7492 		WM_Q_EVCNT_INCR(txq, tusum6);
   7493 		fields |= WTX_TXSM;
   7494 		tucs = WTX_TCPIP_TUCSS(offset) |
   7495 		    WTX_TCPIP_TUCSO(offset +
   7496 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7497 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7498 	} else {
   7499 		/* Just initialize it to a valid TCP context. */
   7500 		tucs = WTX_TCPIP_TUCSS(offset) |
   7501 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7502 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7503 	}
   7504 
    7505 	/*
    7506 	 * We don't have to write a context descriptor for every packet,
    7507 	 * except on the 82574, which requires a context descriptor for
    7508 	 * every packet when two descriptor queues are used.
    7509 	 * Writing a context descriptor for every packet adds overhead,
    7510 	 * but it does not cause problems.
    7511 	 */
   7512 	/* Fill in the context descriptor. */
   7513 	t = (struct livengood_tcpip_ctxdesc *)
   7514 	    &txq->txq_descs[txq->txq_next];
   7515 	t->tcpip_ipcs = htole32(ipcs);
   7516 	t->tcpip_tucs = htole32(tucs);
   7517 	t->tcpip_cmdlen = htole32(cmdlen);
   7518 	t->tcpip_seg = htole32(seg);
   7519 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7520 
   7521 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7522 	txs->txs_ndesc++;
   7523 
   7524 	*cmdp = cmd;
   7525 	*fieldsp = fields;
   7526 
   7527 	return 0;
   7528 }
   7529 
   7530 static inline int
   7531 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7532 {
   7533 	struct wm_softc *sc = ifp->if_softc;
   7534 	u_int cpuid = cpu_index(curcpu());
   7535 
    7536 	/*
    7537 	 * Currently, a simple distribution strategy.
    7538 	 * TODO:
    7539 	 * Distribute by flowid (RSS hash value).
    7540 	 */
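         	/*
         	 * E.g. with ncpu 8, sc_nqueues 4 and sc_affinity_offset 2,
         	 * CPU 5 maps to queue ((5 + 8 - 2) % 8) % 4 = 3.
         	 */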
   7541 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7542 }
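         /*
          * A minimal sketch of the flowid-based selection the TODO above
          * suggests, assuming a FreeBSD-style m_pkthdr.flowid field carrying
          * the RSS hash (hypothetical here; NetBSD's mbuf packet header has
          * no such field):
          *
          *	static inline int
          *	wm_select_txqueue_flowid(struct wm_softc *sc, struct mbuf *m)
          *	{
          *		return m->m_pkthdr.flowid % sc->sc_nqueues;
          *	}
          */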
   7543 
   7544 /*
   7545  * wm_start:		[ifnet interface function]
   7546  *
   7547  *	Start packet transmission on the interface.
   7548  */
   7549 static void
   7550 wm_start(struct ifnet *ifp)
   7551 {
   7552 	struct wm_softc *sc = ifp->if_softc;
   7553 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7554 
   7555 #ifdef WM_MPSAFE
   7556 	KASSERT(if_is_mpsafe(ifp));
   7557 #endif
   7558 	/*
   7559 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7560 	 */
   7561 
   7562 	mutex_enter(txq->txq_lock);
   7563 	if (!txq->txq_stopping)
   7564 		wm_start_locked(ifp);
   7565 	mutex_exit(txq->txq_lock);
   7566 }
   7567 
   7568 static void
   7569 wm_start_locked(struct ifnet *ifp)
   7570 {
   7571 	struct wm_softc *sc = ifp->if_softc;
   7572 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7573 
   7574 	wm_send_common_locked(ifp, txq, false);
   7575 }
   7576 
   7577 static int
   7578 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7579 {
   7580 	int qid;
   7581 	struct wm_softc *sc = ifp->if_softc;
   7582 	struct wm_txqueue *txq;
   7583 
   7584 	qid = wm_select_txqueue(ifp, m);
   7585 	txq = &sc->sc_queue[qid].wmq_txq;
   7586 
   7587 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7588 		m_freem(m);
   7589 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7590 		return ENOBUFS;
   7591 	}
   7592 
   7593 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7594 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7595 	if (m->m_flags & M_MCAST)
   7596 		if_statinc_ref(nsr, if_omcasts);
   7597 	IF_STAT_PUTREF(ifp);
   7598 
   7599 	if (mutex_tryenter(txq->txq_lock)) {
   7600 		if (!txq->txq_stopping)
   7601 			wm_transmit_locked(ifp, txq);
   7602 		mutex_exit(txq->txq_lock);
   7603 	}
   7604 
   7605 	return 0;
   7606 }
   7607 
   7608 static void
   7609 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7610 {
   7611 
   7612 	wm_send_common_locked(ifp, txq, true);
   7613 }
   7614 
   7615 static void
   7616 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7617     bool is_transmit)
   7618 {
   7619 	struct wm_softc *sc = ifp->if_softc;
   7620 	struct mbuf *m0;
   7621 	struct wm_txsoft *txs;
   7622 	bus_dmamap_t dmamap;
   7623 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7624 	bus_addr_t curaddr;
   7625 	bus_size_t seglen, curlen;
   7626 	uint32_t cksumcmd;
   7627 	uint8_t cksumfields;
   7628 	bool remap = true;
   7629 
   7630 	KASSERT(mutex_owned(txq->txq_lock));
   7631 
   7632 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7633 		return;
   7634 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7635 		return;
   7636 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7637 		return;
   7638 
   7639 	/* Remember the previous number of free descriptors. */
   7640 	ofree = txq->txq_free;
   7641 
   7642 	/*
   7643 	 * Loop through the send queue, setting up transmit descriptors
   7644 	 * until we drain the queue, or use up all available transmit
   7645 	 * descriptors.
   7646 	 */
   7647 	for (;;) {
   7648 		m0 = NULL;
   7649 
   7650 		/* Get a work queue entry. */
   7651 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7652 			wm_txeof(txq, UINT_MAX);
   7653 			if (txq->txq_sfree == 0) {
   7654 				DPRINTF(WM_DEBUG_TX,
   7655 				    ("%s: TX: no free job descriptors\n",
   7656 					device_xname(sc->sc_dev)));
   7657 				WM_Q_EVCNT_INCR(txq, txsstall);
   7658 				break;
   7659 			}
   7660 		}
   7661 
   7662 		/* Grab a packet off the queue. */
   7663 		if (is_transmit)
   7664 			m0 = pcq_get(txq->txq_interq);
   7665 		else
   7666 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7667 		if (m0 == NULL)
   7668 			break;
   7669 
   7670 		DPRINTF(WM_DEBUG_TX,
   7671 		    ("%s: TX: have packet to transmit: %p\n",
   7672 			device_xname(sc->sc_dev), m0));
   7673 
   7674 		txs = &txq->txq_soft[txq->txq_snext];
   7675 		dmamap = txs->txs_dmamap;
   7676 
   7677 		use_tso = (m0->m_pkthdr.csum_flags &
   7678 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7679 
   7680 		/*
   7681 		 * So says the Linux driver:
   7682 		 * The controller does a simple calculation to make sure
   7683 		 * there is enough room in the FIFO before initiating the
   7684 		 * DMA for each buffer. The calc is:
   7685 		 *	4 = ceil(buffer len / MSS)
   7686 		 * To make sure we don't overrun the FIFO, adjust the max
   7687 		 * buffer len if the MSS drops.
   7688 		 */
   7689 		dmamap->dm_maxsegsz =
   7690 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7691 		    ? m0->m_pkthdr.segsz << 2
   7692 		    : WTX_MAX_LEN;
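         		/*
         		 * E.g. an MSS of 536 caps each DMA segment at 2144
         		 * bytes (536 << 2); the cap applies only while it
         		 * stays below WTX_MAX_LEN.
         		 */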
   7693 
   7694 		/*
   7695 		 * Load the DMA map.  If this fails, the packet either
   7696 		 * didn't fit in the allotted number of segments, or we
   7697 		 * were short on resources.  For the too-many-segments
   7698 		 * case, we simply report an error and drop the packet,
   7699 		 * since we can't sanely copy a jumbo packet to a single
   7700 		 * buffer.
   7701 		 */
   7702 retry:
   7703 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7704 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7705 		if (__predict_false(error)) {
   7706 			if (error == EFBIG) {
   7707 				if (remap == true) {
   7708 					struct mbuf *m;
   7709 
   7710 					remap = false;
   7711 					m = m_defrag(m0, M_NOWAIT);
   7712 					if (m != NULL) {
   7713 						WM_Q_EVCNT_INCR(txq, defrag);
   7714 						m0 = m;
   7715 						goto retry;
   7716 					}
   7717 				}
   7718 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7719 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7720 				    "DMA segments, dropping...\n",
   7721 				    device_xname(sc->sc_dev));
   7722 				wm_dump_mbuf_chain(sc, m0);
   7723 				m_freem(m0);
   7724 				continue;
   7725 			}
   7726 			/* Short on resources, just stop for now. */
   7727 			DPRINTF(WM_DEBUG_TX,
   7728 			    ("%s: TX: dmamap load failed: %d\n",
   7729 				device_xname(sc->sc_dev), error));
   7730 			break;
   7731 		}
   7732 
   7733 		segs_needed = dmamap->dm_nsegs;
   7734 		if (use_tso) {
   7735 			/* For sentinel descriptor; see below. */
   7736 			segs_needed++;
   7737 		}
   7738 
   7739 		/*
   7740 		 * Ensure we have enough descriptors free to describe
   7741 		 * the packet. Note, we always reserve one descriptor
   7742 		 * at the end of the ring due to the semantics of the
   7743 		 * TDT register, plus one more in the event we need
   7744 		 * to load offload context.
   7745 		 */
   7746 		if (segs_needed > txq->txq_free - 2) {
   7747 			/*
   7748 			 * Not enough free descriptors to transmit this
   7749 			 * packet.  We haven't committed anything yet,
   7750 			 * so just unload the DMA map, put the packet
   7751 			 * pack on the queue, and punt. Notify the upper
   7752 			 * layer that there are no more slots left.
   7753 			 */
   7754 			DPRINTF(WM_DEBUG_TX,
   7755 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7756 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7757 				segs_needed, txq->txq_free - 1));
   7758 			if (!is_transmit)
   7759 				ifp->if_flags |= IFF_OACTIVE;
   7760 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7761 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7762 			WM_Q_EVCNT_INCR(txq, txdstall);
   7763 			break;
   7764 		}
   7765 
   7766 		/*
   7767 		 * Check for 82547 Tx FIFO bug. We need to do this
   7768 		 * once we know we can transmit the packet, since we
   7769 		 * do some internal FIFO space accounting here.
   7770 		 */
   7771 		if (sc->sc_type == WM_T_82547 &&
   7772 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7773 			DPRINTF(WM_DEBUG_TX,
   7774 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7775 				device_xname(sc->sc_dev)));
   7776 			if (!is_transmit)
   7777 				ifp->if_flags |= IFF_OACTIVE;
   7778 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7779 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7780 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7781 			break;
   7782 		}
   7783 
   7784 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7785 
   7786 		DPRINTF(WM_DEBUG_TX,
   7787 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7788 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7789 
   7790 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7791 
   7792 		/*
   7793 		 * Store a pointer to the packet so that we can free it
   7794 		 * later.
   7795 		 *
   7796 		 * Initially, we consider the number of descriptors the
   7797 		 * packet uses the number of DMA segments.  This may be
   7798 		 * incremented by 1 if we do checksum offload (a descriptor
   7799 		 * is used to set the checksum context).
   7800 		 */
   7801 		txs->txs_mbuf = m0;
   7802 		txs->txs_firstdesc = txq->txq_next;
   7803 		txs->txs_ndesc = segs_needed;
   7804 
   7805 		/* Set up offload parameters for this packet. */
   7806 		if (m0->m_pkthdr.csum_flags &
   7807 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7808 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7809 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7810 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7811 					  &cksumfields) != 0) {
   7812 				/* Error message already displayed. */
   7813 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7814 				continue;
   7815 			}
   7816 		} else {
   7817 			cksumcmd = 0;
   7818 			cksumfields = 0;
   7819 		}
   7820 
   7821 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7822 
   7823 		/* Sync the DMA map. */
   7824 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7825 		    BUS_DMASYNC_PREWRITE);
   7826 
   7827 		/* Initialize the transmit descriptor. */
   7828 		for (nexttx = txq->txq_next, seg = 0;
   7829 		     seg < dmamap->dm_nsegs; seg++) {
   7830 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7831 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7832 			     seglen != 0;
   7833 			     curaddr += curlen, seglen -= curlen,
   7834 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7835 				curlen = seglen;
   7836 
   7837 				/*
   7838 				 * So says the Linux driver:
   7839 				 * Work around for premature descriptor
   7840 				 * write-backs in TSO mode.  Append a
   7841 				 * 4-byte sentinel descriptor.
   7842 				 */
   7843 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7844 				    curlen > 8)
   7845 					curlen -= 4;
   7846 
   7847 				wm_set_dma_addr(
   7848 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7849 				txq->txq_descs[nexttx].wtx_cmdlen
   7850 				    = htole32(cksumcmd | curlen);
   7851 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7852 				    = 0;
   7853 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7854 				    = cksumfields;
    7855 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7856 				lasttx = nexttx;
   7857 
   7858 				DPRINTF(WM_DEBUG_TX,
   7859 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7860 					"len %#04zx\n",
   7861 					device_xname(sc->sc_dev), nexttx,
   7862 					(uint64_t)curaddr, curlen));
   7863 			}
   7864 		}
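		/*
		 * Sketch of the sentinel workaround above: if the last DMA
		 * segment were 1000 bytes, curlen would be reduced to 996,
		 * leaving seglen == 4 for one more loop iteration, which
		 * emits a trailing 4-byte descriptor.  That extra
		 * descriptor is the one accounted for by the segs_needed++
		 * done when use_tso is set.
		 */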
   7865 
   7866 		KASSERT(lasttx != -1);
   7867 
   7868 		/*
   7869 		 * Set up the command byte on the last descriptor of
   7870 		 * the packet. If we're in the interrupt delay window,
   7871 		 * delay the interrupt.
   7872 		 */
   7873 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7874 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7875 
   7876 		/*
   7877 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7878 		 * up the descriptor to encapsulate the packet for us.
   7879 		 *
   7880 		 * This is only valid on the last descriptor of the packet.
   7881 		 */
   7882 		if (vlan_has_tag(m0)) {
   7883 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7884 			    htole32(WTX_CMD_VLE);
   7885 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7886 			    = htole16(vlan_get_tag(m0));
   7887 		}
   7888 
   7889 		txs->txs_lastdesc = lasttx;
   7890 
   7891 		DPRINTF(WM_DEBUG_TX,
   7892 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7893 			device_xname(sc->sc_dev),
   7894 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7895 
   7896 		/* Sync the descriptors we're using. */
   7897 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7898 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7899 
   7900 		/* Give the packet to the chip. */
   7901 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7902 
   7903 		DPRINTF(WM_DEBUG_TX,
   7904 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7905 
   7906 		DPRINTF(WM_DEBUG_TX,
   7907 		    ("%s: TX: finished transmitting packet, job %d\n",
   7908 			device_xname(sc->sc_dev), txq->txq_snext));
   7909 
   7910 		/* Advance the tx pointer. */
   7911 		txq->txq_free -= txs->txs_ndesc;
   7912 		txq->txq_next = nexttx;
   7913 
   7914 		txq->txq_sfree--;
   7915 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7916 
   7917 		/* Pass the packet to any BPF listeners. */
   7918 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7919 	}
   7920 
   7921 	if (m0 != NULL) {
   7922 		if (!is_transmit)
   7923 			ifp->if_flags |= IFF_OACTIVE;
   7924 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7925 		WM_Q_EVCNT_INCR(txq, descdrop);
   7926 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7927 			__func__));
   7928 		m_freem(m0);
   7929 	}
   7930 
   7931 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7932 		/* No more slots; notify upper layer. */
   7933 		if (!is_transmit)
   7934 			ifp->if_flags |= IFF_OACTIVE;
   7935 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7936 	}
   7937 
   7938 	if (txq->txq_free != ofree) {
   7939 		/* Set a watchdog timer in case the chip flakes out. */
   7940 		txq->txq_lastsent = time_uptime;
   7941 		txq->txq_sending = true;
   7942 	}
   7943 }
   7944 
   7945 /*
   7946  * wm_nq_tx_offload:
   7947  *
   7948  *	Set up TCP/IP checksumming parameters for the
   7949  *	specified packet, for NEWQUEUE devices
   7950  */
   7951 static int
   7952 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7953     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7954 {
   7955 	struct mbuf *m0 = txs->txs_mbuf;
   7956 	uint32_t vl_len, mssidx, cmdc;
   7957 	struct ether_header *eh;
   7958 	int offset, iphl;
   7959 
   7960 	/*
   7961 	 * XXX It would be nice if the mbuf pkthdr had offset
   7962 	 * fields for the protocol headers.
   7963 	 */
   7964 	*cmdlenp = 0;
   7965 	*fieldsp = 0;
   7966 
   7967 	eh = mtod(m0, struct ether_header *);
   7968 	switch (htons(eh->ether_type)) {
   7969 	case ETHERTYPE_IP:
   7970 	case ETHERTYPE_IPV6:
   7971 		offset = ETHER_HDR_LEN;
   7972 		break;
   7973 
   7974 	case ETHERTYPE_VLAN:
   7975 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7976 		break;
   7977 
   7978 	default:
   7979 		/* Don't support this protocol or encapsulation. */
   7980 		*do_csum = false;
   7981 		return 0;
   7982 	}
   7983 	*do_csum = true;
   7984 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7985 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7986 
   7987 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7988 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7989 
   7990 	if ((m0->m_pkthdr.csum_flags &
   7991 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7992 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7993 	} else {
   7994 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7995 	}
   7996 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7997 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7998 
   7999 	if (vlan_has_tag(m0)) {
   8000 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8001 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8002 		*cmdlenp |= NQTX_CMD_VLE;
   8003 	}
   8004 
   8005 	mssidx = 0;
   8006 
   8007 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8008 		int hlen = offset + iphl;
   8009 		int tcp_hlen;
   8010 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8011 
   8012 		if (__predict_false(m0->m_len <
   8013 				    (hlen + sizeof(struct tcphdr)))) {
   8014 			/*
   8015 			 * TCP/IP headers are not in the first mbuf; we need
   8016 			 * to do this the slow and painful way. Let's just
   8017 			 * hope this doesn't happen very often.
   8018 			 */
   8019 			struct tcphdr th;
   8020 
   8021 			WM_Q_EVCNT_INCR(txq, tsopain);
   8022 
   8023 			m_copydata(m0, hlen, sizeof(th), &th);
   8024 			if (v4) {
   8025 				struct ip ip;
   8026 
   8027 				m_copydata(m0, offset, sizeof(ip), &ip);
   8028 				ip.ip_len = 0;
   8029 				m_copyback(m0,
   8030 				    offset + offsetof(struct ip, ip_len),
   8031 				    sizeof(ip.ip_len), &ip.ip_len);
   8032 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8033 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8034 			} else {
   8035 				struct ip6_hdr ip6;
   8036 
   8037 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8038 				ip6.ip6_plen = 0;
   8039 				m_copyback(m0,
   8040 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8041 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8042 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8043 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8044 			}
   8045 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8046 			    sizeof(th.th_sum), &th.th_sum);
   8047 
   8048 			tcp_hlen = th.th_off << 2;
   8049 		} else {
   8050 			/*
   8051 			 * TCP/IP headers are in the first mbuf; we can do
   8052 			 * this the easy way.
   8053 			 */
   8054 			struct tcphdr *th;
   8055 
   8056 			if (v4) {
   8057 				struct ip *ip =
   8058 				    (void *)(mtod(m0, char *) + offset);
   8059 				th = (void *)(mtod(m0, char *) + hlen);
   8060 
   8061 				ip->ip_len = 0;
   8062 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8063 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8064 			} else {
   8065 				struct ip6_hdr *ip6 =
   8066 				    (void *)(mtod(m0, char *) + offset);
   8067 				th = (void *)(mtod(m0, char *) + hlen);
   8068 
   8069 				ip6->ip6_plen = 0;
   8070 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8071 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8072 			}
   8073 			tcp_hlen = th->th_off << 2;
   8074 		}
   8075 		hlen += tcp_hlen;
   8076 		*cmdlenp |= NQTX_CMD_TSE;
   8077 
   8078 		if (v4) {
   8079 			WM_Q_EVCNT_INCR(txq, tso);
   8080 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8081 		} else {
   8082 			WM_Q_EVCNT_INCR(txq, tso6);
   8083 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8084 		}
   8085 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8086 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8087 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8088 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8089 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8090 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8091 	} else {
   8092 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8093 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8094 	}
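	/*
	 * Note the asymmetry above: in the TSO case, PAYLEN covers only
	 * the TCP payload (pkthdr.len minus the L2/L3/L4 headers counted
	 * in hlen), since the hardware regenerates the headers for each
	 * segment; in the non-TSO case it covers the whole packet.
	 */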
   8095 
   8096 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8097 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8098 		cmdc |= NQTXC_CMD_IP4;
   8099 	}
   8100 
   8101 	if (m0->m_pkthdr.csum_flags &
   8102 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8103 		WM_Q_EVCNT_INCR(txq, tusum);
   8104 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8105 			cmdc |= NQTXC_CMD_TCP;
   8106 		else
   8107 			cmdc |= NQTXC_CMD_UDP;
   8108 
   8109 		cmdc |= NQTXC_CMD_IP4;
   8110 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8111 	}
   8112 	if (m0->m_pkthdr.csum_flags &
   8113 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8114 		WM_Q_EVCNT_INCR(txq, tusum6);
   8115 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8116 			cmdc |= NQTXC_CMD_TCP;
   8117 		else
   8118 			cmdc |= NQTXC_CMD_UDP;
   8119 
   8120 		cmdc |= NQTXC_CMD_IP6;
   8121 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8122 	}
   8123 
   8124 	/*
    8125 	 * We don't have to write a context descriptor for every packet
    8126 	 * on NEWQUEUE controllers, that is, the 82575, 82576, 82580,
    8127 	 * I350, I354, I210 and I211; writing one per Tx queue is enough
    8128 	 * for these controllers.
    8129 	 * Writing a context descriptor for every packet adds overhead,
    8130 	 * but it does not cause problems.
   8131 	 */
   8132 	/* Fill in the context descriptor. */
   8133 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8134 	    htole32(vl_len);
   8135 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8136 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8137 	    htole32(cmdc);
   8138 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8139 	    htole32(mssidx);
   8140 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8141 	DPRINTF(WM_DEBUG_TX,
   8142 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8143 		txq->txq_next, 0, vl_len));
   8144 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8145 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8146 	txs->txs_ndesc++;
   8147 	return 0;
   8148 }
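/*
 * An illustrative sketch (not driver code) of the vl_len packing done in
 * wm_nq_tx_offload() above, for a plain TCP/IPv4 packet without a VLAN
 * tag: MACLEN is the 14-byte Ethernet header and IPLEN the 20-byte IPv4
 * header (no options).  Kept under #if 0 so it is never compiled.
 */
#if 0
	uint32_t vl_len = 0;

	vl_len |= ETHER_HDR_LEN << NQTXC_VLLEN_MACLEN_SHIFT; /* L2 hdr, 14 */
	vl_len |= 20 << NQTXC_VLLEN_IPLEN_SHIFT;	/* IPv4 header */
	/* NQTX_CMD_VLE is not set, so the VLAN field stays zero. */
#endif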
   8149 
   8150 /*
   8151  * wm_nq_start:		[ifnet interface function]
   8152  *
   8153  *	Start packet transmission on the interface for NEWQUEUE devices
   8154  */
   8155 static void
   8156 wm_nq_start(struct ifnet *ifp)
   8157 {
   8158 	struct wm_softc *sc = ifp->if_softc;
   8159 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8160 
   8161 #ifdef WM_MPSAFE
   8162 	KASSERT(if_is_mpsafe(ifp));
   8163 #endif
   8164 	/*
   8165 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8166 	 */
   8167 
   8168 	mutex_enter(txq->txq_lock);
   8169 	if (!txq->txq_stopping)
   8170 		wm_nq_start_locked(ifp);
   8171 	mutex_exit(txq->txq_lock);
   8172 }
   8173 
   8174 static void
   8175 wm_nq_start_locked(struct ifnet *ifp)
   8176 {
   8177 	struct wm_softc *sc = ifp->if_softc;
   8178 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8179 
   8180 	wm_nq_send_common_locked(ifp, txq, false);
   8181 }
   8182 
   8183 static int
   8184 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8185 {
   8186 	int qid;
   8187 	struct wm_softc *sc = ifp->if_softc;
   8188 	struct wm_txqueue *txq;
   8189 
   8190 	qid = wm_select_txqueue(ifp, m);
   8191 	txq = &sc->sc_queue[qid].wmq_txq;
   8192 
   8193 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8194 		m_freem(m);
   8195 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8196 		return ENOBUFS;
   8197 	}
   8198 
   8199 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8200 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8201 	if (m->m_flags & M_MCAST)
   8202 		if_statinc_ref(nsr, if_omcasts);
   8203 	IF_STAT_PUTREF(ifp);
   8204 
   8205 	/*
    8206 	 * There are two situations in which this mutex_tryenter() can
    8207 	 * fail at run time:
    8208 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8209 	 *     (2) contention with the deferred if_start softint
    8210 	 *         (wm_handle_queue())
    8211 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8212 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8213 	 * In case (2), the last packet enqueued to txq->txq_interq is also
    8214 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck
    8215 	 * either.
   8215 	 */
   8216 	if (mutex_tryenter(txq->txq_lock)) {
   8217 		if (!txq->txq_stopping)
   8218 			wm_nq_transmit_locked(ifp, txq);
   8219 		mutex_exit(txq->txq_lock);
   8220 	}
   8221 
   8222 	return 0;
   8223 }
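/*
 * The pattern used above (a lock-free pcq_put() followed by
 * mutex_tryenter()) is what allows wm_nq_transmit() to be called
 * concurrently from many CPUs: the packet is always enqueued safely,
 * and, per the comment above, whichever context currently holds
 * txq_lock is guaranteed to drain txq_interq before releasing it, so
 * nothing is left stranded when the tryenter fails.
 */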
   8224 
   8225 static void
   8226 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8227 {
   8228 
   8229 	wm_nq_send_common_locked(ifp, txq, true);
   8230 }
   8231 
   8232 static void
   8233 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8234     bool is_transmit)
   8235 {
   8236 	struct wm_softc *sc = ifp->if_softc;
   8237 	struct mbuf *m0;
   8238 	struct wm_txsoft *txs;
   8239 	bus_dmamap_t dmamap;
   8240 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8241 	bool do_csum, sent;
   8242 	bool remap = true;
   8243 
   8244 	KASSERT(mutex_owned(txq->txq_lock));
   8245 
   8246 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8247 		return;
   8248 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8249 		return;
   8250 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8251 		return;
   8252 
   8253 	sent = false;
   8254 
   8255 	/*
   8256 	 * Loop through the send queue, setting up transmit descriptors
   8257 	 * until we drain the queue, or use up all available transmit
   8258 	 * descriptors.
   8259 	 */
   8260 	for (;;) {
   8261 		m0 = NULL;
   8262 
   8263 		/* Get a work queue entry. */
   8264 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8265 			wm_txeof(txq, UINT_MAX);
   8266 			if (txq->txq_sfree == 0) {
   8267 				DPRINTF(WM_DEBUG_TX,
   8268 				    ("%s: TX: no free job descriptors\n",
   8269 					device_xname(sc->sc_dev)));
   8270 				WM_Q_EVCNT_INCR(txq, txsstall);
   8271 				break;
   8272 			}
   8273 		}
   8274 
   8275 		/* Grab a packet off the queue. */
   8276 		if (is_transmit)
   8277 			m0 = pcq_get(txq->txq_interq);
   8278 		else
   8279 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8280 		if (m0 == NULL)
   8281 			break;
   8282 
   8283 		DPRINTF(WM_DEBUG_TX,
   8284 		    ("%s: TX: have packet to transmit: %p\n",
   8285 		    device_xname(sc->sc_dev), m0));
   8286 
   8287 		txs = &txq->txq_soft[txq->txq_snext];
   8288 		dmamap = txs->txs_dmamap;
   8289 
   8290 		/*
   8291 		 * Load the DMA map.  If this fails, the packet either
   8292 		 * didn't fit in the allotted number of segments, or we
   8293 		 * were short on resources.  For the too-many-segments
   8294 		 * case, we simply report an error and drop the packet,
   8295 		 * since we can't sanely copy a jumbo packet to a single
   8296 		 * buffer.
   8297 		 */
   8298 retry:
   8299 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8300 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8301 		if (__predict_false(error)) {
   8302 			if (error == EFBIG) {
   8303 				if (remap == true) {
   8304 					struct mbuf *m;
   8305 
   8306 					remap = false;
   8307 					m = m_defrag(m0, M_NOWAIT);
   8308 					if (m != NULL) {
   8309 						WM_Q_EVCNT_INCR(txq, defrag);
   8310 						m0 = m;
   8311 						goto retry;
   8312 					}
   8313 				}
   8314 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8315 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8316 				    "DMA segments, dropping...\n",
   8317 				    device_xname(sc->sc_dev));
   8318 				wm_dump_mbuf_chain(sc, m0);
   8319 				m_freem(m0);
   8320 				continue;
   8321 			}
   8322 			/* Short on resources, just stop for now. */
   8323 			DPRINTF(WM_DEBUG_TX,
   8324 			    ("%s: TX: dmamap load failed: %d\n",
   8325 				device_xname(sc->sc_dev), error));
   8326 			break;
   8327 		}
   8328 
   8329 		segs_needed = dmamap->dm_nsegs;
   8330 
   8331 		/*
   8332 		 * Ensure we have enough descriptors free to describe
   8333 		 * the packet. Note, we always reserve one descriptor
   8334 		 * at the end of the ring due to the semantics of the
   8335 		 * TDT register, plus one more in the event we need
   8336 		 * to load offload context.
   8337 		 */
   8338 		if (segs_needed > txq->txq_free - 2) {
   8339 			/*
   8340 			 * Not enough free descriptors to transmit this
   8341 			 * packet.  We haven't committed anything yet,
   8342 			 * so just unload the DMA map, put the packet
    8343 			 * back on the queue, and punt. Notify the upper
   8344 			 * layer that there are no more slots left.
   8345 			 */
   8346 			DPRINTF(WM_DEBUG_TX,
   8347 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8348 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8349 				segs_needed, txq->txq_free - 1));
   8350 			if (!is_transmit)
   8351 				ifp->if_flags |= IFF_OACTIVE;
   8352 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8353 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8354 			WM_Q_EVCNT_INCR(txq, txdstall);
   8355 			break;
   8356 		}
   8357 
   8358 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8359 
   8360 		DPRINTF(WM_DEBUG_TX,
   8361 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8362 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8363 
   8364 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8365 
   8366 		/*
   8367 		 * Store a pointer to the packet so that we can free it
   8368 		 * later.
   8369 		 *
   8370 		 * Initially, we consider the number of descriptors the
    8371 		 * packet uses to be the number of DMA segments. This may be
   8372 		 * incremented by 1 if we do checksum offload (a descriptor
   8373 		 * is used to set the checksum context).
   8374 		 */
   8375 		txs->txs_mbuf = m0;
   8376 		txs->txs_firstdesc = txq->txq_next;
   8377 		txs->txs_ndesc = segs_needed;
   8378 
   8379 		/* Set up offload parameters for this packet. */
   8380 		uint32_t cmdlen, fields, dcmdlen;
   8381 		if (m0->m_pkthdr.csum_flags &
   8382 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8383 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8384 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8385 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8386 			    &do_csum) != 0) {
   8387 				/* Error message already displayed. */
   8388 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8389 				continue;
   8390 			}
   8391 		} else {
   8392 			do_csum = false;
   8393 			cmdlen = 0;
   8394 			fields = 0;
   8395 		}
   8396 
   8397 		/* Sync the DMA map. */
   8398 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8399 		    BUS_DMASYNC_PREWRITE);
   8400 
   8401 		/* Initialize the first transmit descriptor. */
   8402 		nexttx = txq->txq_next;
   8403 		if (!do_csum) {
   8404 			/* Setup a legacy descriptor */
   8405 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8406 			    dmamap->dm_segs[0].ds_addr);
   8407 			txq->txq_descs[nexttx].wtx_cmdlen =
   8408 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8409 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8410 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8411 			if (vlan_has_tag(m0)) {
   8412 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8413 				    htole32(WTX_CMD_VLE);
   8414 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8415 				    htole16(vlan_get_tag(m0));
   8416 			} else
    8417 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8418 
   8419 			dcmdlen = 0;
   8420 		} else {
   8421 			/* Setup an advanced data descriptor */
   8422 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8423 			    htole64(dmamap->dm_segs[0].ds_addr);
   8424 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8425 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8426 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8427 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8428 			    htole32(fields);
   8429 			DPRINTF(WM_DEBUG_TX,
   8430 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8431 				device_xname(sc->sc_dev), nexttx,
   8432 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8433 			DPRINTF(WM_DEBUG_TX,
   8434 			    ("\t 0x%08x%08x\n", fields,
   8435 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8436 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8437 		}
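		/*
		 * Note that the dcmdlen chosen above is applied to every
		 * remaining segment: 0 keeps them legacy descriptors,
		 * while NQTX_DTYP_D | NQTX_CMD_DEXT marks each one as an
		 * advanced data descriptor, so a single packet never
		 * mixes the two formats.
		 */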
   8438 
   8439 		lasttx = nexttx;
   8440 		nexttx = WM_NEXTTX(txq, nexttx);
   8441 		/*
    8442 		 * Fill in the remaining descriptors. The legacy and
    8443 		 * advanced formats are identical from here on.
   8444 		 */
   8445 		for (seg = 1; seg < dmamap->dm_nsegs;
   8446 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8447 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8448 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8449 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8450 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8451 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8452 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8453 			lasttx = nexttx;
   8454 
   8455 			DPRINTF(WM_DEBUG_TX,
   8456 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8457 				device_xname(sc->sc_dev), nexttx,
   8458 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8459 				dmamap->dm_segs[seg].ds_len));
   8460 		}
   8461 
   8462 		KASSERT(lasttx != -1);
   8463 
   8464 		/*
   8465 		 * Set up the command byte on the last descriptor of
   8466 		 * the packet. If we're in the interrupt delay window,
   8467 		 * delay the interrupt.
   8468 		 */
   8469 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8470 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8471 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8472 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8473 
   8474 		txs->txs_lastdesc = lasttx;
   8475 
   8476 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8477 		    device_xname(sc->sc_dev),
   8478 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8479 
   8480 		/* Sync the descriptors we're using. */
   8481 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8482 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8483 
   8484 		/* Give the packet to the chip. */
   8485 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8486 		sent = true;
   8487 
   8488 		DPRINTF(WM_DEBUG_TX,
   8489 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8490 
   8491 		DPRINTF(WM_DEBUG_TX,
   8492 		    ("%s: TX: finished transmitting packet, job %d\n",
   8493 			device_xname(sc->sc_dev), txq->txq_snext));
   8494 
   8495 		/* Advance the tx pointer. */
   8496 		txq->txq_free -= txs->txs_ndesc;
   8497 		txq->txq_next = nexttx;
   8498 
   8499 		txq->txq_sfree--;
   8500 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8501 
   8502 		/* Pass the packet to any BPF listeners. */
   8503 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8504 	}
   8505 
   8506 	if (m0 != NULL) {
   8507 		if (!is_transmit)
   8508 			ifp->if_flags |= IFF_OACTIVE;
   8509 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8510 		WM_Q_EVCNT_INCR(txq, descdrop);
   8511 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8512 			__func__));
   8513 		m_freem(m0);
   8514 	}
   8515 
   8516 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8517 		/* No more slots; notify upper layer. */
   8518 		if (!is_transmit)
   8519 			ifp->if_flags |= IFF_OACTIVE;
   8520 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8521 	}
   8522 
   8523 	if (sent) {
   8524 		/* Set a watchdog timer in case the chip flakes out. */
   8525 		txq->txq_lastsent = time_uptime;
   8526 		txq->txq_sending = true;
   8527 	}
   8528 }
   8529 
   8530 static void
   8531 wm_deferred_start_locked(struct wm_txqueue *txq)
   8532 {
   8533 	struct wm_softc *sc = txq->txq_sc;
   8534 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8535 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8536 	int qid = wmq->wmq_id;
   8537 
   8538 	KASSERT(mutex_owned(txq->txq_lock));
   8539 
   8540 	if (txq->txq_stopping) {
   8541 		mutex_exit(txq->txq_lock);
   8542 		return;
   8543 	}
   8544 
   8545 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8546 		/* XXX needed for ALTQ or a single-CPU system */
   8547 		if (qid == 0)
   8548 			wm_nq_start_locked(ifp);
   8549 		wm_nq_transmit_locked(ifp, txq);
   8550 	} else {
    8551 		/* XXX needed for ALTQ or a single-CPU system */
   8552 		if (qid == 0)
   8553 			wm_start_locked(ifp);
   8554 		wm_transmit_locked(ifp, txq);
   8555 	}
   8556 }
   8557 
   8558 /* Interrupt */
   8559 
   8560 /*
   8561  * wm_txeof:
   8562  *
   8563  *	Helper; handle transmit interrupts.
   8564  */
   8565 static bool
   8566 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8567 {
   8568 	struct wm_softc *sc = txq->txq_sc;
   8569 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8570 	struct wm_txsoft *txs;
   8571 	int count = 0;
   8572 	int i;
   8573 	uint8_t status;
   8574 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8575 	bool more = false;
   8576 
   8577 	KASSERT(mutex_owned(txq->txq_lock));
   8578 
   8579 	if (txq->txq_stopping)
   8580 		return false;
   8581 
   8582 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8583 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8584 	if (wmq->wmq_id == 0)
   8585 		ifp->if_flags &= ~IFF_OACTIVE;
   8586 
   8587 	/*
   8588 	 * Go through the Tx list and free mbufs for those
   8589 	 * frames which have been transmitted.
   8590 	 */
   8591 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8592 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8593 		if (limit-- == 0) {
   8594 			more = true;
   8595 			DPRINTF(WM_DEBUG_TX,
   8596 			    ("%s: TX: loop limited, job %d is not processed\n",
   8597 				device_xname(sc->sc_dev), i));
   8598 			break;
   8599 		}
   8600 
   8601 		txs = &txq->txq_soft[i];
   8602 
   8603 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8604 			device_xname(sc->sc_dev), i));
   8605 
   8606 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8607 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8608 
   8609 		status =
   8610 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8611 		if ((status & WTX_ST_DD) == 0) {
   8612 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8613 			    BUS_DMASYNC_PREREAD);
   8614 			break;
   8615 		}
   8616 
   8617 		count++;
   8618 		DPRINTF(WM_DEBUG_TX,
   8619 		    ("%s: TX: job %d done: descs %d..%d\n",
   8620 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8621 		    txs->txs_lastdesc));
   8622 
   8623 		/*
   8624 		 * XXX We should probably be using the statistics
   8625 		 * XXX registers, but I don't know if they exist
   8626 		 * XXX on chips before the i82544.
   8627 		 */
   8628 
   8629 #ifdef WM_EVENT_COUNTERS
   8630 		if (status & WTX_ST_TU)
   8631 			WM_Q_EVCNT_INCR(txq, underrun);
   8632 #endif /* WM_EVENT_COUNTERS */
   8633 
   8634 		/*
    8635 		 * The documentation for the 82574 and newer says the status
    8636 		 * field has neither an EC (Excessive Collision) bit nor an
    8637 		 * LC (Late Collision) bit (they are reserved). Refer to the
    8638 		 * "PCIe GbE Controller Open Source Software Developer's
    8639 		 * Manual", the 82574 datasheet, and newer.
    8640 		 *
    8641 		 * XXX I saw the LC bit set on an I218 even though the media
    8642 		 * was full duplex, so the bit might have some other
    8643 		 * meaning ... (I have no documentation).
   8643 		 */
   8644 
   8645 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8646 		    && ((sc->sc_type < WM_T_82574)
   8647 			|| (sc->sc_type == WM_T_80003))) {
   8648 			if_statinc(ifp, if_oerrors);
   8649 			if (status & WTX_ST_LC)
   8650 				log(LOG_WARNING, "%s: late collision\n",
   8651 				    device_xname(sc->sc_dev));
   8652 			else if (status & WTX_ST_EC) {
   8653 				if_statadd(ifp, if_collisions,
   8654 				    TX_COLLISION_THRESHOLD + 1);
   8655 				log(LOG_WARNING, "%s: excessive collisions\n",
   8656 				    device_xname(sc->sc_dev));
   8657 			}
   8658 		} else
   8659 			if_statinc(ifp, if_opackets);
   8660 
   8661 		txq->txq_packets++;
   8662 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8663 
   8664 		txq->txq_free += txs->txs_ndesc;
   8665 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8666 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8667 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8668 		m_freem(txs->txs_mbuf);
   8669 		txs->txs_mbuf = NULL;
   8670 	}
   8671 
   8672 	/* Update the dirty transmit buffer pointer. */
   8673 	txq->txq_sdirty = i;
   8674 	DPRINTF(WM_DEBUG_TX,
   8675 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8676 
   8677 	/*
   8678 	 * If there are no more pending transmissions, cancel the watchdog
   8679 	 * timer.
   8680 	 */
   8681 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8682 		txq->txq_sending = false;
   8683 
   8684 	return more;
   8685 }
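/*
 * A note on the reclaim loop above: jobs complete strictly in order.
 * WTX_CMD_RS is set only on the last descriptor of each job, so the
 * hardware reports WTX_ST_DD only there; as soon as one job's last
 * descriptor lacks DD, we resync that descriptor for a later re-read
 * and leave it and all following jobs for the next call.
 */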
   8686 
   8687 static inline uint32_t
   8688 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8689 {
   8690 	struct wm_softc *sc = rxq->rxq_sc;
   8691 
   8692 	if (sc->sc_type == WM_T_82574)
   8693 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8694 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8695 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8696 	else
   8697 		return rxq->rxq_descs[idx].wrx_status;
   8698 }
   8699 
   8700 static inline uint32_t
   8701 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8702 {
   8703 	struct wm_softc *sc = rxq->rxq_sc;
   8704 
   8705 	if (sc->sc_type == WM_T_82574)
   8706 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8707 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8708 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8709 	else
   8710 		return rxq->rxq_descs[idx].wrx_errors;
   8711 }
   8712 
   8713 static inline uint16_t
   8714 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8715 {
   8716 	struct wm_softc *sc = rxq->rxq_sc;
   8717 
   8718 	if (sc->sc_type == WM_T_82574)
   8719 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8720 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8721 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8722 	else
   8723 		return rxq->rxq_descs[idx].wrx_special;
   8724 }
   8725 
   8726 static inline int
   8727 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8728 {
   8729 	struct wm_softc *sc = rxq->rxq_sc;
   8730 
   8731 	if (sc->sc_type == WM_T_82574)
   8732 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8733 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8734 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8735 	else
   8736 		return rxq->rxq_descs[idx].wrx_len;
   8737 }
   8738 
   8739 #ifdef WM_DEBUG
   8740 static inline uint32_t
   8741 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8742 {
   8743 	struct wm_softc *sc = rxq->rxq_sc;
   8744 
   8745 	if (sc->sc_type == WM_T_82574)
   8746 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8747 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8748 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8749 	else
   8750 		return 0;
   8751 }
   8752 
   8753 static inline uint8_t
   8754 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8755 {
   8756 	struct wm_softc *sc = rxq->rxq_sc;
   8757 
   8758 	if (sc->sc_type == WM_T_82574)
   8759 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8760 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8761 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8762 	else
   8763 		return 0;
   8764 }
   8765 #endif /* WM_DEBUG */
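/*
 * Summary of the three receive descriptor layouts the accessors above
 * dispatch on: the 82574 uses the extended format (erx_ctx fields),
 * WM_F_NEWQUEUE controllers (the 82575 family listed in
 * wm_nq_tx_offload()) use the NEWQUEUE format (nqrx_ctx fields), and
 * everything else uses the legacy format.  Only the extended and
 * NEWQUEUE formats carry RSS hash/type information, which is why the
 * legacy branches of the RSS accessors return 0.
 */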
   8766 
   8767 static inline bool
   8768 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8769     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8770 {
   8771 
   8772 	if (sc->sc_type == WM_T_82574)
   8773 		return (status & ext_bit) != 0;
   8774 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8775 		return (status & nq_bit) != 0;
   8776 	else
   8777 		return (status & legacy_bit) != 0;
   8778 }
   8779 
   8780 static inline bool
   8781 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8782     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8783 {
   8784 
   8785 	if (sc->sc_type == WM_T_82574)
   8786 		return (error & ext_bit) != 0;
   8787 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8788 		return (error & nq_bit) != 0;
   8789 	else
   8790 		return (error & legacy_bit) != 0;
   8791 }
   8792 
   8793 static inline bool
   8794 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8795 {
   8796 
   8797 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8798 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8799 		return true;
   8800 	else
   8801 		return false;
   8802 }
   8803 
   8804 static inline bool
   8805 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8806 {
   8807 	struct wm_softc *sc = rxq->rxq_sc;
   8808 
   8809 	/* XXX missing error bit for newqueue? */
   8810 	if (wm_rxdesc_is_set_error(sc, errors,
   8811 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8812 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8813 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8814 		NQRXC_ERROR_RXE)) {
   8815 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8816 		    EXTRXC_ERROR_SE, 0))
   8817 			log(LOG_WARNING, "%s: symbol error\n",
   8818 			    device_xname(sc->sc_dev));
   8819 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8820 		    EXTRXC_ERROR_SEQ, 0))
   8821 			log(LOG_WARNING, "%s: receive sequence error\n",
   8822 			    device_xname(sc->sc_dev));
   8823 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8824 		    EXTRXC_ERROR_CE, 0))
   8825 			log(LOG_WARNING, "%s: CRC error\n",
   8826 			    device_xname(sc->sc_dev));
   8827 		return true;
   8828 	}
   8829 
   8830 	return false;
   8831 }
   8832 
   8833 static inline bool
   8834 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8835 {
   8836 	struct wm_softc *sc = rxq->rxq_sc;
   8837 
   8838 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8839 		NQRXC_STATUS_DD)) {
   8840 		/* We have processed all of the receive descriptors. */
   8841 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8842 		return false;
   8843 	}
   8844 
   8845 	return true;
   8846 }
   8847 
   8848 static inline bool
   8849 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8850     uint16_t vlantag, struct mbuf *m)
   8851 {
   8852 
   8853 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8854 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8855 		vlan_set_tag(m, le16toh(vlantag));
   8856 	}
   8857 
   8858 	return true;
   8859 }
   8860 
   8861 static inline void
   8862 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8863     uint32_t errors, struct mbuf *m)
   8864 {
   8865 	struct wm_softc *sc = rxq->rxq_sc;
   8866 
   8867 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8868 		if (wm_rxdesc_is_set_status(sc, status,
   8869 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8870 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8871 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8872 			if (wm_rxdesc_is_set_error(sc, errors,
   8873 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8874 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8875 		}
   8876 		if (wm_rxdesc_is_set_status(sc, status,
   8877 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8878 			/*
   8879 			 * Note: we don't know if this was TCP or UDP,
   8880 			 * so we just set both bits, and expect the
   8881 			 * upper layers to deal.
   8882 			 */
   8883 			WM_Q_EVCNT_INCR(rxq, tusum);
   8884 			m->m_pkthdr.csum_flags |=
   8885 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8886 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8887 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8888 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8889 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8890 		}
   8891 	}
   8892 }
   8893 
   8894 /*
   8895  * wm_rxeof:
   8896  *
   8897  *	Helper; handle receive interrupts.
   8898  */
   8899 static bool
   8900 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8901 {
   8902 	struct wm_softc *sc = rxq->rxq_sc;
   8903 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8904 	struct wm_rxsoft *rxs;
   8905 	struct mbuf *m;
   8906 	int i, len;
   8907 	int count = 0;
   8908 	uint32_t status, errors;
   8909 	uint16_t vlantag;
   8910 	bool more = false;
   8911 
   8912 	KASSERT(mutex_owned(rxq->rxq_lock));
   8913 
   8914 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8915 		if (limit-- == 0) {
   8916 			rxq->rxq_ptr = i;
   8917 			more = true;
   8918 			DPRINTF(WM_DEBUG_RX,
   8919 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8920 				device_xname(sc->sc_dev), i));
   8921 			break;
   8922 		}
   8923 
   8924 		rxs = &rxq->rxq_soft[i];
   8925 
   8926 		DPRINTF(WM_DEBUG_RX,
   8927 		    ("%s: RX: checking descriptor %d\n",
   8928 			device_xname(sc->sc_dev), i));
   8929 		wm_cdrxsync(rxq, i,
   8930 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8931 
   8932 		status = wm_rxdesc_get_status(rxq, i);
   8933 		errors = wm_rxdesc_get_errors(rxq, i);
   8934 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8935 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8936 #ifdef WM_DEBUG
   8937 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8938 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8939 #endif
   8940 
   8941 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8942 			/*
    8943 			 * Update the receive pointer while holding rxq_lock
    8944 			 * so that it stays consistent with the counters.
   8945 			 */
   8946 			rxq->rxq_ptr = i;
   8947 			break;
   8948 		}
   8949 
   8950 		count++;
   8951 		if (__predict_false(rxq->rxq_discard)) {
   8952 			DPRINTF(WM_DEBUG_RX,
   8953 			    ("%s: RX: discarding contents of descriptor %d\n",
   8954 				device_xname(sc->sc_dev), i));
   8955 			wm_init_rxdesc(rxq, i);
   8956 			if (wm_rxdesc_is_eop(rxq, status)) {
   8957 				/* Reset our state. */
   8958 				DPRINTF(WM_DEBUG_RX,
   8959 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8960 					device_xname(sc->sc_dev)));
   8961 				rxq->rxq_discard = 0;
   8962 			}
   8963 			continue;
   8964 		}
   8965 
   8966 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8967 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8968 
   8969 		m = rxs->rxs_mbuf;
   8970 
   8971 		/*
   8972 		 * Add a new receive buffer to the ring, unless of
   8973 		 * course the length is zero. Treat the latter as a
   8974 		 * failed mapping.
   8975 		 */
   8976 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8977 			/*
   8978 			 * Failed, throw away what we've done so
   8979 			 * far, and discard the rest of the packet.
   8980 			 */
   8981 			if_statinc(ifp, if_ierrors);
   8982 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8983 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8984 			wm_init_rxdesc(rxq, i);
   8985 			if (!wm_rxdesc_is_eop(rxq, status))
   8986 				rxq->rxq_discard = 1;
   8987 			if (rxq->rxq_head != NULL)
   8988 				m_freem(rxq->rxq_head);
   8989 			WM_RXCHAIN_RESET(rxq);
   8990 			DPRINTF(WM_DEBUG_RX,
   8991 			    ("%s: RX: Rx buffer allocation failed, "
   8992 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8993 				rxq->rxq_discard ? " (discard)" : ""));
   8994 			continue;
   8995 		}
   8996 
   8997 		m->m_len = len;
   8998 		rxq->rxq_len += len;
   8999 		DPRINTF(WM_DEBUG_RX,
   9000 		    ("%s: RX: buffer at %p len %d\n",
   9001 			device_xname(sc->sc_dev), m->m_data, len));
   9002 
   9003 		/* If this is not the end of the packet, keep looking. */
   9004 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9005 			WM_RXCHAIN_LINK(rxq, m);
   9006 			DPRINTF(WM_DEBUG_RX,
   9007 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9008 				device_xname(sc->sc_dev), rxq->rxq_len));
   9009 			continue;
   9010 		}
   9011 
   9012 		/*
   9013 		 * Okay, we have the entire packet now. The chip is
    9014 		 * configured to include the FCS except on the I350, I354
    9015 		 * and I21[01] (not all chips can be configured to strip
    9016 		 * it), so we need to trim it. We may need to adjust the
    9017 		 * length of the previous mbuf in the chain if the current
    9018 		 * mbuf is too short. Due to an erratum, the RCTL_SECRC
    9019 		 * bit in the RCTL register is always set on the I350, so
    9020 		 * we don't trim the FCS there.
   9021 		 */
   9022 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   9023 		    && (sc->sc_type != WM_T_I210)
   9024 		    && (sc->sc_type != WM_T_I211)) {
   9025 			if (m->m_len < ETHER_CRC_LEN) {
   9026 				rxq->rxq_tail->m_len
   9027 				    -= (ETHER_CRC_LEN - m->m_len);
   9028 				m->m_len = 0;
   9029 			} else
   9030 				m->m_len -= ETHER_CRC_LEN;
   9031 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9032 		} else
   9033 			len = rxq->rxq_len;
   9034 
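		/*
		 * Trimming example: if the final mbuf of a chain holds
		 * only 2 bytes, 2 of the 4 FCS bytes live in the previous
		 * mbuf, so that mbuf is shortened by
		 * ETHER_CRC_LEN - m_len == 2 and the final mbuf's length
		 * is zeroed.
		 */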
   9035 		WM_RXCHAIN_LINK(rxq, m);
   9036 
   9037 		*rxq->rxq_tailp = NULL;
   9038 		m = rxq->rxq_head;
   9039 
   9040 		WM_RXCHAIN_RESET(rxq);
   9041 
   9042 		DPRINTF(WM_DEBUG_RX,
   9043 		    ("%s: RX: have entire packet, len -> %d\n",
   9044 			device_xname(sc->sc_dev), len));
   9045 
   9046 		/* If an error occurred, update stats and drop the packet. */
   9047 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9048 			m_freem(m);
   9049 			continue;
   9050 		}
   9051 
   9052 		/* No errors.  Receive the packet. */
   9053 		m_set_rcvif(m, ifp);
   9054 		m->m_pkthdr.len = len;
   9055 		/*
    9056 		 * TODO: we should save the rsshash and rsstype in this
    9057 		 * mbuf.
   9058 		 */
   9059 		DPRINTF(WM_DEBUG_RX,
   9060 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9061 			device_xname(sc->sc_dev), rsstype, rsshash));
   9062 
   9063 		/*
   9064 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9065 		 * for us.  Associate the tag with the packet.
   9066 		 */
   9067 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9068 			continue;
   9069 
   9070 		/* Set up checksum info for this packet. */
   9071 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9072 		/*
    9073 		 * Update the receive pointer while holding rxq_lock so
    9074 		 * that it stays consistent with the counters.
   9075 		 */
   9076 		rxq->rxq_ptr = i;
   9077 		rxq->rxq_packets++;
   9078 		rxq->rxq_bytes += len;
   9079 		mutex_exit(rxq->rxq_lock);
   9080 
   9081 		/* Pass it on. */
   9082 		if_percpuq_enqueue(sc->sc_ipq, m);
   9083 
   9084 		mutex_enter(rxq->rxq_lock);
   9085 
   9086 		if (rxq->rxq_stopping)
   9087 			break;
   9088 	}
   9089 
   9090 	DPRINTF(WM_DEBUG_RX,
   9091 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9092 
   9093 	return more;
   9094 }
   9095 
   9096 /*
   9097  * wm_linkintr_gmii:
   9098  *
   9099  *	Helper; handle link interrupts for GMII.
   9100  */
   9101 static void
   9102 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9103 {
   9104 	device_t dev = sc->sc_dev;
   9105 	uint32_t status, reg;
   9106 	bool link;
   9107 	int rv;
   9108 
   9109 	KASSERT(WM_CORE_LOCKED(sc));
   9110 
   9111 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9112 		__func__));
   9113 
   9114 	if ((icr & ICR_LSC) == 0) {
   9115 		if (icr & ICR_RXSEQ)
   9116 			DPRINTF(WM_DEBUG_LINK,
   9117 			    ("%s: LINK Receive sequence error\n",
   9118 				device_xname(dev)));
   9119 		return;
   9120 	}
   9121 
   9122 	/* Link status changed */
   9123 	status = CSR_READ(sc, WMREG_STATUS);
   9124 	link = status & STATUS_LU;
   9125 	if (link) {
   9126 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9127 			device_xname(dev),
   9128 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9129 	} else {
   9130 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9131 			device_xname(dev)));
   9132 	}
   9133 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9134 		wm_gig_downshift_workaround_ich8lan(sc);
   9135 
   9136 	if ((sc->sc_type == WM_T_ICH8)
   9137 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9138 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9139 	}
   9140 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9141 		device_xname(dev)));
   9142 	mii_pollstat(&sc->sc_mii);
   9143 	if (sc->sc_type == WM_T_82543) {
   9144 		int miistatus, active;
   9145 
   9146 		/*
    9147 		 * With the 82543, we need to force the MAC's
    9148 		 * speed and duplex to match the PHY's speed
    9149 		 * and duplex configuration.
   9150 		 */
   9151 		miistatus = sc->sc_mii.mii_media_status;
   9152 
   9153 		if (miistatus & IFM_ACTIVE) {
   9154 			active = sc->sc_mii.mii_media_active;
   9155 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9156 			switch (IFM_SUBTYPE(active)) {
   9157 			case IFM_10_T:
   9158 				sc->sc_ctrl |= CTRL_SPEED_10;
   9159 				break;
   9160 			case IFM_100_TX:
   9161 				sc->sc_ctrl |= CTRL_SPEED_100;
   9162 				break;
   9163 			case IFM_1000_T:
   9164 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9165 				break;
   9166 			default:
   9167 				/*
   9168 				 * Fiber?
    9169 				 * Should not enter here.
   9170 				 */
   9171 				device_printf(dev, "unknown media (%x)\n",
   9172 				    active);
   9173 				break;
   9174 			}
   9175 			if (active & IFM_FDX)
   9176 				sc->sc_ctrl |= CTRL_FD;
   9177 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9178 		}
   9179 	} else if (sc->sc_type == WM_T_PCH) {
   9180 		wm_k1_gig_workaround_hv(sc,
   9181 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9182 	}
   9183 
   9184 	/*
   9185 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9186 	 * aggressive resulting in many collisions. To avoid this, increase
   9187 	 * the IPG and reduce Rx latency in the PHY.
   9188 	 */
   9189 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9190 	    && link) {
   9191 		uint32_t tipg_reg;
   9192 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9193 		bool fdx;
   9194 		uint16_t emi_addr, emi_val;
   9195 
   9196 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9197 		tipg_reg &= ~TIPG_IPGT_MASK;
   9198 		fdx = status & STATUS_FD;
   9199 
   9200 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9201 			tipg_reg |= 0xff;
   9202 			/* Reduce Rx latency in analog PHY */
   9203 			emi_val = 0;
   9204 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9205 		    fdx && speed != STATUS_SPEED_1000) {
   9206 			tipg_reg |= 0xc;
   9207 			emi_val = 1;
   9208 		} else {
    9209 			/* Roll back to the default values */
   9210 			tipg_reg |= 0x08;
   9211 			emi_val = 1;
   9212 		}
   9213 
   9214 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9215 
   9216 		rv = sc->phy.acquire(sc);
   9217 		if (rv)
   9218 			return;
   9219 
   9220 		if (sc->sc_type == WM_T_PCH2)
   9221 			emi_addr = I82579_RX_CONFIG;
   9222 		else
   9223 			emi_addr = I217_RX_CONFIG;
   9224 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9225 
   9226 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9227 			uint16_t phy_reg;
   9228 
   9229 			sc->phy.readreg_locked(dev, 2,
   9230 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9231 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9232 			if (speed == STATUS_SPEED_100
   9233 			    || speed == STATUS_SPEED_10)
   9234 				phy_reg |= 0x3e8;
   9235 			else
   9236 				phy_reg |= 0xfa;
   9237 			sc->phy.writereg_locked(dev, 2,
   9238 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9239 
   9240 			if (speed == STATUS_SPEED_1000) {
   9241 				sc->phy.readreg_locked(dev, 2,
   9242 				    HV_PM_CTRL, &phy_reg);
   9243 
   9244 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9245 
   9246 				sc->phy.writereg_locked(dev, 2,
   9247 				    HV_PM_CTRL, phy_reg);
   9248 			}
   9249 		}
   9250 		sc->phy.release(sc);
   9251 
   9252 		if (rv)
   9253 			return;
   9254 
   9255 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9256 			uint16_t data, ptr_gap;
   9257 
   9258 			if (speed == STATUS_SPEED_1000) {
   9259 				rv = sc->phy.acquire(sc);
   9260 				if (rv)
   9261 					return;
   9262 
   9263 				rv = sc->phy.readreg_locked(dev, 2,
   9264 				    I219_UNKNOWN1, &data);
   9265 				if (rv) {
   9266 					sc->phy.release(sc);
   9267 					return;
   9268 				}
   9269 
   9270 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9271 				if (ptr_gap < 0x18) {
   9272 					data &= ~(0x3ff << 2);
   9273 					data |= (0x18 << 2);
   9274 					rv = sc->phy.writereg_locked(dev,
   9275 					    2, I219_UNKNOWN1, data);
   9276 				}
   9277 				sc->phy.release(sc);
   9278 				if (rv)
   9279 					return;
   9280 			} else {
   9281 				rv = sc->phy.acquire(sc);
   9282 				if (rv)
   9283 					return;
   9284 
   9285 				rv = sc->phy.writereg_locked(dev, 2,
   9286 				    I219_UNKNOWN1, 0xc023);
   9287 				sc->phy.release(sc);
   9288 				if (rv)
   9289 					return;
   9290 
   9291 			}
   9292 		}
   9293 	}
   9294 
   9295 	/*
    9296 	 * Work around the I217 packet loss issue: ensure that the
    9297 	 * FEXTNVM4 Beacon Duration is set correctly on power-up, and
    9298 	 * set the Beacon Duration for the I217 to 8 usec.
   9300 	 */
   9301 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9302 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9303 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9304 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9305 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9306 	}
   9307 
    9308 	/* Work around the I218 hang issue */
   9309 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9310 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9311 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9312 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9313 		wm_k1_workaround_lpt_lp(sc, link);
   9314 
   9315 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9316 		/*
   9317 		 * Set platform power management values for Latency
   9318 		 * Tolerance Reporting (LTR)
   9319 		 */
   9320 		wm_platform_pm_pch_lpt(sc,
   9321 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9322 	}
   9323 
   9324 	/* Clear link partner's EEE ability */
   9325 	sc->eee_lp_ability = 0;
   9326 
   9327 	/* FEXTNVM6 K1-off workaround */
   9328 	if (sc->sc_type == WM_T_PCH_SPT) {
   9329 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9330 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9331 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9332 		else
   9333 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9334 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9335 	}
   9336 
   9337 	if (!link)
   9338 		return;
   9339 
   9340 	switch (sc->sc_type) {
   9341 	case WM_T_PCH2:
   9342 		wm_k1_workaround_lv(sc);
   9343 		/* FALLTHROUGH */
   9344 	case WM_T_PCH:
   9345 		if (sc->sc_phytype == WMPHY_82578)
   9346 			wm_link_stall_workaround_hv(sc);
   9347 		break;
   9348 	default:
   9349 		break;
   9350 	}
   9351 
   9352 	/* Enable/Disable EEE after link up */
   9353 	if (sc->sc_phytype > WMPHY_82579)
   9354 		wm_set_eee_pchlan(sc);
   9355 }
   9356 
   9357 /*
   9358  * wm_linkintr_tbi:
   9359  *
   9360  *	Helper; handle link interrupts for TBI mode.
   9361  */
   9362 static void
   9363 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9364 {
   9365 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9366 	uint32_t status;
   9367 
   9368 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9369 		__func__));
   9370 
   9371 	status = CSR_READ(sc, WMREG_STATUS);
   9372 	if (icr & ICR_LSC) {
   9373 		wm_check_for_link(sc);
   9374 		if (status & STATUS_LU) {
   9375 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9376 				device_xname(sc->sc_dev),
   9377 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9378 			/*
   9379 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9380 			 * so we should update sc->sc_ctrl
   9381 			 */
   9382 
   9383 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9384 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9385 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9386 			if (status & STATUS_FD)
   9387 				sc->sc_tctl |=
   9388 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9389 			else
   9390 				sc->sc_tctl |=
   9391 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9392 			if (sc->sc_ctrl & CTRL_TFCE)
   9393 				sc->sc_fcrtl |= FCRTL_XONE;
   9394 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9395 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9396 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9397 			sc->sc_tbi_linkup = 1;
   9398 			if_link_state_change(ifp, LINK_STATE_UP);
   9399 		} else {
   9400 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9401 				device_xname(sc->sc_dev)));
   9402 			sc->sc_tbi_linkup = 0;
   9403 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9404 		}
   9405 		/* Update LED */
   9406 		wm_tbi_serdes_set_linkled(sc);
   9407 	} else if (icr & ICR_RXSEQ)
   9408 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9409 			device_xname(sc->sc_dev)));
   9410 }
   9411 
   9412 /*
   9413  * wm_linkintr_serdes:
   9414  *
    9415  *	Helper; handle link interrupts for SERDES mode.
   9416  */
   9417 static void
   9418 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9419 {
   9420 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9421 	struct mii_data *mii = &sc->sc_mii;
   9422 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9423 	uint32_t pcs_adv, pcs_lpab, reg;
   9424 
   9425 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9426 		__func__));
   9427 
   9428 	if (icr & ICR_LSC) {
   9429 		/* Check PCS */
   9430 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9431 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9432 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9433 				device_xname(sc->sc_dev)));
   9434 			mii->mii_media_status |= IFM_ACTIVE;
   9435 			sc->sc_tbi_linkup = 1;
   9436 			if_link_state_change(ifp, LINK_STATE_UP);
   9437 		} else {
   9438 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9439 				device_xname(sc->sc_dev)));
   9440 			mii->mii_media_status |= IFM_NONE;
   9441 			sc->sc_tbi_linkup = 0;
   9442 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9443 			wm_tbi_serdes_set_linkled(sc);
   9444 			return;
   9445 		}
   9446 		mii->mii_media_active |= IFM_1000_SX;
   9447 		if ((reg & PCS_LSTS_FDX) != 0)
   9448 			mii->mii_media_active |= IFM_FDX;
   9449 		else
   9450 			mii->mii_media_active |= IFM_HDX;
   9451 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9452 			/* Check flow */
   9453 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9454 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9455 				DPRINTF(WM_DEBUG_LINK,
   9456 				    ("XXX LINKOK but not ACOMP\n"));
   9457 				return;
   9458 			}
   9459 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9460 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9461 			DPRINTF(WM_DEBUG_LINK,
   9462 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9463 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9464 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9465 				mii->mii_media_active |= IFM_FLOW
   9466 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9467 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9468 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9469 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9470 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9471 				mii->mii_media_active |= IFM_FLOW
   9472 				    | IFM_ETH_TXPAUSE;
   9473 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9474 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9475 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9476 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9477 				mii->mii_media_active |= IFM_FLOW
   9478 				    | IFM_ETH_RXPAUSE;
   9479 		}
   9480 		/* Update LED */
   9481 		wm_tbi_serdes_set_linkled(sc);
   9482 	} else
   9483 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9484 		    device_xname(sc->sc_dev)));
   9485 }
   9486 
   9487 /*
   9488  * wm_linkintr:
   9489  *
   9490  *	Helper; handle link interrupts.
   9491  */
   9492 static void
   9493 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9494 {
   9495 
   9496 	KASSERT(WM_CORE_LOCKED(sc));
   9497 
   9498 	if (sc->sc_flags & WM_F_HAS_MII)
   9499 		wm_linkintr_gmii(sc, icr);
   9500 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9501 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9502 		wm_linkintr_serdes(sc, icr);
   9503 	else
   9504 		wm_linkintr_tbi(sc, icr);
   9505 }
   9506 
   9507 
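/*
 * wm_sched_handle_queue:
 *
 *	Helper; schedule deferred Tx/Rx processing for a queue, using
 *	either the per-queue workqueue or the per-queue softint.
 */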
   9508 static inline void
   9509 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9510 {
   9511 
   9512 	if (wmq->wmq_txrx_use_workqueue)
   9513 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9514 	else
   9515 		softint_schedule(wmq->wmq_si);
   9516 }
   9517 
   9518 /*
   9519  * wm_intr_legacy:
   9520  *
   9521  *	Interrupt service routine for INTx and MSI.
   9522  */
   9523 static int
   9524 wm_intr_legacy(void *arg)
   9525 {
   9526 	struct wm_softc *sc = arg;
   9527 	struct wm_queue *wmq = &sc->sc_queue[0];
   9528 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9529 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9530 	uint32_t icr, rndval = 0;
   9531 	int handled = 0;
   9532 
   9533 	while (1 /* CONSTCOND */) {
   9534 		icr = CSR_READ(sc, WMREG_ICR);
   9535 		if ((icr & sc->sc_icr) == 0)
   9536 			break;
   9537 		if (handled == 0)
   9538 			DPRINTF(WM_DEBUG_TX,
    9539 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9540 		if (rndval == 0)
   9541 			rndval = icr;
   9542 
   9543 		mutex_enter(rxq->rxq_lock);
   9544 
   9545 		if (rxq->rxq_stopping) {
   9546 			mutex_exit(rxq->rxq_lock);
   9547 			break;
   9548 		}
   9549 
   9550 		handled = 1;
   9551 
   9552 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9553 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9554 			DPRINTF(WM_DEBUG_RX,
   9555 			    ("%s: RX: got Rx intr 0x%08x\n",
   9556 				device_xname(sc->sc_dev),
   9557 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9558 			WM_Q_EVCNT_INCR(rxq, intr);
   9559 		}
   9560 #endif
   9561 		/*
   9562 		 * wm_rxeof() does *not* call upper layer functions directly,
    9563 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9564 		 * So, we can call wm_rxeof() in interrupt context.
   9565 		 */
   9566 		wm_rxeof(rxq, UINT_MAX);
   9567 		/* Fill lower bits with RX index. See below for the upper. */
   9568 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9569 
   9570 		mutex_exit(rxq->rxq_lock);
   9571 		mutex_enter(txq->txq_lock);
   9572 
   9573 		if (txq->txq_stopping) {
   9574 			mutex_exit(txq->txq_lock);
   9575 			break;
   9576 		}
   9577 
   9578 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9579 		if (icr & ICR_TXDW) {
   9580 			DPRINTF(WM_DEBUG_TX,
   9581 			    ("%s: TX: got TXDW interrupt\n",
   9582 				device_xname(sc->sc_dev)));
   9583 			WM_Q_EVCNT_INCR(txq, txdw);
   9584 		}
   9585 #endif
   9586 		wm_txeof(txq, UINT_MAX);
   9587 		/* Fill upper bits with TX index. See above for the lower. */
    9588 		rndval |= txq->txq_next * WM_NRXDESC;
   9589 
   9590 		mutex_exit(txq->txq_lock);
   9591 		WM_CORE_LOCK(sc);
   9592 
   9593 		if (sc->sc_core_stopping) {
   9594 			WM_CORE_UNLOCK(sc);
   9595 			break;
   9596 		}
   9597 
   9598 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9599 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9600 			wm_linkintr(sc, icr);
   9601 		}
   9602 		if ((icr & ICR_GPI(0)) != 0)
   9603 			device_printf(sc->sc_dev, "got module interrupt\n");
   9604 
   9605 		WM_CORE_UNLOCK(sc);
   9606 
   9607 		if (icr & ICR_RXO) {
   9608 #if defined(WM_DEBUG)
   9609 			log(LOG_WARNING, "%s: Receive overrun\n",
   9610 			    device_xname(sc->sc_dev));
   9611 #endif /* defined(WM_DEBUG) */
   9612 		}
   9613 	}
   9614 
   9615 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
   9616 
   9617 	if (handled) {
   9618 		/* Try to get more packets going. */
   9619 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9620 		wm_sched_handle_queue(sc, wmq);
   9621 	}
   9622 
   9623 	return handled;
   9624 }
   9625 
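/*
 * wm_txrxintr_disable:
 *
 *	Helper; mask (disable) the Tx/Rx interrupts of the given queue.
 */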
   9626 static inline void
   9627 wm_txrxintr_disable(struct wm_queue *wmq)
   9628 {
   9629 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9630 
   9631 	if (sc->sc_type == WM_T_82574)
   9632 		CSR_WRITE(sc, WMREG_IMC,
   9633 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9634 	else if (sc->sc_type == WM_T_82575)
   9635 		CSR_WRITE(sc, WMREG_EIMC,
   9636 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9637 	else
   9638 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9639 }
   9640 
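/*
 * wm_txrxintr_enable:
 *
 *	Helper; recalculate the interrupt throttling rate and unmask
 *	(re-enable) the Tx/Rx interrupts of the given queue.
 */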
   9641 static inline void
   9642 wm_txrxintr_enable(struct wm_queue *wmq)
   9643 {
   9644 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9645 
   9646 	wm_itrs_calculate(sc, wmq);
   9647 
   9648 	/*
    9649 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9650 	 * here. There is no need to care whether RXQ(0) or RXQ(1) enables
    9651 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
    9652 	 * its wm_handle_queue(wmq) is running.
   9653 	 */
   9654 	if (sc->sc_type == WM_T_82574)
   9655 		CSR_WRITE(sc, WMREG_IMS,
   9656 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9657 	else if (sc->sc_type == WM_T_82575)
   9658 		CSR_WRITE(sc, WMREG_EIMS,
   9659 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9660 	else
   9661 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9662 }
   9663 
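/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx queues for MSI-X.
 */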
   9664 static int
   9665 wm_txrxintr_msix(void *arg)
   9666 {
   9667 	struct wm_queue *wmq = arg;
   9668 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9669 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9670 	struct wm_softc *sc = txq->txq_sc;
   9671 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9672 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9673 	uint32_t rndval = 0;
   9674 	bool txmore;
   9675 	bool rxmore;
   9676 
   9677 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9678 
   9679 	DPRINTF(WM_DEBUG_TX,
   9680 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9681 
   9682 	wm_txrxintr_disable(wmq);
   9683 
   9684 	mutex_enter(txq->txq_lock);
   9685 
   9686 	if (txq->txq_stopping) {
   9687 		mutex_exit(txq->txq_lock);
   9688 		return 0;
   9689 	}
   9690 
   9691 	WM_Q_EVCNT_INCR(txq, txdw);
   9692 	txmore = wm_txeof(txq, txlimit);
   9693 	/* Fill upper bits with TX index. See below for the lower. */
   9694 	rndval = txq->txq_next * WM_NRXDESC;
    9695 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9696 	mutex_exit(txq->txq_lock);
   9697 
   9698 	DPRINTF(WM_DEBUG_RX,
   9699 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9700 	mutex_enter(rxq->rxq_lock);
   9701 
   9702 	if (rxq->rxq_stopping) {
   9703 		mutex_exit(rxq->rxq_lock);
   9704 		return 0;
   9705 	}
   9706 
   9707 	WM_Q_EVCNT_INCR(rxq, intr);
   9708 	rxmore = wm_rxeof(rxq, rxlimit);
   9709 
   9710 	/* Fill lower bits with RX index. See above for the upper. */
   9711 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9712 	mutex_exit(rxq->rxq_lock);
   9713 
   9714 	wm_itrs_writereg(sc, wmq);
   9715 
   9716 	/*
    9717 	 * This function is called in hardware interrupt context and is
    9718 	 * per-CPU, so it is not required to take a lock.
   9719 	 */
   9720 	if (rndval != 0)
   9721 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
   9722 
   9723 	if (txmore || rxmore) {
   9724 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9725 		wm_sched_handle_queue(sc, wmq);
   9726 	} else
   9727 		wm_txrxintr_enable(wmq);
   9728 
   9729 	return 1;
   9730 }
   9731 
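/*
 * wm_handle_queue:
 *
 *	Softint/workqueue handler; process the Tx/Rx descriptors left
 *	over beyond the interrupt processing limits.
 */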
   9732 static void
   9733 wm_handle_queue(void *arg)
   9734 {
   9735 	struct wm_queue *wmq = arg;
   9736 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9737 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9738 	struct wm_softc *sc = txq->txq_sc;
   9739 	u_int txlimit = sc->sc_tx_process_limit;
   9740 	u_int rxlimit = sc->sc_rx_process_limit;
   9741 	bool txmore;
   9742 	bool rxmore;
   9743 
   9744 	mutex_enter(txq->txq_lock);
   9745 	if (txq->txq_stopping) {
   9746 		mutex_exit(txq->txq_lock);
   9747 		return;
   9748 	}
   9749 	txmore = wm_txeof(txq, txlimit);
   9750 	wm_deferred_start_locked(txq);
   9751 	mutex_exit(txq->txq_lock);
   9752 
   9753 	mutex_enter(rxq->rxq_lock);
   9754 	if (rxq->rxq_stopping) {
   9755 		mutex_exit(rxq->rxq_lock);
   9756 		return;
   9757 	}
   9758 	WM_Q_EVCNT_INCR(rxq, defer);
   9759 	rxmore = wm_rxeof(rxq, rxlimit);
   9760 	mutex_exit(rxq->rxq_lock);
   9761 
   9762 	if (txmore || rxmore) {
   9763 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9764 		wm_sched_handle_queue(sc, wmq);
   9765 	} else
   9766 		wm_txrxintr_enable(wmq);
   9767 }
   9768 
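/*
 * wm_handle_queue_work:
 *
 *	Workqueue handler; just a wrapper around wm_handle_queue().
 */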
   9769 static void
   9770 wm_handle_queue_work(struct work *wk, void *context)
   9771 {
   9772 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9773 
   9774 	/*
    9775 	 * An "enqueued" flag is not required here.
   9776 	 */
   9777 	wm_handle_queue(wmq);
   9778 }
   9779 
   9780 /*
   9781  * wm_linkintr_msix:
   9782  *
   9783  *	Interrupt service routine for link status change for MSI-X.
   9784  */
   9785 static int
   9786 wm_linkintr_msix(void *arg)
   9787 {
   9788 	struct wm_softc *sc = arg;
   9789 	uint32_t reg;
    9790 	bool has_rxo = false;
   9791 
   9792 	reg = CSR_READ(sc, WMREG_ICR);
   9793 	WM_CORE_LOCK(sc);
   9794 	DPRINTF(WM_DEBUG_LINK,
   9795 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9796 		device_xname(sc->sc_dev), reg));
   9797 
   9798 	if (sc->sc_core_stopping)
   9799 		goto out;
   9800 
   9801 	if ((reg & ICR_LSC) != 0) {
   9802 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9803 		wm_linkintr(sc, ICR_LSC);
   9804 	}
   9805 	if ((reg & ICR_GPI(0)) != 0)
   9806 		device_printf(sc->sc_dev, "got module interrupt\n");
   9807 
   9808 	/*
   9809 	 * XXX 82574 MSI-X mode workaround
   9810 	 *
    9811 	 * In 82574 MSI-X mode, a receive overrun (RXO) interrupt is raised
    9812 	 * on the ICR_OTHER MSI-X vector, and neither the ICR_RXQ(0) nor the
    9813 	 * ICR_RXQ(1) vector fires. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9814 	 * interrupts by writing WMREG_ICS to process the received packets.
   9815 	 */
   9816 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9817 #if defined(WM_DEBUG)
   9818 		log(LOG_WARNING, "%s: Receive overrun\n",
   9819 		    device_xname(sc->sc_dev));
   9820 #endif /* defined(WM_DEBUG) */
   9821 
   9822 		has_rxo = true;
   9823 		/*
    9824 		 * The RXO interrupt rate is very high when the receive
    9825 		 * traffic rate is high. We use polling mode for ICR_OTHER,
    9826 		 * as for the Tx/Rx interrupts. ICR_OTHER will be re-enabled
    9827 		 * at the end of wm_txrxintr_msix(), which is kicked by both
    9828 		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9829 		 */
   9830 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9831 
   9832 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9833 	}
    9834 
   9837 out:
   9838 	WM_CORE_UNLOCK(sc);
   9839 
   9840 	if (sc->sc_type == WM_T_82574) {
   9841 		if (!has_rxo)
   9842 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9843 		else
   9844 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9845 	} else if (sc->sc_type == WM_T_82575)
   9846 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9847 	else
   9848 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9849 
   9850 	return 1;
   9851 }
   9852 
   9853 /*
   9854  * Media related.
   9855  * GMII, SGMII, TBI (and SERDES)
   9856  */
   9857 
   9858 /* Common */
   9859 
   9860 /*
   9861  * wm_tbi_serdes_set_linkled:
   9862  *
   9863  *	Update the link LED on TBI and SERDES devices.
   9864  */
   9865 static void
   9866 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9867 {
   9868 
   9869 	if (sc->sc_tbi_linkup)
   9870 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9871 	else
   9872 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9873 
   9874 	/* 82540 or newer devices are active low */
   9875 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9876 
   9877 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9878 }
   9879 
   9880 /* GMII related */
   9881 
   9882 /*
   9883  * wm_gmii_reset:
   9884  *
   9885  *	Reset the PHY.
   9886  */
   9887 static void
   9888 wm_gmii_reset(struct wm_softc *sc)
   9889 {
   9890 	uint32_t reg;
   9891 	int rv;
   9892 
   9893 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9894 		device_xname(sc->sc_dev), __func__));
   9895 
   9896 	rv = sc->phy.acquire(sc);
   9897 	if (rv != 0) {
   9898 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9899 		    __func__);
   9900 		return;
   9901 	}
   9902 
   9903 	switch (sc->sc_type) {
   9904 	case WM_T_82542_2_0:
   9905 	case WM_T_82542_2_1:
   9906 		/* null */
   9907 		break;
   9908 	case WM_T_82543:
   9909 		/*
    9910 		 * With the 82543, we need to force the MAC's speed and duplex
    9911 		 * settings to match the PHY's speed and duplex configuration.
   9912 		 * In addition, we need to perform a hardware reset on the PHY
   9913 		 * to take it out of reset.
   9914 		 */
   9915 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9916 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9917 
   9918 		/* The PHY reset pin is active-low. */
   9919 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9920 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9921 		    CTRL_EXT_SWDPIN(4));
   9922 		reg |= CTRL_EXT_SWDPIO(4);
   9923 
   9924 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9925 		CSR_WRITE_FLUSH(sc);
   9926 		delay(10*1000);
   9927 
   9928 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9929 		CSR_WRITE_FLUSH(sc);
   9930 		delay(150);
   9931 #if 0
   9932 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9933 #endif
   9934 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9935 		break;
   9936 	case WM_T_82544:	/* Reset 10000us */
   9937 	case WM_T_82540:
   9938 	case WM_T_82545:
   9939 	case WM_T_82545_3:
   9940 	case WM_T_82546:
   9941 	case WM_T_82546_3:
   9942 	case WM_T_82541:
   9943 	case WM_T_82541_2:
   9944 	case WM_T_82547:
   9945 	case WM_T_82547_2:
   9946 	case WM_T_82571:	/* Reset 100us */
   9947 	case WM_T_82572:
   9948 	case WM_T_82573:
   9949 	case WM_T_82574:
   9950 	case WM_T_82575:
   9951 	case WM_T_82576:
   9952 	case WM_T_82580:
   9953 	case WM_T_I350:
   9954 	case WM_T_I354:
   9955 	case WM_T_I210:
   9956 	case WM_T_I211:
   9957 	case WM_T_82583:
   9958 	case WM_T_80003:
   9959 		/* Generic reset */
   9960 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9961 		CSR_WRITE_FLUSH(sc);
   9962 		delay(20000);
   9963 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9964 		CSR_WRITE_FLUSH(sc);
   9965 		delay(20000);
   9966 
   9967 		if ((sc->sc_type == WM_T_82541)
   9968 		    || (sc->sc_type == WM_T_82541_2)
   9969 		    || (sc->sc_type == WM_T_82547)
   9970 		    || (sc->sc_type == WM_T_82547_2)) {
    9971 			/* Workarounds for IGP are done in igp_reset() */
   9972 			/* XXX add code to set LED after phy reset */
   9973 		}
   9974 		break;
   9975 	case WM_T_ICH8:
   9976 	case WM_T_ICH9:
   9977 	case WM_T_ICH10:
   9978 	case WM_T_PCH:
   9979 	case WM_T_PCH2:
   9980 	case WM_T_PCH_LPT:
   9981 	case WM_T_PCH_SPT:
   9982 	case WM_T_PCH_CNP:
   9983 		/* Generic reset */
   9984 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9985 		CSR_WRITE_FLUSH(sc);
   9986 		delay(100);
   9987 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9988 		CSR_WRITE_FLUSH(sc);
   9989 		delay(150);
   9990 		break;
   9991 	default:
   9992 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9993 		    __func__);
   9994 		break;
   9995 	}
   9996 
   9997 	sc->phy.release(sc);
   9998 
   9999 	/* get_cfg_done */
   10000 	wm_get_cfg_done(sc);
   10001 
   10002 	/* Extra setup */
   10003 	switch (sc->sc_type) {
   10004 	case WM_T_82542_2_0:
   10005 	case WM_T_82542_2_1:
   10006 	case WM_T_82543:
   10007 	case WM_T_82544:
   10008 	case WM_T_82540:
   10009 	case WM_T_82545:
   10010 	case WM_T_82545_3:
   10011 	case WM_T_82546:
   10012 	case WM_T_82546_3:
   10013 	case WM_T_82541_2:
   10014 	case WM_T_82547_2:
   10015 	case WM_T_82571:
   10016 	case WM_T_82572:
   10017 	case WM_T_82573:
   10018 	case WM_T_82574:
   10019 	case WM_T_82583:
   10020 	case WM_T_82575:
   10021 	case WM_T_82576:
   10022 	case WM_T_82580:
   10023 	case WM_T_I350:
   10024 	case WM_T_I354:
   10025 	case WM_T_I210:
   10026 	case WM_T_I211:
   10027 	case WM_T_80003:
   10028 		/* Null */
   10029 		break;
   10030 	case WM_T_82541:
   10031 	case WM_T_82547:
    10032 		/* XXX Actively configure the LED after PHY reset */
   10033 		break;
   10034 	case WM_T_ICH8:
   10035 	case WM_T_ICH9:
   10036 	case WM_T_ICH10:
   10037 	case WM_T_PCH:
   10038 	case WM_T_PCH2:
   10039 	case WM_T_PCH_LPT:
   10040 	case WM_T_PCH_SPT:
   10041 	case WM_T_PCH_CNP:
   10042 		wm_phy_post_reset(sc);
   10043 		break;
   10044 	default:
   10045 		panic("%s: unknown type\n", __func__);
   10046 		break;
   10047 	}
   10048 }
   10049 
   10050 /*
   10051  * Setup sc_phytype and mii_{read|write}reg.
   10052  *
    10053  *  To identify the PHY type, the correct read/write function must be
    10054  * selected, and to select the correct read/write function, the PCI ID
    10055  * or MAC type is required without accessing the PHY registers.
    10056  *
    10057  *  On the first call of this function, the PHY ID is not yet known, so
    10058  * check the PCI ID or MAC type. The list of PCI IDs may not be complete,
    10059  * so the result might be incorrect.
    10060  *
    10061  *  On the second call, the PHY OUI and model are used to identify the
    10062  * PHY type. It might not be perfect because of missing entries in the
    10063  * comparison table, but it is better than the first call.
    10064  *
    10065  *  If the newly detected result differs from the previous assumption,
    10066  * a diagnostic message is printed.
   10067  */
   10068 static void
   10069 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10070     uint16_t phy_model)
   10071 {
   10072 	device_t dev = sc->sc_dev;
   10073 	struct mii_data *mii = &sc->sc_mii;
   10074 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10075 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10076 	mii_readreg_t new_readreg;
   10077 	mii_writereg_t new_writereg;
   10078 	bool dodiag = true;
   10079 
   10080 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10081 		device_xname(sc->sc_dev), __func__));
   10082 
   10083 	/*
    10084 	 * A 1000BASE-T SFP uses SGMII and the first assumed PHY type is
    10085 	 * always incorrect, so don't print diag output on the second call.
   10086 	 */
   10087 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10088 		dodiag = false;
   10089 
   10090 	if (mii->mii_readreg == NULL) {
   10091 		/*
   10092 		 *  This is the first call of this function. For ICH and PCH
   10093 		 * variants, it's difficult to determine the PHY access method
   10094 		 * by sc_type, so use the PCI product ID for some devices.
   10095 		 */
   10096 
   10097 		switch (sc->sc_pcidevid) {
   10098 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10099 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10100 			/* 82577 */
   10101 			new_phytype = WMPHY_82577;
   10102 			break;
   10103 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10104 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10105 			/* 82578 */
   10106 			new_phytype = WMPHY_82578;
   10107 			break;
   10108 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10109 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10110 			/* 82579 */
   10111 			new_phytype = WMPHY_82579;
   10112 			break;
   10113 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10114 		case PCI_PRODUCT_INTEL_82801I_BM:
   10115 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10116 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10117 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10118 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10119 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10120 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10121 			/* ICH8, 9, 10 with 82567 */
   10122 			new_phytype = WMPHY_BM;
   10123 			break;
   10124 		default:
   10125 			break;
   10126 		}
   10127 	} else {
   10128 		/* It's not the first call. Use PHY OUI and model */
   10129 		switch (phy_oui) {
   10130 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10131 			switch (phy_model) {
   10132 			case 0x0004: /* XXX */
   10133 				new_phytype = WMPHY_82578;
   10134 				break;
   10135 			default:
   10136 				break;
   10137 			}
   10138 			break;
   10139 		case MII_OUI_xxMARVELL:
   10140 			switch (phy_model) {
   10141 			case MII_MODEL_xxMARVELL_I210:
   10142 				new_phytype = WMPHY_I210;
   10143 				break;
   10144 			case MII_MODEL_xxMARVELL_E1011:
   10145 			case MII_MODEL_xxMARVELL_E1000_3:
   10146 			case MII_MODEL_xxMARVELL_E1000_5:
   10147 			case MII_MODEL_xxMARVELL_E1112:
   10148 				new_phytype = WMPHY_M88;
   10149 				break;
   10150 			case MII_MODEL_xxMARVELL_E1149:
   10151 				new_phytype = WMPHY_BM;
   10152 				break;
   10153 			case MII_MODEL_xxMARVELL_E1111:
   10154 			case MII_MODEL_xxMARVELL_I347:
   10155 			case MII_MODEL_xxMARVELL_E1512:
   10156 			case MII_MODEL_xxMARVELL_E1340M:
   10157 			case MII_MODEL_xxMARVELL_E1543:
   10158 				new_phytype = WMPHY_M88;
   10159 				break;
   10160 			case MII_MODEL_xxMARVELL_I82563:
   10161 				new_phytype = WMPHY_GG82563;
   10162 				break;
   10163 			default:
   10164 				break;
   10165 			}
   10166 			break;
   10167 		case MII_OUI_INTEL:
   10168 			switch (phy_model) {
   10169 			case MII_MODEL_INTEL_I82577:
   10170 				new_phytype = WMPHY_82577;
   10171 				break;
   10172 			case MII_MODEL_INTEL_I82579:
   10173 				new_phytype = WMPHY_82579;
   10174 				break;
   10175 			case MII_MODEL_INTEL_I217:
   10176 				new_phytype = WMPHY_I217;
   10177 				break;
   10178 			case MII_MODEL_INTEL_I82580:
   10179 			case MII_MODEL_INTEL_I350:
   10180 				new_phytype = WMPHY_82580;
   10181 				break;
   10182 			default:
   10183 				break;
   10184 			}
   10185 			break;
   10186 		case MII_OUI_yyINTEL:
   10187 			switch (phy_model) {
   10188 			case MII_MODEL_yyINTEL_I82562G:
   10189 			case MII_MODEL_yyINTEL_I82562EM:
   10190 			case MII_MODEL_yyINTEL_I82562ET:
   10191 				new_phytype = WMPHY_IFE;
   10192 				break;
   10193 			case MII_MODEL_yyINTEL_IGP01E1000:
   10194 				new_phytype = WMPHY_IGP;
   10195 				break;
   10196 			case MII_MODEL_yyINTEL_I82566:
   10197 				new_phytype = WMPHY_IGP_3;
   10198 				break;
   10199 			default:
   10200 				break;
   10201 			}
   10202 			break;
   10203 		default:
   10204 			break;
   10205 		}
   10206 
   10207 		if (dodiag) {
   10208 			if (new_phytype == WMPHY_UNKNOWN)
   10209 				aprint_verbose_dev(dev,
   10210 				    "%s: Unknown PHY model. OUI=%06x, "
   10211 				    "model=%04x\n", __func__, phy_oui,
   10212 				    phy_model);
   10213 
   10214 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10215 			    && (sc->sc_phytype != new_phytype)) {
   10216 				aprint_error_dev(dev, "Previously assumed PHY "
    10217 				    "type(%u) was incorrect. PHY type from PHY "
   10218 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10219 			}
   10220 		}
   10221 	}
   10222 
   10223 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10224 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10225 		/* SGMII */
   10226 		new_readreg = wm_sgmii_readreg;
   10227 		new_writereg = wm_sgmii_writereg;
   10228 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10229 		/* BM2 (phyaddr == 1) */
   10230 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10231 		    && (new_phytype != WMPHY_BM)
   10232 		    && (new_phytype != WMPHY_UNKNOWN))
   10233 			doubt_phytype = new_phytype;
   10234 		new_phytype = WMPHY_BM;
   10235 		new_readreg = wm_gmii_bm_readreg;
   10236 		new_writereg = wm_gmii_bm_writereg;
   10237 	} else if (sc->sc_type >= WM_T_PCH) {
   10238 		/* All PCH* use _hv_ */
   10239 		new_readreg = wm_gmii_hv_readreg;
   10240 		new_writereg = wm_gmii_hv_writereg;
   10241 	} else if (sc->sc_type >= WM_T_ICH8) {
   10242 		/* non-82567 ICH8, 9 and 10 */
   10243 		new_readreg = wm_gmii_i82544_readreg;
   10244 		new_writereg = wm_gmii_i82544_writereg;
   10245 	} else if (sc->sc_type >= WM_T_80003) {
   10246 		/* 80003 */
   10247 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10248 		    && (new_phytype != WMPHY_GG82563)
   10249 		    && (new_phytype != WMPHY_UNKNOWN))
   10250 			doubt_phytype = new_phytype;
   10251 		new_phytype = WMPHY_GG82563;
   10252 		new_readreg = wm_gmii_i80003_readreg;
   10253 		new_writereg = wm_gmii_i80003_writereg;
   10254 	} else if (sc->sc_type >= WM_T_I210) {
   10255 		/* I210 and I211 */
   10256 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10257 		    && (new_phytype != WMPHY_I210)
   10258 		    && (new_phytype != WMPHY_UNKNOWN))
   10259 			doubt_phytype = new_phytype;
   10260 		new_phytype = WMPHY_I210;
   10261 		new_readreg = wm_gmii_gs40g_readreg;
   10262 		new_writereg = wm_gmii_gs40g_writereg;
   10263 	} else if (sc->sc_type >= WM_T_82580) {
   10264 		/* 82580, I350 and I354 */
   10265 		new_readreg = wm_gmii_82580_readreg;
   10266 		new_writereg = wm_gmii_82580_writereg;
   10267 	} else if (sc->sc_type >= WM_T_82544) {
    10268 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10269 		new_readreg = wm_gmii_i82544_readreg;
   10270 		new_writereg = wm_gmii_i82544_writereg;
   10271 	} else {
   10272 		new_readreg = wm_gmii_i82543_readreg;
   10273 		new_writereg = wm_gmii_i82543_writereg;
   10274 	}
   10275 
   10276 	if (new_phytype == WMPHY_BM) {
   10277 		/* All BM use _bm_ */
   10278 		new_readreg = wm_gmii_bm_readreg;
   10279 		new_writereg = wm_gmii_bm_writereg;
   10280 	}
   10281 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10282 		/* All PCH* use _hv_ */
   10283 		new_readreg = wm_gmii_hv_readreg;
   10284 		new_writereg = wm_gmii_hv_writereg;
   10285 	}
   10286 
   10287 	/* Diag output */
   10288 	if (dodiag) {
   10289 		if (doubt_phytype != WMPHY_UNKNOWN)
   10290 			aprint_error_dev(dev, "Assumed new PHY type was "
   10291 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10292 			    new_phytype);
   10293 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10294 		    && (sc->sc_phytype != new_phytype))
   10295 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    10296 			    " was incorrect. New PHY type = %u\n",
   10297 			    sc->sc_phytype, new_phytype);
   10298 
   10299 		if ((mii->mii_readreg != NULL) &&
   10300 		    (new_phytype == WMPHY_UNKNOWN))
   10301 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10302 
   10303 		if ((mii->mii_readreg != NULL) &&
   10304 		    (mii->mii_readreg != new_readreg))
   10305 			aprint_error_dev(dev, "Previously assumed PHY "
   10306 			    "read/write function was incorrect.\n");
   10307 	}
   10308 
   10309 	/* Update now */
   10310 	sc->sc_phytype = new_phytype;
   10311 	mii->mii_readreg = new_readreg;
   10312 	mii->mii_writereg = new_writereg;
   10313 	if (new_readreg == wm_gmii_hv_readreg) {
   10314 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10315 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10316 	} else if (new_readreg == wm_sgmii_readreg) {
   10317 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10318 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10319 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10320 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10321 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10322 	}
   10323 }
   10324 
   10325 /*
   10326  * wm_get_phy_id_82575:
   10327  *
    10328  *	Return the PHY ID, or -1 on failure.
   10329  */
   10330 static int
   10331 wm_get_phy_id_82575(struct wm_softc *sc)
   10332 {
   10333 	uint32_t reg;
   10334 	int phyid = -1;
   10335 
   10336 	/* XXX */
   10337 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10338 		return -1;
   10339 
   10340 	if (wm_sgmii_uses_mdio(sc)) {
   10341 		switch (sc->sc_type) {
   10342 		case WM_T_82575:
   10343 		case WM_T_82576:
   10344 			reg = CSR_READ(sc, WMREG_MDIC);
   10345 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10346 			break;
   10347 		case WM_T_82580:
   10348 		case WM_T_I350:
   10349 		case WM_T_I354:
   10350 		case WM_T_I210:
   10351 		case WM_T_I211:
   10352 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10353 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10354 			break;
   10355 		default:
   10356 			return -1;
   10357 		}
   10358 	}
   10359 
   10360 	return phyid;
   10361 }
   10362 
   10363 
   10364 /*
   10365  * wm_gmii_mediainit:
   10366  *
   10367  *	Initialize media for use on 1000BASE-T devices.
   10368  */
   10369 static void
   10370 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10371 {
   10372 	device_t dev = sc->sc_dev;
   10373 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10374 	struct mii_data *mii = &sc->sc_mii;
   10375 
   10376 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10377 		device_xname(sc->sc_dev), __func__));
   10378 
   10379 	/* We have GMII. */
   10380 	sc->sc_flags |= WM_F_HAS_MII;
   10381 
   10382 	if (sc->sc_type == WM_T_80003)
   10383 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10384 	else
   10385 		sc->sc_tipg = TIPG_1000T_DFLT;
   10386 
   10387 	/*
   10388 	 * Let the chip set speed/duplex on its own based on
   10389 	 * signals from the PHY.
   10390 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10391 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10392 	 */
   10393 	sc->sc_ctrl |= CTRL_SLU;
   10394 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10395 
   10396 	/* Initialize our media structures and probe the GMII. */
   10397 	mii->mii_ifp = ifp;
   10398 
   10399 	mii->mii_statchg = wm_gmii_statchg;
   10400 
    10401 	/* Switch PHY control from SMBus to PCIe */
   10402 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10403 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10404 	    || (sc->sc_type == WM_T_PCH_CNP))
   10405 		wm_init_phy_workarounds_pchlan(sc);
   10406 
   10407 	wm_gmii_reset(sc);
   10408 
   10409 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10410 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10411 	    wm_gmii_mediastatus);
   10412 
   10413 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10414 	    || (sc->sc_type == WM_T_82580)
   10415 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10416 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10417 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10418 			/* Attach only one port */
   10419 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10420 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10421 		} else {
   10422 			int i, id;
   10423 			uint32_t ctrl_ext;
   10424 
   10425 			id = wm_get_phy_id_82575(sc);
   10426 			if (id != -1) {
   10427 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10428 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10429 			}
   10430 			if ((id == -1)
   10431 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10432 				/* Power on the SGMII PHY if it is disabled */
   10433 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10434 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10435 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10436 				CSR_WRITE_FLUSH(sc);
   10437 				delay(300*1000); /* XXX too long */
   10438 
   10439 				/*
    10440 				 * Scan PHY addresses from 1 to 8.
    10441 				 *
    10442 				 * I2C access fails with the I2C register's
    10443 				 * ERROR bit set, so suppress the error
    10444 				 * message while scanning.
   10445 				 */
   10446 				sc->phy.no_errprint = true;
   10447 				for (i = 1; i < 8; i++)
   10448 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10449 					    0xffffffff, i, MII_OFFSET_ANY,
   10450 					    MIIF_DOPAUSE);
   10451 				sc->phy.no_errprint = false;
   10452 
   10453 				/* Restore previous sfp cage power state */
   10454 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10455 			}
   10456 		}
   10457 	} else
   10458 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10459 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10460 
   10461 	/*
    10462 	 * If the MAC is PCH2 or newer and it failed to detect an MII PHY,
    10463 	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
   10464 	 */
   10465 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10466 		|| (sc->sc_type == WM_T_PCH_SPT)
   10467 		|| (sc->sc_type == WM_T_PCH_CNP))
   10468 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10469 		wm_set_mdio_slow_mode_hv(sc);
   10470 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10471 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10472 	}
   10473 
   10474 	/*
   10475 	 * (For ICH8 variants)
   10476 	 * If PHY detection failed, use BM's r/w function and retry.
   10477 	 */
   10478 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10479 		/* if failed, retry with *_bm_* */
   10480 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10481 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10482 		    sc->sc_phytype);
   10483 		sc->sc_phytype = WMPHY_BM;
   10484 		mii->mii_readreg = wm_gmii_bm_readreg;
   10485 		mii->mii_writereg = wm_gmii_bm_writereg;
   10486 
   10487 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10488 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10489 	}
   10490 
   10491 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10492 		/* No PHY was found */
   10493 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10494 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10495 		sc->sc_phytype = WMPHY_NONE;
   10496 	} else {
   10497 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10498 
   10499 		/*
    10500 		 * PHY found! Check the PHY type again with the second call
    10501 		 * of wm_gmii_setup_phytype().
   10502 		 */
   10503 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10504 		    child->mii_mpd_model);
   10505 
   10506 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10507 	}
   10508 }
   10509 
   10510 /*
   10511  * wm_gmii_mediachange:	[ifmedia interface function]
   10512  *
   10513  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10514  */
   10515 static int
   10516 wm_gmii_mediachange(struct ifnet *ifp)
   10517 {
   10518 	struct wm_softc *sc = ifp->if_softc;
   10519 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10520 	uint32_t reg;
   10521 	int rc;
   10522 
   10523 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10524 		device_xname(sc->sc_dev), __func__));
   10525 	if ((ifp->if_flags & IFF_UP) == 0)
   10526 		return 0;
   10527 
   10528 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10529 	if ((sc->sc_type == WM_T_82580)
   10530 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10531 	    || (sc->sc_type == WM_T_I211)) {
   10532 		reg = CSR_READ(sc, WMREG_PHPM);
   10533 		reg &= ~PHPM_GO_LINK_D;
   10534 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10535 	}
   10536 
   10537 	/* Disable D0 LPLU. */
   10538 	wm_lplu_d0_disable(sc);
   10539 
   10540 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10541 	sc->sc_ctrl |= CTRL_SLU;
   10542 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10543 	    || (sc->sc_type > WM_T_82543)) {
   10544 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10545 	} else {
   10546 		sc->sc_ctrl &= ~CTRL_ASDE;
   10547 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10548 		if (ife->ifm_media & IFM_FDX)
   10549 			sc->sc_ctrl |= CTRL_FD;
   10550 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10551 		case IFM_10_T:
   10552 			sc->sc_ctrl |= CTRL_SPEED_10;
   10553 			break;
   10554 		case IFM_100_TX:
   10555 			sc->sc_ctrl |= CTRL_SPEED_100;
   10556 			break;
   10557 		case IFM_1000_T:
   10558 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10559 			break;
   10560 		case IFM_NONE:
   10561 			/* There is no specific setting for IFM_NONE */
   10562 			break;
   10563 		default:
   10564 			panic("wm_gmii_mediachange: bad media 0x%x",
   10565 			    ife->ifm_media);
   10566 		}
   10567 	}
   10568 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10569 	CSR_WRITE_FLUSH(sc);
   10570 
   10571 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10572 		wm_serdes_mediachange(ifp);
   10573 
   10574 	if (sc->sc_type <= WM_T_82543)
   10575 		wm_gmii_reset(sc);
   10576 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10577 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10578 		/* Allow time for the SFP cage to power up the PHY */
   10579 		delay(300 * 1000);
   10580 		wm_gmii_reset(sc);
   10581 	}
   10582 
   10583 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10584 		return 0;
   10585 	return rc;
   10586 }
   10587 
   10588 /*
   10589  * wm_gmii_mediastatus:	[ifmedia interface function]
   10590  *
   10591  *	Get the current interface media status on a 1000BASE-T device.
   10592  */
   10593 static void
   10594 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10595 {
   10596 	struct wm_softc *sc = ifp->if_softc;
   10597 
   10598 	ether_mediastatus(ifp, ifmr);
   10599 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10600 	    | sc->sc_flowflags;
   10601 }
   10602 
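/* Software-controllable pins used to bit-bang MII access on the 82543. */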
   10603 #define	MDI_IO		CTRL_SWDPIN(2)
   10604 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10605 #define	MDI_CLK		CTRL_SWDPIN(3)
   10606 
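/*
 * wm_i82543_mii_sendbits:
 *
 *	Clock out the given data bits to the PHY over the bit-banged
 *	MII interface.
 */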
   10607 static void
   10608 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10609 {
   10610 	uint32_t i, v;
   10611 
   10612 	v = CSR_READ(sc, WMREG_CTRL);
   10613 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10614 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10615 
   10616 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10617 		if (data & i)
   10618 			v |= MDI_IO;
   10619 		else
   10620 			v &= ~MDI_IO;
   10621 		CSR_WRITE(sc, WMREG_CTRL, v);
   10622 		CSR_WRITE_FLUSH(sc);
   10623 		delay(10);
   10624 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10625 		CSR_WRITE_FLUSH(sc);
   10626 		delay(10);
   10627 		CSR_WRITE(sc, WMREG_CTRL, v);
   10628 		CSR_WRITE_FLUSH(sc);
   10629 		delay(10);
   10630 	}
   10631 }
   10632 
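/*
 * wm_i82543_mii_recvbits:
 *
 *	Clock in 16 data bits from the PHY over the bit-banged MII
 *	interface.
 */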
   10633 static uint16_t
   10634 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10635 {
   10636 	uint32_t v, i;
   10637 	uint16_t data = 0;
   10638 
   10639 	v = CSR_READ(sc, WMREG_CTRL);
   10640 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10641 	v |= CTRL_SWDPIO(3);
   10642 
   10643 	CSR_WRITE(sc, WMREG_CTRL, v);
   10644 	CSR_WRITE_FLUSH(sc);
   10645 	delay(10);
   10646 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10647 	CSR_WRITE_FLUSH(sc);
   10648 	delay(10);
   10649 	CSR_WRITE(sc, WMREG_CTRL, v);
   10650 	CSR_WRITE_FLUSH(sc);
   10651 	delay(10);
   10652 
   10653 	for (i = 0; i < 16; i++) {
   10654 		data <<= 1;
   10655 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10656 		CSR_WRITE_FLUSH(sc);
   10657 		delay(10);
   10658 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10659 			data |= 1;
   10660 		CSR_WRITE(sc, WMREG_CTRL, v);
   10661 		CSR_WRITE_FLUSH(sc);
   10662 		delay(10);
   10663 	}
   10664 
   10665 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10666 	CSR_WRITE_FLUSH(sc);
   10667 	delay(10);
   10668 	CSR_WRITE(sc, WMREG_CTRL, v);
   10669 	CSR_WRITE_FLUSH(sc);
   10670 	delay(10);
   10671 
   10672 	return data;
   10673 }
   10674 
   10675 #undef MDI_IO
   10676 #undef MDI_DIR
   10677 #undef MDI_CLK
   10678 
   10679 /*
   10680  * wm_gmii_i82543_readreg:	[mii interface function]
   10681  *
   10682  *	Read a PHY register on the GMII (i82543 version).
   10683  */
   10684 static int
   10685 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10686 {
   10687 	struct wm_softc *sc = device_private(dev);
   10688 
   10689 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10690 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10691 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10692 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10693 
   10694 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10695 		device_xname(dev), phy, reg, *val));
   10696 
   10697 	return 0;
   10698 }
   10699 
   10700 /*
   10701  * wm_gmii_i82543_writereg:	[mii interface function]
   10702  *
   10703  *	Write a PHY register on the GMII (i82543 version).
   10704  */
   10705 static int
   10706 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10707 {
   10708 	struct wm_softc *sc = device_private(dev);
   10709 
   10710 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10711 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10712 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10713 	    (MII_COMMAND_START << 30), 32);
   10714 
   10715 	return 0;
   10716 }
   10717 
   10718 /*
   10719  * wm_gmii_mdic_readreg:	[mii interface function]
   10720  *
   10721  *	Read a PHY register on the GMII.
   10722  */
   10723 static int
   10724 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10725 {
   10726 	struct wm_softc *sc = device_private(dev);
   10727 	uint32_t mdic = 0;
   10728 	int i;
   10729 
   10730 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10731 	    && (reg > MII_ADDRMASK)) {
   10732 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10733 		    __func__, sc->sc_phytype, reg);
   10734 		reg &= MII_ADDRMASK;
   10735 	}
   10736 
   10737 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10738 	    MDIC_REGADD(reg));
   10739 
   10740 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10741 		delay(50);
   10742 		mdic = CSR_READ(sc, WMREG_MDIC);
   10743 		if (mdic & MDIC_READY)
   10744 			break;
   10745 	}
   10746 
   10747 	if ((mdic & MDIC_READY) == 0) {
   10748 		DPRINTF(WM_DEBUG_GMII,
   10749 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10750 			device_xname(dev), phy, reg));
   10751 		return ETIMEDOUT;
   10752 	} else if (mdic & MDIC_E) {
   10753 		/* This is normal if no PHY is present. */
   10754 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10755 			device_xname(sc->sc_dev), phy, reg));
   10756 		return -1;
   10757 	} else
   10758 		*val = MDIC_DATA(mdic);
   10759 
   10760 	/*
   10761 	 * Allow some time after each MDIC transaction to avoid
   10762 	 * reading duplicate data in the next MDIC transaction.
   10763 	 */
   10764 	if (sc->sc_type == WM_T_PCH2)
   10765 		delay(100);
   10766 
   10767 	return 0;
   10768 }
   10769 
   10770 /*
   10771  * wm_gmii_mdic_writereg:	[mii interface function]
   10772  *
   10773  *	Write a PHY register on the GMII.
   10774  */
   10775 static int
   10776 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10777 {
   10778 	struct wm_softc *sc = device_private(dev);
   10779 	uint32_t mdic = 0;
   10780 	int i;
   10781 
   10782 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10783 	    && (reg > MII_ADDRMASK)) {
   10784 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10785 		    __func__, sc->sc_phytype, reg);
   10786 		reg &= MII_ADDRMASK;
   10787 	}
   10788 
   10789 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10790 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10791 
   10792 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10793 		delay(50);
   10794 		mdic = CSR_READ(sc, WMREG_MDIC);
   10795 		if (mdic & MDIC_READY)
   10796 			break;
   10797 	}
   10798 
   10799 	if ((mdic & MDIC_READY) == 0) {
   10800 		DPRINTF(WM_DEBUG_GMII,
   10801 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10802 			device_xname(dev), phy, reg));
   10803 		return ETIMEDOUT;
   10804 	} else if (mdic & MDIC_E) {
   10805 		DPRINTF(WM_DEBUG_GMII,
   10806 		    ("%s: MDIC write error: phy %d reg %d\n",
   10807 			device_xname(dev), phy, reg));
   10808 		return -1;
   10809 	}
   10810 
   10811 	/*
   10812 	 * Allow some time after each MDIC transaction to avoid
   10813 	 * reading duplicate data in the next MDIC transaction.
   10814 	 */
   10815 	if (sc->sc_type == WM_T_PCH2)
   10816 		delay(100);
   10817 
   10818 	return 0;
   10819 }
   10820 
   10821 /*
   10822  * wm_gmii_i82544_readreg:	[mii interface function]
   10823  *
   10824  *	Read a PHY register on the GMII.
   10825  */
   10826 static int
   10827 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10828 {
   10829 	struct wm_softc *sc = device_private(dev);
   10830 	int rv;
   10831 
   10832 	if (sc->phy.acquire(sc)) {
   10833 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10834 		return -1;
   10835 	}
   10836 
   10837 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10838 
   10839 	sc->phy.release(sc);
   10840 
   10841 	return rv;
   10842 }
   10843 
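/*
 * wm_gmii_i82544_readreg_locked:
 *
 *	Like wm_gmii_i82544_readreg() above, but assumes that the PHY
 *	semaphore is already held.
 */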
   10844 static int
   10845 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10846 {
   10847 	struct wm_softc *sc = device_private(dev);
   10848 	int rv;
   10849 
   10850 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10851 		switch (sc->sc_phytype) {
   10852 		case WMPHY_IGP:
   10853 		case WMPHY_IGP_2:
   10854 		case WMPHY_IGP_3:
   10855 			rv = wm_gmii_mdic_writereg(dev, phy,
   10856 			    MII_IGPHY_PAGE_SELECT, reg);
   10857 			if (rv != 0)
   10858 				return rv;
   10859 			break;
   10860 		default:
   10861 #ifdef WM_DEBUG
   10862 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10863 			    __func__, sc->sc_phytype, reg);
   10864 #endif
   10865 			break;
   10866 		}
   10867 	}
   10868 
   10869 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10870 }
   10871 
   10872 /*
   10873  * wm_gmii_i82544_writereg:	[mii interface function]
   10874  *
   10875  *	Write a PHY register on the GMII.
   10876  */
   10877 static int
   10878 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10879 {
   10880 	struct wm_softc *sc = device_private(dev);
   10881 	int rv;
   10882 
   10883 	if (sc->phy.acquire(sc)) {
   10884 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10885 		return -1;
   10886 	}
   10887 
   10888 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10889 	sc->phy.release(sc);
   10890 
   10891 	return rv;
   10892 }
   10893 
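/*
 * wm_gmii_i82544_writereg_locked:
 *
 *	Like wm_gmii_i82544_writereg() above, but assumes that the PHY
 *	semaphore is already held.
 */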
   10894 static int
   10895 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10896 {
   10897 	struct wm_softc *sc = device_private(dev);
   10898 	int rv;
   10899 
   10900 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10901 		switch (sc->sc_phytype) {
   10902 		case WMPHY_IGP:
   10903 		case WMPHY_IGP_2:
   10904 		case WMPHY_IGP_3:
   10905 			rv = wm_gmii_mdic_writereg(dev, phy,
   10906 			    MII_IGPHY_PAGE_SELECT, reg);
   10907 			if (rv != 0)
   10908 				return rv;
   10909 			break;
   10910 		default:
   10911 #ifdef WM_DEBUG
    10912 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10913 			    __func__, sc->sc_phytype, reg);
   10914 #endif
   10915 			break;
   10916 		}
   10917 	}
   10918 
   10919 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10920 }
   10921 
   10922 /*
   10923  * wm_gmii_i80003_readreg:	[mii interface function]
   10924  *
    10925  *	Read a PHY register on the Kumeran bus.
    10926  * This could be handled by the PHY layer if we didn't have to lock the
    10927  * resource ...
   10928  */
   10929 static int
   10930 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10931 {
   10932 	struct wm_softc *sc = device_private(dev);
   10933 	int page_select;
   10934 	uint16_t temp, temp2;
   10935 	int rv = 0;
   10936 
   10937 	if (phy != 1) /* Only one PHY on kumeran bus */
   10938 		return -1;
   10939 
   10940 	if (sc->phy.acquire(sc)) {
   10941 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10942 		return -1;
   10943 	}
   10944 
   10945 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10946 		page_select = GG82563_PHY_PAGE_SELECT;
   10947 	else {
   10948 		/*
   10949 		 * Use Alternative Page Select register to access registers
   10950 		 * 30 and 31.
   10951 		 */
   10952 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10953 	}
   10954 	temp = reg >> GG82563_PAGE_SHIFT;
   10955 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10956 		goto out;
   10957 
   10958 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10959 		/*
    10960 		 * Wait another 200us to work around a bug with the ready
    10961 		 * bit in the MDIC register.
   10962 		 */
   10963 		delay(200);
   10964 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10965 		if ((rv != 0) || (temp2 != temp)) {
   10966 			device_printf(dev, "%s failed\n", __func__);
   10967 			rv = -1;
   10968 			goto out;
   10969 		}
   10970 		delay(200);
   10971 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10972 		delay(200);
   10973 	} else
   10974 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10975 
   10976 out:
   10977 	sc->phy.release(sc);
   10978 	return rv;
   10979 }
   10980 
   10981 /*
   10982  * wm_gmii_i80003_writereg:	[mii interface function]
   10983  *
    10984  *	Write a PHY register on the Kumeran bus.
    10985  * This could be handled by the PHY layer if we didn't have to lock the
    10986  * resource ...
   10987  */
   10988 static int
   10989 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10990 {
   10991 	struct wm_softc *sc = device_private(dev);
   10992 	int page_select, rv;
   10993 	uint16_t temp, temp2;
   10994 
   10995 	if (phy != 1) /* Only one PHY on kumeran bus */
   10996 		return -1;
   10997 
   10998 	if (sc->phy.acquire(sc)) {
   10999 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11000 		return -1;
   11001 	}
   11002 
   11003 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11004 		page_select = GG82563_PHY_PAGE_SELECT;
   11005 	else {
   11006 		/*
   11007 		 * Use Alternative Page Select register to access registers
   11008 		 * 30 and 31.
   11009 		 */
   11010 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11011 	}
   11012 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11013 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11014 		goto out;
   11015 
   11016 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11017 		/*
    11018 		 * Wait another 200us to work around a bug with the ready
    11019 		 * bit in the MDIC register.
   11020 		 */
   11021 		delay(200);
   11022 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11023 		if ((rv != 0) || (temp2 != temp)) {
   11024 			device_printf(dev, "%s failed\n", __func__);
   11025 			rv = -1;
   11026 			goto out;
   11027 		}
   11028 		delay(200);
   11029 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11030 		delay(200);
   11031 	} else
   11032 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11033 
   11034 out:
   11035 	sc->phy.release(sc);
   11036 	return rv;
   11037 }
   11038 
   11039 /*
   11040  * wm_gmii_bm_readreg:	[mii interface function]
   11041  *
    11042  *	Read a PHY register on the BM PHY.
    11043  * This could be handled by the PHY layer if we didn't have to lock the
    11044  * resource ...
   11045  */
   11046 static int
   11047 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11048 {
   11049 	struct wm_softc *sc = device_private(dev);
   11050 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11051 	int rv;
   11052 
   11053 	if (sc->phy.acquire(sc)) {
   11054 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11055 		return -1;
   11056 	}
   11057 
   11058 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11059 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11060 		    || (reg == 31)) ? 1 : phy;
   11061 	/* Page 800 works differently than the rest so it has its own func */
   11062 	if (page == BM_WUC_PAGE) {
   11063 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11064 		goto release;
   11065 	}
   11066 
   11067 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11068 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11069 		    && (sc->sc_type != WM_T_82583))
   11070 			rv = wm_gmii_mdic_writereg(dev, phy,
   11071 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11072 		else
   11073 			rv = wm_gmii_mdic_writereg(dev, phy,
   11074 			    BME1000_PHY_PAGE_SELECT, page);
   11075 		if (rv != 0)
   11076 			goto release;
   11077 	}
   11078 
   11079 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11080 
   11081 release:
   11082 	sc->phy.release(sc);
   11083 	return rv;
   11084 }
   11085 
   11086 /*
   11087  * wm_gmii_bm_writereg:	[mii interface function]
   11088  *
    11089  *	Write a PHY register on the BM PHY.
    11090  * This could be handled by the PHY layer if we didn't have to lock the
    11091  * resource ...
   11092  */
   11093 static int
   11094 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11095 {
   11096 	struct wm_softc *sc = device_private(dev);
   11097 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11098 	int rv;
   11099 
   11100 	if (sc->phy.acquire(sc)) {
   11101 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11102 		return -1;
   11103 	}
   11104 
   11105 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11106 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11107 		    || (reg == 31)) ? 1 : phy;
   11108 	/* Page 800 works differently than the rest so it has its own func */
   11109 	if (page == BM_WUC_PAGE) {
   11110 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11111 		goto release;
   11112 	}
   11113 
   11114 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11115 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11116 		    && (sc->sc_type != WM_T_82583))
   11117 			rv = wm_gmii_mdic_writereg(dev, phy,
   11118 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11119 		else
   11120 			rv = wm_gmii_mdic_writereg(dev, phy,
   11121 			    BME1000_PHY_PAGE_SELECT, page);
   11122 		if (rv != 0)
   11123 			goto release;
   11124 	}
   11125 
   11126 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11127 
   11128 release:
   11129 	sc->phy.release(sc);
   11130 	return rv;
   11131 }
   11132 
   11133 /*
   11134  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11135  *  @dev: pointer to the HW structure
   11136  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11137  *
   11138  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11139  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11140  */
   11141 static int
   11142 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11143 {
   11144 	uint16_t temp;
   11145 	int rv;
   11146 
   11147 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11148 		device_xname(dev), __func__));
   11149 
   11150 	if (!phy_regp)
   11151 		return -1;
   11152 
   11153 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11154 
   11155 	/* Select Port Control Registers page */
   11156 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11157 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11158 	if (rv != 0)
   11159 		return rv;
   11160 
   11161 	/* Read WUCE and save it */
   11162 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11163 	if (rv != 0)
   11164 		return rv;
   11165 
   11166 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11167 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11168 	 */
   11169 	temp = *phy_regp;
   11170 	temp |= BM_WUC_ENABLE_BIT;
   11171 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11172 
   11173 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11174 		return rv;
   11175 
   11176 	/* Select Host Wakeup Registers page - caller now able to write
   11177 	 * registers on the Wakeup registers page
   11178 	 */
   11179 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11180 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11181 }
   11182 
   11183 /*
   11184  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11185  *  @dev: pointer to the HW structure
   11186  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11187  *
   11188  *  Restore BM_WUC_ENABLE_REG to its original value.
   11189  *
   11190  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11191  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11192  *  caller.
   11193  */
   11194 static int
   11195 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11196 {
   11197 
   11198 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11199 		device_xname(dev), __func__));
   11200 
   11201 	if (!phy_regp)
   11202 		return -1;
   11203 
   11204 	/* Select Port Control Registers page */
   11205 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11206 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11207 
   11208 	/* Restore 769.17 to its original value */
   11209 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11210 
   11211 	return 0;
   11212 }
   11213 
   11214 /*
   11215  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11216  *  @dev: pointer to the device
   11217  *  @offset: register offset to be read or written
   11218  *  @val: pointer to the data to read or write
   11219  *  @rd: determines if operation is read or write
   11220  *  @page_set: BM_WUC_PAGE already set and access enabled
   11221  *
   11222  *  Read the PHY register at offset and store the retrieved information in
   11223  *  data, or write data to PHY register at offset.  Note the procedure to
   11224  *  access the PHY wakeup registers is different than reading the other PHY
   11225  *  registers. It works as such:
   11226  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11227  *  2) Set page to 800 for host access (801 for the manageability engine)
   11228  *  3) Write the address using the address opcode (0x11)
   11229  *  4) Read or write the data using the data opcode (0x12)
   11230  *  5) Restore 769.17.2 to its original value
   11231  *
   11232  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11233  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11234  *
   11235  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11236  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11237  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11238  */
   11239 static int
   11240 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11241 	bool page_set)
   11242 {
   11243 	struct wm_softc *sc = device_private(dev);
   11244 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11245 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11246 	uint16_t wuce;
   11247 	int rv = 0;
   11248 
   11249 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11250 		device_xname(dev), __func__));
   11251 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11252 	if ((sc->sc_type == WM_T_PCH)
   11253 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11254 		device_printf(dev,
   11255 		    "Attempting to access page %d while gig enabled.\n", page);
   11256 	}
   11257 
   11258 	if (!page_set) {
   11259 		/* Enable access to PHY wakeup registers */
   11260 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11261 		if (rv != 0) {
   11262 			device_printf(dev,
   11263 			    "%s: Could not enable PHY wakeup reg access\n",
   11264 			    __func__);
   11265 			return rv;
   11266 		}
   11267 	}
   11268 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11269 		device_xname(sc->sc_dev), __func__, page, regnum));
   11270 
   11271 	/*
   11272 	 * 3) and 4) Access the PHY wakeup register using the sequence
   11273 	 * described in the function comment above.
   11274 	 */
   11275 
   11276 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11277 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11278 	if (rv != 0)
   11279 		goto release;
   11280 
   11281 	if (rd) {
   11282 		/* Read the Wakeup register page value using opcode 0x12 */
   11283 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11284 	} else {
   11285 		/* Write the Wakeup register page value using opcode 0x12 */
   11286 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11287 	}
   11288 	if (rv != 0)
   11289 		goto release;
   11290 release:
   11291 	if (!page_set)
   11292 		(void)wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11293 
   11294 	return rv;
   11295 }
   11296 
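/*
 * A minimal sketch of the five-step procedure documented above,
 * expanded inline for a single read.  It assumes the PHY semaphore is
 * already held and that 'regnum' is the in-page offset on BM_WUC_PAGE;
 * all function and register names are the real ones used above.
 */
#if 0
	uint16_t wuce, data;

	/* Steps 1 and 2: save 769.17, enable access, select page 800 */
	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) == 0) {
		/* Step 3: write the offset using the address opcode */
		wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
		/* Step 4: read the value using the data opcode */
		wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, &data);
		/* Step 5: restore 769.17 to its saved value */
		wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
	}
#endif
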
   11297 /*
   11298  * wm_gmii_hv_readreg:	[mii interface function]
   11299  *
   11300  *	Read a PHY register on the HV PHY.
   11301  * This could be handled by the PHY layer if we didn't have to lock the
   11302  * resource ...
   11303  */
   11304 static int
   11305 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11306 {
   11307 	struct wm_softc *sc = device_private(dev);
   11308 	int rv;
   11309 
   11310 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11311 		device_xname(dev), __func__));
   11312 	if (sc->phy.acquire(sc)) {
   11313 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11314 		return -1;
   11315 	}
   11316 
   11317 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11318 	sc->phy.release(sc);
   11319 	return rv;
   11320 }
   11321 
   11322 static int
   11323 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11324 {
   11325 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11326 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11327 	int rv;
   11328 
   11329 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11330 
   11331 	/* Page 800 works differently than the rest so it has its own func */
   11332 	if (page == BM_WUC_PAGE)
   11333 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11334 
   11335 	/*
   11336 	 * Pages between 1 and 767 use yet another access method, which is
   11337 	 * not implemented here, so fail explicitly.
   11338 	 */
   11339 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11340 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11341 		return -1;
   11342 	}
   11343 
   11344 	/*
   11345 	 * XXX I21[789] documents say that the SMBus Address register is at
   11346 	 * PHY address 01, Page 0 (not 768), Register 26.
   11347 	 */
   11348 	if (page == HV_INTC_FC_PAGE_START)
   11349 		page = 0;
   11350 
   11351 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11352 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11353 		    page << BME1000_PAGE_SHIFT);
   11354 		if (rv != 0)
   11355 			return rv;
   11356 	}
   11357 
   11358 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11359 }
   11360 
   11361 /*
   11362  * wm_gmii_hv_writereg:	[mii interface function]
   11363  *
   11364  *	Write a PHY register on the HV PHY.
   11365  * This could be handled by the PHY layer if we didn't have to lock the
   11366  * resource ...
   11367  */
   11368 static int
   11369 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11370 {
   11371 	struct wm_softc *sc = device_private(dev);
   11372 	int rv;
   11373 
   11374 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11375 		device_xname(dev), __func__));
   11376 
   11377 	if (sc->phy.acquire(sc)) {
   11378 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11379 		return -1;
   11380 	}
   11381 
   11382 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11383 	sc->phy.release(sc);
   11384 
   11385 	return rv;
   11386 }
   11387 
   11388 static int
   11389 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11390 {
   11391 	struct wm_softc *sc = device_private(dev);
   11392 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11393 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11394 	int rv;
   11395 
   11396 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11397 
   11398 	/* Page 800 works differently than the rest so it has its own func */
   11399 	if (page == BM_WUC_PAGE)
   11400 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11401 		    false);
   11402 
   11403 	/*
   11404 	 * Pages between 1 and 767 use yet another access method, which is
   11405 	 * not implemented here, so fail explicitly.
   11406 	 */
   11407 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11408 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11409 		return -1;
   11410 	}
   11411 
   11412 	{
   11413 		/*
   11414 		 * XXX I21[789] documents say that the SMBus Address register
   11415 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11416 		 */
   11417 		if (page == HV_INTC_FC_PAGE_START)
   11418 			page = 0;
   11419 
   11420 		/*
   11421 		 * XXX Workaround MDIO accesses being disabled after entering
   11422 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11423 		 * register is set)
   11424 		 */
   11425 		if (sc->sc_phytype == WMPHY_82578) {
   11426 			struct mii_softc *child;
   11427 
   11428 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11429 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11430 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11431 			    && ((val & (1 << 11)) != 0)) {
   11432 				device_printf(dev, "XXX need workaround\n");
   11433 			}
   11434 		}
   11435 
   11436 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11437 			rv = wm_gmii_mdic_writereg(dev, 1,
   11438 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11439 			if (rv != 0)
   11440 				return rv;
   11441 		}
   11442 	}
   11443 
   11444 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11445 }
   11446 
   11447 /*
   11448  * wm_gmii_82580_readreg:	[mii interface function]
   11449  *
   11450  *	Read a PHY register on the 82580 and I350.
   11451  * This could be handled by the PHY layer if we didn't have to lock the
   11452  * resource ...
   11453  */
   11454 static int
   11455 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11456 {
   11457 	struct wm_softc *sc = device_private(dev);
   11458 	int rv;
   11459 
   11460 	if (sc->phy.acquire(sc) != 0) {
   11461 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11462 		return -1;
   11463 	}
   11464 
   11465 #ifdef DIAGNOSTIC
   11466 	if (reg > MII_ADDRMASK) {
   11467 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11468 		    __func__, sc->sc_phytype, reg);
   11469 		reg &= MII_ADDRMASK;
   11470 	}
   11471 #endif
   11472 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11473 
   11474 	sc->phy.release(sc);
   11475 	return rv;
   11476 }
   11477 
   11478 /*
   11479  * wm_gmii_82580_writereg:	[mii interface function]
   11480  *
   11481  *	Write a PHY register on the 82580 and I350.
   11482  * This could be handled by the PHY layer if we didn't have to lock the
   11483  * resource ...
   11484  */
   11485 static int
   11486 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11487 {
   11488 	struct wm_softc *sc = device_private(dev);
   11489 	int rv;
   11490 
   11491 	if (sc->phy.acquire(sc) != 0) {
   11492 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11493 		return -1;
   11494 	}
   11495 
   11496 #ifdef DIAGNOSTIC
   11497 	if (reg > MII_ADDRMASK) {
   11498 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11499 		    __func__, sc->sc_phytype, reg);
   11500 		reg &= MII_ADDRMASK;
   11501 	}
   11502 #endif
   11503 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11504 
   11505 	sc->phy.release(sc);
   11506 	return rv;
   11507 }
   11508 
   11509 /*
   11510  * wm_gmii_gs40g_readreg:	[mii interface function]
   11511  *
   11512  *	Read a PHY register on the I210 and I211.
   11513  * This could be handled by the PHY layer if we didn't have to lock the
   11514  * resource ...
   11515  */
   11516 static int
   11517 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11518 {
   11519 	struct wm_softc *sc = device_private(dev);
   11520 	int page, offset;
   11521 	int rv;
   11522 
   11523 	/* Acquire semaphore */
   11524 	if (sc->phy.acquire(sc)) {
   11525 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11526 		return -1;
   11527 	}
   11528 
   11529 	/* Page select */
   11530 	page = reg >> GS40G_PAGE_SHIFT;
   11531 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11532 	if (rv != 0)
   11533 		goto release;
   11534 
   11535 	/* Read reg */
   11536 	offset = reg & GS40G_OFFSET_MASK;
   11537 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11538 
   11539 release:
   11540 	sc->phy.release(sc);
   11541 	return rv;
   11542 }
   11543 
   11544 /*
   11545  * wm_gmii_gs40g_writereg:	[mii interface function]
   11546  *
   11547  *	Write a PHY register on the I210 and I211.
   11548  * This could be handled by the PHY layer if we didn't have to lock the
   11549  * resource ...
   11550  */
   11551 static int
   11552 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11553 {
   11554 	struct wm_softc *sc = device_private(dev);
   11555 	uint16_t page;
   11556 	int offset, rv;
   11557 
   11558 	/* Acquire semaphore */
   11559 	if (sc->phy.acquire(sc)) {
   11560 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11561 		return -1;
   11562 	}
   11563 
   11564 	/* Page select */
   11565 	page = reg >> GS40G_PAGE_SHIFT;
   11566 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11567 	if (rv != 0)
   11568 		goto release;
   11569 
   11570 	/* Write reg */
   11571 	offset = reg & GS40G_OFFSET_MASK;
   11572 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11573 
   11574 release:
   11575 	/* Release semaphore */
   11576 	sc->phy.release(sc);
   11577 	return rv;
   11578 }
   11579 
   11580 /*
   11581  * wm_gmii_statchg:	[mii interface function]
   11582  *
   11583  *	Callback from MII layer when media changes.
   11584  */
   11585 static void
   11586 wm_gmii_statchg(struct ifnet *ifp)
   11587 {
   11588 	struct wm_softc *sc = ifp->if_softc;
   11589 	struct mii_data *mii = &sc->sc_mii;
   11590 
   11591 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11592 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11593 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11594 
   11595 	/* Get flow control negotiation result. */
   11596 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11597 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11598 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11599 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11600 	}
   11601 
   11602 	if (sc->sc_flowflags & IFM_FLOW) {
   11603 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11604 			sc->sc_ctrl |= CTRL_TFCE;
   11605 			sc->sc_fcrtl |= FCRTL_XONE;
   11606 		}
   11607 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11608 			sc->sc_ctrl |= CTRL_RFCE;
   11609 	}
   11610 
   11611 	if (mii->mii_media_active & IFM_FDX) {
   11612 		DPRINTF(WM_DEBUG_LINK,
   11613 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11614 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11615 	} else {
   11616 		DPRINTF(WM_DEBUG_LINK,
   11617 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11618 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11619 	}
   11620 
   11621 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11622 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11623 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11624 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11625 	if (sc->sc_type == WM_T_80003) {
   11626 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11627 		case IFM_1000_T:
   11628 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11629 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11630 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11631 			break;
   11632 		default:
   11633 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11634 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11635 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11636 			break;
   11637 		}
   11638 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11639 	}
   11640 }
   11641 
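/*
 * Flow-control summary for wm_gmii_statchg() above: when IFM_FLOW was
 * negotiated, IFM_ETH_TXPAUSE sets CTRL_TFCE and FCRTL_XONE (we may
 * transmit PAUSE frames) and IFM_ETH_RXPAUSE sets CTRL_RFCE (we honour
 * received PAUSE frames); with no negotiated flow control all three
 * bits stay clear.
 */
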
   11642 /* kumeran related (80003, ICH* and PCH*) */
   11643 
   11644 /*
   11645  * wm_kmrn_readreg:
   11646  *
   11647  *	Read a kumeran register
   11648  */
   11649 static int
   11650 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11651 {
   11652 	int rv;
   11653 
   11654 	if (sc->sc_type == WM_T_80003)
   11655 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11656 	else
   11657 		rv = sc->phy.acquire(sc);
   11658 	if (rv != 0) {
   11659 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11660 		    __func__);
   11661 		return rv;
   11662 	}
   11663 
   11664 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11665 
   11666 	if (sc->sc_type == WM_T_80003)
   11667 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11668 	else
   11669 		sc->phy.release(sc);
   11670 
   11671 	return rv;
   11672 }
   11673 
   11674 static int
   11675 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11676 {
   11677 
   11678 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11679 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11680 	    KUMCTRLSTA_REN);
   11681 	CSR_WRITE_FLUSH(sc);
   11682 	delay(2);
   11683 
   11684 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11685 
   11686 	return 0;
   11687 }
   11688 
   11689 /*
   11690  * wm_kmrn_writereg:
   11691  *
   11692  *	Write a kumeran register
   11693  */
   11694 static int
   11695 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11696 {
   11697 	int rv;
   11698 
   11699 	if (sc->sc_type == WM_T_80003)
   11700 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11701 	else
   11702 		rv = sc->phy.acquire(sc);
   11703 	if (rv != 0) {
   11704 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11705 		    __func__);
   11706 		return rv;
   11707 	}
   11708 
   11709 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11710 
   11711 	if (sc->sc_type == WM_T_80003)
   11712 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11713 	else
   11714 		sc->phy.release(sc);
   11715 
   11716 	return rv;
   11717 }
   11718 
   11719 static int
   11720 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11721 {
   11722 
   11723 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11724 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11725 
   11726 	return 0;
   11727 }
   11728 
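/*
 * Sketch, not from the driver: a read-modify-write of a Kumeran
 * register with the locked accessors above.  The caller must hold the
 * same lock wm_kmrn_readreg()/wm_kmrn_writereg() take; the bit being
 * set is hypothetical.
 */
#if 0
	uint16_t data;

	if (wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data) == 0)
		wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
		    data | 0x0004);	/* hypothetical bit */
#endif
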
   11729 /*
   11730  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11731  * This access method is different from IEEE MMD.
   11732  */
   11733 static int
   11734 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11735 {
   11736 	struct wm_softc *sc = device_private(dev);
   11737 	int rv;
   11738 
   11739 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11740 	if (rv != 0)
   11741 		return rv;
   11742 
   11743 	if (rd)
   11744 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11745 	else
   11746 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11747 	return rv;
   11748 }
   11749 
   11750 static int
   11751 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11752 {
   11753 
   11754 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11755 }
   11756 
   11757 static int
   11758 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11759 {
   11760 
   11761 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11762 }
   11763 
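/*
 * Usage sketch for the EMI helpers above: address then data, both
 * through PHY address 2.  The offset and bit are hypothetical; the
 * caller holds the PHY semaphore, as the _locked suffix requires.
 */
#if 0
	uint16_t data;

	if (wm_read_emi_reg_locked(dev, 0x0010, &data) == 0)
		wm_write_emi_reg_locked(dev, 0x0010, data | 0x0001);
#endif
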
   11764 /* SGMII related */
   11765 
   11766 /*
   11767  * wm_sgmii_uses_mdio
   11768  *
   11769  * Check whether the transaction is to the internal PHY or the external
   11770  * MDIO interface. Return true if it's MDIO.
   11771  */
   11772 static bool
   11773 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11774 {
   11775 	uint32_t reg;
   11776 	bool ismdio = false;
   11777 
   11778 	switch (sc->sc_type) {
   11779 	case WM_T_82575:
   11780 	case WM_T_82576:
   11781 		reg = CSR_READ(sc, WMREG_MDIC);
   11782 		ismdio = ((reg & MDIC_DEST) != 0);
   11783 		break;
   11784 	case WM_T_82580:
   11785 	case WM_T_I350:
   11786 	case WM_T_I354:
   11787 	case WM_T_I210:
   11788 	case WM_T_I211:
   11789 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11790 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11791 		break;
   11792 	default:
   11793 		break;
   11794 	}
   11795 
   11796 	return ismdio;
   11797 }
   11798 
   11799 /*
   11800  * wm_sgmii_readreg:	[mii interface function]
   11801  *
   11802  *	Read a PHY register on the SGMII
   11803  * This could be handled by the PHY layer if we didn't have to lock the
   11804  * resource ...
   11805  */
   11806 static int
   11807 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11808 {
   11809 	struct wm_softc *sc = device_private(dev);
   11810 	int rv;
   11811 
   11812 	if (sc->phy.acquire(sc)) {
   11813 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11814 		return -1;
   11815 	}
   11816 
   11817 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11818 
   11819 	sc->phy.release(sc);
   11820 	return rv;
   11821 }
   11822 
   11823 static int
   11824 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11825 {
   11826 	struct wm_softc *sc = device_private(dev);
   11827 	uint32_t i2ccmd;
   11828 	int i, rv = 0;
   11829 
   11830 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11831 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11832 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11833 
   11834 	/* Poll the ready bit */
   11835 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11836 		delay(50);
   11837 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11838 		if (i2ccmd & I2CCMD_READY)
   11839 			break;
   11840 	}
   11841 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11842 		device_printf(dev, "I2CCMD Read did not complete\n");
   11843 		rv = ETIMEDOUT;
   11844 	}
   11845 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11846 		if (!sc->phy.no_errprint)
   11847 			device_printf(dev, "I2CCMD Error bit set\n");
   11848 		rv = EIO;
   11849 	}
   11850 
   11851 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11852 
   11853 	return rv;
   11854 }
   11855 
   11856 /*
   11857  * wm_sgmii_writereg:	[mii interface function]
   11858  *
   11859  *	Write a PHY register on the SGMII.
   11860  * This could be handled by the PHY layer if we didn't have to lock the
   11861  * resource ...
   11862  */
   11863 static int
   11864 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11865 {
   11866 	struct wm_softc *sc = device_private(dev);
   11867 	int rv;
   11868 
   11869 	if (sc->phy.acquire(sc) != 0) {
   11870 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11871 		return -1;
   11872 	}
   11873 
   11874 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11875 
   11876 	sc->phy.release(sc);
   11877 
   11878 	return rv;
   11879 }
   11880 
   11881 static int
   11882 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11883 {
   11884 	struct wm_softc *sc = device_private(dev);
   11885 	uint32_t i2ccmd;
   11886 	uint16_t swapdata;
   11887 	int rv = 0;
   11888 	int i;
   11889 
   11890 	/* Swap the data bytes for the I2C interface */
   11891 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11892 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11893 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11894 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11895 
   11896 	/* Poll the ready bit */
   11897 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11898 		delay(50);
   11899 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11900 		if (i2ccmd & I2CCMD_READY)
   11901 			break;
   11902 	}
   11903 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11904 		device_printf(dev, "I2CCMD Write did not complete\n");
   11905 		rv = ETIMEDOUT;
   11906 	}
   11907 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11908 		device_printf(dev, "I2CCMD Error bit set\n");
   11909 		rv = EIO;
   11910 	}
   11911 
   11912 	return rv;
   11913 }
   11914 
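/*
 * Byte-order note for the two I2CCMD swaps above: the PHY transfers the
 * 16-bit word most-significant byte first over I2C, so a register value
 * of 0x1234 arrives in the low 16 bits of I2CCMD as 0x3412; swapping
 * the bytes on read (and pre-swapping 'val' on write) keeps host order.
 */
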
   11915 /* TBI related */
   11916 
   11917 static bool
   11918 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11919 {
   11920 	bool sig;
   11921 
   11922 	sig = ctrl & CTRL_SWDPIN(1);
   11923 
   11924 	/*
   11925 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11926 	 * detect a signal, 1 if they don't.
   11927 	 */
   11928 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11929 		sig = !sig;
   11930 
   11931 	return sig;
   11932 }
   11933 
   11934 /*
   11935  * wm_tbi_mediainit:
   11936  *
   11937  *	Initialize media for use on 1000BASE-X devices.
   11938  */
   11939 static void
   11940 wm_tbi_mediainit(struct wm_softc *sc)
   11941 {
   11942 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11943 	const char *sep = "";
   11944 
   11945 	if (sc->sc_type < WM_T_82543)
   11946 		sc->sc_tipg = TIPG_WM_DFLT;
   11947 	else
   11948 		sc->sc_tipg = TIPG_LG_DFLT;
   11949 
   11950 	sc->sc_tbi_serdes_anegticks = 5;
   11951 
   11952 	/* Initialize our media structures */
   11953 	sc->sc_mii.mii_ifp = ifp;
   11954 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11955 
   11956 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11957 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11958 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11959 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11960 	else
   11961 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11962 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11963 
   11964 	/*
   11965 	 * SWD Pins:
   11966 	 *
   11967 	 *	0 = Link LED (output)
   11968 	 *	1 = Loss Of Signal (input)
   11969 	 */
   11970 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11971 
   11972 	/* XXX Perhaps this is only for TBI */
   11973 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11974 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11975 
   11976 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11977 		sc->sc_ctrl &= ~CTRL_LRST;
   11978 
   11979 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11980 
   11981 #define	ADD(ss, mm, dd)							\
   11982 do {									\
   11983 	aprint_normal("%s%s", sep, ss);					\
   11984 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11985 	sep = ", ";							\
   11986 } while (/*CONSTCOND*/0)
   11987 
   11988 	aprint_normal_dev(sc->sc_dev, "");
   11989 
   11990 	if (sc->sc_type == WM_T_I354) {
   11991 		uint32_t status;
   11992 
   11993 		status = CSR_READ(sc, WMREG_STATUS);
   11994 		if (((status & STATUS_2P5_SKU) != 0)
   11995 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11996 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11997 		} else
   11998 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11999 	} else if (sc->sc_type == WM_T_82545) {
   12000 		/* Only 82545 is LX (XXX except SFP) */
   12001 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12002 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12003 	} else if (sc->sc_sfptype != 0) {
   12004 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12005 		switch (sc->sc_sfptype) {
   12006 		default:
   12007 		case SFF_SFP_ETH_FLAGS_1000SX:
   12008 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12009 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12010 			break;
   12011 		case SFF_SFP_ETH_FLAGS_1000LX:
   12012 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12013 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12014 			break;
   12015 		case SFF_SFP_ETH_FLAGS_1000CX:
   12016 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12017 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12018 			break;
   12019 		case SFF_SFP_ETH_FLAGS_1000T:
   12020 			ADD("1000baseT", IFM_1000_T, 0);
   12021 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12022 			break;
   12023 		case SFF_SFP_ETH_FLAGS_100FX:
   12024 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12025 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12026 			break;
   12027 		}
   12028 	} else {
   12029 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12030 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12031 	}
   12032 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12033 	aprint_normal("\n");
   12034 
   12035 #undef ADD
   12036 
   12037 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12038 }
   12039 
   12040 /*
   12041  * wm_tbi_mediachange:	[ifmedia interface function]
   12042  *
   12043  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12044  */
   12045 static int
   12046 wm_tbi_mediachange(struct ifnet *ifp)
   12047 {
   12048 	struct wm_softc *sc = ifp->if_softc;
   12049 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12050 	uint32_t status, ctrl;
   12051 	bool signal;
   12052 	int i;
   12053 
   12054 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12055 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12056 		/* XXX need some work for >= 82571 and < 82575 */
   12057 		if (sc->sc_type < WM_T_82575)
   12058 			return 0;
   12059 	}
   12060 
   12061 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12062 	    || (sc->sc_type >= WM_T_82575))
   12063 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12064 
   12065 	sc->sc_ctrl &= ~CTRL_LRST;
   12066 	sc->sc_txcw = TXCW_ANE;
   12067 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12068 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12069 	else if (ife->ifm_media & IFM_FDX)
   12070 		sc->sc_txcw |= TXCW_FD;
   12071 	else
   12072 		sc->sc_txcw |= TXCW_HD;
   12073 
   12074 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12075 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12076 
   12077 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12078 		device_xname(sc->sc_dev), sc->sc_txcw));
   12079 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12080 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12081 	CSR_WRITE_FLUSH(sc);
   12082 	delay(1000);
   12083 
   12084 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12085 	signal = wm_tbi_havesignal(sc, ctrl);
   12086 
   12087 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12088 		signal));
   12089 
   12090 	if (signal) {
   12091 		/* Have signal; wait for the link to come up. */
   12092 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12093 			delay(10000);
   12094 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12095 				break;
   12096 		}
   12097 
   12098 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12099 			device_xname(sc->sc_dev), i));
   12100 
   12101 		status = CSR_READ(sc, WMREG_STATUS);
   12102 		DPRINTF(WM_DEBUG_LINK,
   12103 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12104 			device_xname(sc->sc_dev), status, STATUS_LU));
   12105 		if (status & STATUS_LU) {
   12106 			/* Link is up. */
   12107 			DPRINTF(WM_DEBUG_LINK,
   12108 			    ("%s: LINK: set media -> link up %s\n",
   12109 				device_xname(sc->sc_dev),
   12110 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12111 
   12112 			/*
   12113 			 * NOTE: the hardware updates TFCE and RFCE inside
   12114 			 * CTRL automatically, so re-read it into sc->sc_ctrl.
   12115 			 */
   12116 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12117 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12118 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12119 			if (status & STATUS_FD)
   12120 				sc->sc_tctl |=
   12121 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12122 			else
   12123 				sc->sc_tctl |=
   12124 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12125 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12126 				sc->sc_fcrtl |= FCRTL_XONE;
   12127 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12128 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12129 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12130 			sc->sc_tbi_linkup = 1;
   12131 		} else {
   12132 			if (i == WM_LINKUP_TIMEOUT)
   12133 				wm_check_for_link(sc);
   12134 			/* Link is down. */
   12135 			DPRINTF(WM_DEBUG_LINK,
   12136 			    ("%s: LINK: set media -> link down\n",
   12137 				device_xname(sc->sc_dev)));
   12138 			sc->sc_tbi_linkup = 0;
   12139 		}
   12140 	} else {
   12141 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12142 			device_xname(sc->sc_dev)));
   12143 		sc->sc_tbi_linkup = 0;
   12144 	}
   12145 
   12146 	wm_tbi_serdes_set_linkled(sc);
   12147 
   12148 	return 0;
   12149 }
   12150 
   12151 /*
   12152  * wm_tbi_mediastatus:	[ifmedia interface function]
   12153  *
   12154  *	Get the current interface media status on a 1000BASE-X device.
   12155  */
   12156 static void
   12157 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12158 {
   12159 	struct wm_softc *sc = ifp->if_softc;
   12160 	uint32_t ctrl, status;
   12161 
   12162 	ifmr->ifm_status = IFM_AVALID;
   12163 	ifmr->ifm_active = IFM_ETHER;
   12164 
   12165 	status = CSR_READ(sc, WMREG_STATUS);
   12166 	if ((status & STATUS_LU) == 0) {
   12167 		ifmr->ifm_active |= IFM_NONE;
   12168 		return;
   12169 	}
   12170 
   12171 	ifmr->ifm_status |= IFM_ACTIVE;
   12172 	/* Only 82545 is LX */
   12173 	if (sc->sc_type == WM_T_82545)
   12174 		ifmr->ifm_active |= IFM_1000_LX;
   12175 	else
   12176 		ifmr->ifm_active |= IFM_1000_SX;
   12177 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12178 		ifmr->ifm_active |= IFM_FDX;
   12179 	else
   12180 		ifmr->ifm_active |= IFM_HDX;
   12181 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12182 	if (ctrl & CTRL_RFCE)
   12183 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12184 	if (ctrl & CTRL_TFCE)
   12185 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12186 }
   12187 
   12188 /* XXX TBI only */
   12189 static int
   12190 wm_check_for_link(struct wm_softc *sc)
   12191 {
   12192 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12193 	uint32_t rxcw;
   12194 	uint32_t ctrl;
   12195 	uint32_t status;
   12196 	bool signal;
   12197 
   12198 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12199 		device_xname(sc->sc_dev), __func__));
   12200 
   12201 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12202 		/* XXX need some work for >= 82571 */
   12203 		if (sc->sc_type >= WM_T_82571) {
   12204 			sc->sc_tbi_linkup = 1;
   12205 			return 0;
   12206 		}
   12207 	}
   12208 
   12209 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12210 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12211 	status = CSR_READ(sc, WMREG_STATUS);
   12212 	signal = wm_tbi_havesignal(sc, ctrl);
   12213 
   12214 	DPRINTF(WM_DEBUG_LINK,
   12215 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12216 		device_xname(sc->sc_dev), __func__, signal,
   12217 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12218 
   12219 	/*
   12220 	 * SWDPIN   LU RXCW
   12221 	 *	0    0	  0
   12222 	 *	0    0	  1	(should not happen)
   12223 	 *	0    1	  0	(should not happen)
   12224 	 *	0    1	  1	(should not happen)
   12225 	 *	1    0	  0	Disable autonego and force linkup
   12226 	 *	1    0	  1	got /C/ but not linkup yet
   12227 	 *	1    1	  0	(linkup)
   12228 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12229 	 *
   12230 	 */
   12231 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12232 		DPRINTF(WM_DEBUG_LINK,
   12233 		    ("%s: %s: force linkup and fullduplex\n",
   12234 			device_xname(sc->sc_dev), __func__));
   12235 		sc->sc_tbi_linkup = 0;
   12236 		/* Disable auto-negotiation in the TXCW register */
   12237 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12238 
   12239 		/*
   12240 		 * Force link-up and also force full-duplex.
   12241 		 *
   12242 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
   12243 		 * automatically, so refresh sc->sc_ctrl from it.
   12244 		 */
   12245 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12246 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12247 	} else if (((status & STATUS_LU) != 0)
   12248 	    && ((rxcw & RXCW_C) != 0)
   12249 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12250 		sc->sc_tbi_linkup = 1;
   12251 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12252 			device_xname(sc->sc_dev),
   12253 			__func__));
   12254 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12255 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12256 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12257 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12258 			device_xname(sc->sc_dev), __func__));
   12259 	} else {
   12260 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12261 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12262 			status));
   12263 	}
   12264 
   12265 	return 0;
   12266 }
   12267 
   12268 /*
   12269  * wm_tbi_tick:
   12270  *
   12271  *	Check the link on TBI devices.
   12272  *	This function acts as mii_tick().
   12273  */
   12274 static void
   12275 wm_tbi_tick(struct wm_softc *sc)
   12276 {
   12277 	struct mii_data *mii = &sc->sc_mii;
   12278 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12279 	uint32_t status;
   12280 
   12281 	KASSERT(WM_CORE_LOCKED(sc));
   12282 
   12283 	status = CSR_READ(sc, WMREG_STATUS);
   12284 
   12285 	/* XXX is this needed? */
   12286 	(void)CSR_READ(sc, WMREG_RXCW);
   12287 	(void)CSR_READ(sc, WMREG_CTRL);
   12288 
   12289 	/* set link status */
   12290 	if ((status & STATUS_LU) == 0) {
   12291 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12292 			device_xname(sc->sc_dev)));
   12293 		sc->sc_tbi_linkup = 0;
   12294 	} else if (sc->sc_tbi_linkup == 0) {
   12295 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12296 			device_xname(sc->sc_dev),
   12297 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12298 		sc->sc_tbi_linkup = 1;
   12299 		sc->sc_tbi_serdes_ticks = 0;
   12300 	}
   12301 
   12302 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12303 		goto setled;
   12304 
   12305 	if ((status & STATUS_LU) == 0) {
   12306 		sc->sc_tbi_linkup = 0;
   12307 		/* If the timer expired, retry autonegotiation */
   12308 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12309 		    && (++sc->sc_tbi_serdes_ticks
   12310 			>= sc->sc_tbi_serdes_anegticks)) {
   12311 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12312 				device_xname(sc->sc_dev), __func__));
   12313 			sc->sc_tbi_serdes_ticks = 0;
   12314 			/*
   12315 			 * Reset the link, and let autonegotiation do
   12316 			 * its thing
   12317 			 */
   12318 			sc->sc_ctrl |= CTRL_LRST;
   12319 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12320 			CSR_WRITE_FLUSH(sc);
   12321 			delay(1000);
   12322 			sc->sc_ctrl &= ~CTRL_LRST;
   12323 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12324 			CSR_WRITE_FLUSH(sc);
   12325 			delay(1000);
   12326 			CSR_WRITE(sc, WMREG_TXCW,
   12327 			    sc->sc_txcw & ~TXCW_ANE);
   12328 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12329 		}
   12330 	}
   12331 
   12332 setled:
   12333 	wm_tbi_serdes_set_linkled(sc);
   12334 }
   12335 
   12336 /* SERDES related */
   12337 static void
   12338 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12339 {
   12340 	uint32_t reg;
   12341 
   12342 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12343 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12344 		return;
   12345 
   12346 	/* Enable PCS to turn on link */
   12347 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12348 	reg |= PCS_CFG_PCS_EN;
   12349 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12350 
   12351 	/* Power up the laser */
   12352 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12353 	reg &= ~CTRL_EXT_SWDPIN(3);
   12354 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12355 
   12356 	/* Flush the write to verify completion */
   12357 	CSR_WRITE_FLUSH(sc);
   12358 	delay(1000);
   12359 }
   12360 
   12361 static int
   12362 wm_serdes_mediachange(struct ifnet *ifp)
   12363 {
   12364 	struct wm_softc *sc = ifp->if_softc;
   12365 	bool pcs_autoneg = true; /* XXX */
   12366 	uint32_t ctrl_ext, pcs_lctl, reg;
   12367 
   12368 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12369 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12370 		return 0;
   12371 
   12372 	/* XXX Currently, this function is not called on 8257[12] */
   12373 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12374 	    || (sc->sc_type >= WM_T_82575))
   12375 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12376 
   12377 	/* Power on the sfp cage if present */
   12378 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12379 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12380 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12381 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12382 
   12383 	sc->sc_ctrl |= CTRL_SLU;
   12384 
   12385 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12386 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12387 
   12388 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12389 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12390 	case CTRL_EXT_LINK_MODE_SGMII:
   12391 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12392 		pcs_autoneg = true;
   12393 		/* Autoneg time out should be disabled for SGMII mode */
   12394 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12395 		break;
   12396 	case CTRL_EXT_LINK_MODE_1000KX:
   12397 		pcs_autoneg = false;
   12398 		/* FALLTHROUGH */
   12399 	default:
   12400 		if ((sc->sc_type == WM_T_82575)
   12401 		    || (sc->sc_type == WM_T_82576)) {
   12402 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12403 				pcs_autoneg = false;
   12404 		}
   12405 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12406 		    | CTRL_FRCFDX;
   12407 
   12408 		/* Set speed of 1000/Full if speed/duplex is forced */
   12409 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12410 	}
   12411 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12412 
   12413 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12414 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12415 
   12416 	if (pcs_autoneg) {
   12417 		/* Set PCS register for autoneg */
   12418 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12419 
   12420 		/* Disable force flow control for autoneg */
   12421 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12422 
   12423 		/* Configure flow control advertisement for autoneg */
   12424 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12425 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12426 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12427 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12428 	} else
   12429 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12430 
   12431 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12432 
   12433 	return 0;
   12434 }
   12435 
   12436 static void
   12437 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12438 {
   12439 	struct wm_softc *sc = ifp->if_softc;
   12440 	struct mii_data *mii = &sc->sc_mii;
   12441 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12442 	uint32_t pcs_adv, pcs_lpab, reg;
   12443 
   12444 	ifmr->ifm_status = IFM_AVALID;
   12445 	ifmr->ifm_active = IFM_ETHER;
   12446 
   12447 	/* Check PCS */
   12448 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12449 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12450 		ifmr->ifm_active |= IFM_NONE;
   12451 		sc->sc_tbi_linkup = 0;
   12452 		goto setled;
   12453 	}
   12454 
   12455 	sc->sc_tbi_linkup = 1;
   12456 	ifmr->ifm_status |= IFM_ACTIVE;
   12457 	if (sc->sc_type == WM_T_I354) {
   12458 		uint32_t status;
   12459 
   12460 		status = CSR_READ(sc, WMREG_STATUS);
   12461 		if (((status & STATUS_2P5_SKU) != 0)
   12462 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12463 			ifmr->ifm_active |= IFM_2500_KX;
   12464 		} else
   12465 			ifmr->ifm_active |= IFM_1000_KX;
   12466 	} else {
   12467 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12468 		case PCS_LSTS_SPEED_10:
   12469 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12470 			break;
   12471 		case PCS_LSTS_SPEED_100:
   12472 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12473 			break;
   12474 		case PCS_LSTS_SPEED_1000:
   12475 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12476 			break;
   12477 		default:
   12478 			device_printf(sc->sc_dev, "Unknown speed\n");
   12479 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12480 			break;
   12481 		}
   12482 	}
   12483 	if ((reg & PCS_LSTS_FDX) != 0)
   12484 		ifmr->ifm_active |= IFM_FDX;
   12485 	else
   12486 		ifmr->ifm_active |= IFM_HDX;
   12487 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12488 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12489 		/* Check flow */
   12490 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12491 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12492 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12493 			goto setled;
   12494 		}
   12495 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12496 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12497 		DPRINTF(WM_DEBUG_LINK,
   12498 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12499 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12500 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12501 			mii->mii_media_active |= IFM_FLOW
   12502 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12503 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12504 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12505 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12506 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12507 			mii->mii_media_active |= IFM_FLOW
   12508 			    | IFM_ETH_TXPAUSE;
   12509 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12510 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12511 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12512 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12513 			mii->mii_media_active |= IFM_FLOW
   12514 			    | IFM_ETH_RXPAUSE;
   12515 		}
   12516 	}
   12517 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12518 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12519 setled:
   12520 	wm_tbi_serdes_set_linkled(sc);
   12521 }
   12522 
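/*
 * Pause resolution applied in wm_serdes_mediastatus() above
 * (IEEE 802.3 annex 28B):
 *
 *	local SYM ASYM	partner SYM ASYM	result
 *	   1	 x	    1	  x		TX and RX pause
 *	   0	 1	    1	  1		TX pause only
 *	   1	 1	    0	  1		RX pause only
 */
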
   12523 /*
   12524  * wm_serdes_tick:
   12525  *
   12526  *	Check the link on serdes devices.
   12527  */
   12528 static void
   12529 wm_serdes_tick(struct wm_softc *sc)
   12530 {
   12531 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12532 	struct mii_data *mii = &sc->sc_mii;
   12533 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12534 	uint32_t reg;
   12535 
   12536 	KASSERT(WM_CORE_LOCKED(sc));
   12537 
   12538 	mii->mii_media_status = IFM_AVALID;
   12539 	mii->mii_media_active = IFM_ETHER;
   12540 
   12541 	/* Check PCS */
   12542 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12543 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12544 		mii->mii_media_status |= IFM_ACTIVE;
   12545 		sc->sc_tbi_linkup = 1;
   12546 		sc->sc_tbi_serdes_ticks = 0;
   12547 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12548 		if ((reg & PCS_LSTS_FDX) != 0)
   12549 			mii->mii_media_active |= IFM_FDX;
   12550 		else
   12551 			mii->mii_media_active |= IFM_HDX;
   12552 	} else {
   12553 		mii->mii_media_active |= IFM_NONE;
   12554 		sc->sc_tbi_linkup = 0;
   12555 		/* If the timer expired, retry autonegotiation */
   12556 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12557 		    && (++sc->sc_tbi_serdes_ticks
   12558 			>= sc->sc_tbi_serdes_anegticks)) {
   12559 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12560 				device_xname(sc->sc_dev), __func__));
   12561 			sc->sc_tbi_serdes_ticks = 0;
   12562 			/* XXX */
   12563 			wm_serdes_mediachange(ifp);
   12564 		}
   12565 	}
   12566 
   12567 	wm_tbi_serdes_set_linkled(sc);
   12568 }
   12569 
   12570 /* SFP related */
   12571 
   12572 static int
   12573 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12574 {
   12575 	uint32_t i2ccmd;
   12576 	int i;
   12577 
   12578 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12579 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12580 
   12581 	/* Poll the ready bit */
   12582 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12583 		delay(50);
   12584 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12585 		if (i2ccmd & I2CCMD_READY)
   12586 			break;
   12587 	}
   12588 	if ((i2ccmd & I2CCMD_READY) == 0)
   12589 		return -1;
   12590 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12591 		return -1;
   12592 
   12593 	*data = i2ccmd & 0x00ff;
   12594 
   12595 	return 0;
   12596 }
   12597 
   12598 static uint32_t
   12599 wm_sfp_get_media_type(struct wm_softc *sc)
   12600 {
   12601 	uint32_t ctrl_ext;
   12602 	uint8_t val = 0;
   12603 	int timeout = 3;
   12604 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12605 	int rv = -1;
   12606 
   12607 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12608 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12609 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12610 	CSR_WRITE_FLUSH(sc);
   12611 
   12612 	/* Read SFP module data */
   12613 	while (timeout) {
   12614 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12615 		if (rv == 0)
   12616 			break;
   12617 		delay(100*1000); /* XXX too big */
   12618 		timeout--;
   12619 	}
   12620 	if (rv != 0)
   12621 		goto out;
   12622 
   12623 	switch (val) {
   12624 	case SFF_SFP_ID_SFF:
   12625 		aprint_normal_dev(sc->sc_dev,
   12626 		    "Module/Connector soldered to board\n");
   12627 		break;
   12628 	case SFF_SFP_ID_SFP:
   12629 		sc->sc_flags |= WM_F_SFP;
   12630 		break;
   12631 	case SFF_SFP_ID_UNKNOWN:
   12632 		goto out;
   12633 	default:
   12634 		break;
   12635 	}
   12636 
   12637 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12638 	if (rv != 0)
   12639 		goto out;
   12640 
   12641 	sc->sc_sfptype = val;
   12642 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12643 		mediatype = WM_MEDIATYPE_SERDES;
   12644 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12645 		sc->sc_flags |= WM_F_SGMII;
   12646 		mediatype = WM_MEDIATYPE_COPPER;
   12647 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12648 		sc->sc_flags |= WM_F_SGMII;
   12649 		mediatype = WM_MEDIATYPE_SERDES;
   12650 	} else {
   12651 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12652 		    __func__, sc->sc_sfptype);
   12653 		sc->sc_sfptype = 0; /* XXX unknown */
   12654 	}
   12655 
   12656 out:
   12657 	/* Restore I2C interface setting */
   12658 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12659 
   12660 	return mediatype;
   12661 }
   12662 
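/*
 * Decode summary for wm_sfp_get_media_type() above:
 *
 *	SFF ethernet compliance flags	resulting configuration
 *	1000BASE-SX or 1000BASE-LX	WM_MEDIATYPE_SERDES
 *	1000BASE-T			WM_MEDIATYPE_COPPER + WM_F_SGMII
 *	100BASE-FX			WM_MEDIATYPE_SERDES + WM_F_SGMII
 *	anything else			WM_MEDIATYPE_UNKNOWN
 */
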
   12663 /*
   12664  * NVM related.
   12665  * Microwire, SPI (w/wo EERD) and Flash.
   12666  */
   12667 
   12668 /* Both spi and uwire */
   12669 
   12670 /*
   12671  * wm_eeprom_sendbits:
   12672  *
   12673  *	Send a series of bits to the EEPROM.
   12674  */
   12675 static void
   12676 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12677 {
   12678 	uint32_t reg;
   12679 	int x;
   12680 
   12681 	reg = CSR_READ(sc, WMREG_EECD);
   12682 
   12683 	for (x = nbits; x > 0; x--) {
   12684 		if (bits & (1U << (x - 1)))
   12685 			reg |= EECD_DI;
   12686 		else
   12687 			reg &= ~EECD_DI;
   12688 		CSR_WRITE(sc, WMREG_EECD, reg);
   12689 		CSR_WRITE_FLUSH(sc);
   12690 		delay(2);
   12691 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12692 		CSR_WRITE_FLUSH(sc);
   12693 		delay(2);
   12694 		CSR_WRITE(sc, WMREG_EECD, reg);
   12695 		CSR_WRITE_FLUSH(sc);
   12696 		delay(2);
   12697 	}
   12698 }
   12699 
   12700 /*
   12701  * wm_eeprom_recvbits:
   12702  *
   12703  *	Receive a series of bits from the EEPROM.
   12704  */
   12705 static void
   12706 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12707 {
   12708 	uint32_t reg, val;
   12709 	int x;
   12710 
   12711 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12712 
   12713 	val = 0;
   12714 	for (x = nbits; x > 0; x--) {
   12715 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12716 		CSR_WRITE_FLUSH(sc);
   12717 		delay(2);
   12718 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12719 			val |= (1U << (x - 1));
   12720 		CSR_WRITE(sc, WMREG_EECD, reg);
   12721 		CSR_WRITE_FLUSH(sc);
   12722 		delay(2);
   12723 	}
   12724 	*valp = val;
   12725 }
   12726 
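/*
 * Timing note for the two bit-bang helpers above: each bit cell is
 * "drive/sample data, raise EECD_SK, lower EECD_SK", with delay(2)
 * after every transition; DI is driven before the rising clock edge
 * and DO is sampled while the clock is high.
 */
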
   12727 /* Microwire */
   12728 
   12729 /*
   12730  * wm_nvm_read_uwire:
   12731  *
   12732  *	Read a word from the EEPROM using the MicroWire protocol.
   12733  */
   12734 static int
   12735 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12736 {
   12737 	uint32_t reg, val;
   12738 	int i;
   12739 
   12740 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12741 		device_xname(sc->sc_dev), __func__));
   12742 
   12743 	if (sc->nvm.acquire(sc) != 0)
   12744 		return -1;
   12745 
   12746 	for (i = 0; i < wordcnt; i++) {
   12747 		/* Clear SK and DI. */
   12748 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12749 		CSR_WRITE(sc, WMREG_EECD, reg);
   12750 
   12751 		/*
   12752 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12753 		 * and Xen.
   12754 		 *
   12755 		 * We use this workaround only for the 82540 because qemu's
   12756 		 * e1000 acts as an 82540.
   12757 		 */
   12758 		if (sc->sc_type == WM_T_82540) {
   12759 			reg |= EECD_SK;
   12760 			CSR_WRITE(sc, WMREG_EECD, reg);
   12761 			reg &= ~EECD_SK;
   12762 			CSR_WRITE(sc, WMREG_EECD, reg);
   12763 			CSR_WRITE_FLUSH(sc);
   12764 			delay(2);
   12765 		}
   12766 		/* XXX: end of workaround */
   12767 
   12768 		/* Set CHIP SELECT. */
   12769 		reg |= EECD_CS;
   12770 		CSR_WRITE(sc, WMREG_EECD, reg);
   12771 		CSR_WRITE_FLUSH(sc);
   12772 		delay(2);
   12773 
   12774 		/* Shift in the READ command. */
   12775 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12776 
   12777 		/* Shift in address. */
   12778 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12779 
   12780 		/* Shift out the data. */
   12781 		wm_eeprom_recvbits(sc, &val, 16);
   12782 		data[i] = val & 0xffff;
   12783 
   12784 		/* Clear CHIP SELECT. */
   12785 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12786 		CSR_WRITE(sc, WMREG_EECD, reg);
   12787 		CSR_WRITE_FLUSH(sc);
   12788 		delay(2);
   12789 	}
   12790 
   12791 	sc->nvm.release(sc);
   12792 	return 0;
   12793 }
   12794 
   12795 /* SPI */
   12796 
   12797 /*
   12798  * Set SPI and FLASH related information from the EECD register.
   12799  * For 82541 and 82547, the word size is taken from EEPROM.
   12800  */
   12801 static int
   12802 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12803 {
   12804 	int size;
   12805 	uint32_t reg;
   12806 	uint16_t data;
   12807 
   12808 	reg = CSR_READ(sc, WMREG_EECD);
   12809 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12810 
   12811 	/* Read the size of NVM from EECD by default */
   12812 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12813 	switch (sc->sc_type) {
   12814 	case WM_T_82541:
   12815 	case WM_T_82541_2:
   12816 	case WM_T_82547:
   12817 	case WM_T_82547_2:
   12818 		/* Set dummy value to access EEPROM */
   12819 		sc->sc_nvm_wordsize = 64;
   12820 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12821 			aprint_error_dev(sc->sc_dev,
   12822 			    "%s: failed to read EEPROM size\n", __func__);
   12823 		}
   12824 		reg = data;
   12825 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12826 		if (size == 0)
   12827 			size = 6; /* 64 word size */
   12828 		else
   12829 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12830 		break;
   12831 	case WM_T_80003:
   12832 	case WM_T_82571:
   12833 	case WM_T_82572:
   12834 	case WM_T_82573: /* SPI case */
   12835 	case WM_T_82574: /* SPI case */
   12836 	case WM_T_82583: /* SPI case */
   12837 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12838 		if (size > 14)
   12839 			size = 14;
   12840 		break;
   12841 	case WM_T_82575:
   12842 	case WM_T_82576:
   12843 	case WM_T_82580:
   12844 	case WM_T_I350:
   12845 	case WM_T_I354:
   12846 	case WM_T_I210:
   12847 	case WM_T_I211:
   12848 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12849 		if (size > 15)
   12850 			size = 15;
   12851 		break;
   12852 	default:
   12853 		aprint_error_dev(sc->sc_dev,
   12854 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12855 		return -1;
   12856 		break;
   12857 	}
   12858 
   12859 	sc->sc_nvm_wordsize = 1 << size;
   12860 
   12861 	return 0;
   12862 }
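
/*
 * Worked example (informational): the EECD/EEPROM size field is an
 * exponent with an implied base of NVM_WORD_SIZE_BASE_SHIFT (6), so a
 * raw field of 0 means 1 << 6 = 64 words.  On e.g. an 82571, a raw
 * field of 1 becomes size = 1 + 6 = 7, i.e. sc_nvm_wordsize =
 * 1 << 7 = 128 words, and the clamps above cap the result at
 * 1 << 14 or 1 << 15 words.
 */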
   12863 
   12864 /*
   12865  * wm_nvm_ready_spi:
   12866  *
   12867  *	Wait for a SPI EEPROM to be ready for commands.
   12868  */
   12869 static int
   12870 wm_nvm_ready_spi(struct wm_softc *sc)
   12871 {
   12872 	uint32_t val;
   12873 	int usec;
   12874 
   12875 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12876 		device_xname(sc->sc_dev), __func__));
   12877 
   12878 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12879 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12880 		wm_eeprom_recvbits(sc, &val, 8);
   12881 		if ((val & SPI_SR_RDY) == 0)
   12882 			break;
   12883 	}
   12884 	if (usec >= SPI_MAX_RETRIES) {
   12885 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12886 		return -1;
   12887 	}
   12888 	return 0;
   12889 }
   12890 
   12891 /*
   12892  * wm_nvm_read_spi:
   12893  *
    12894  *	Read a word from the EEPROM using the SPI protocol.
   12895  */
   12896 static int
   12897 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12898 {
   12899 	uint32_t reg, val;
   12900 	int i;
   12901 	uint8_t opc;
   12902 	int rv = 0;
   12903 
   12904 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12905 		device_xname(sc->sc_dev), __func__));
   12906 
   12907 	if (sc->nvm.acquire(sc) != 0)
   12908 		return -1;
   12909 
   12910 	/* Clear SK and CS. */
   12911 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12912 	CSR_WRITE(sc, WMREG_EECD, reg);
   12913 	CSR_WRITE_FLUSH(sc);
   12914 	delay(2);
   12915 
   12916 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12917 		goto out;
   12918 
   12919 	/* Toggle CS to flush commands. */
   12920 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12921 	CSR_WRITE_FLUSH(sc);
   12922 	delay(2);
   12923 	CSR_WRITE(sc, WMREG_EECD, reg);
   12924 	CSR_WRITE_FLUSH(sc);
   12925 	delay(2);
   12926 
   12927 	opc = SPI_OPC_READ;
   12928 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12929 		opc |= SPI_OPC_A8;
   12930 
   12931 	wm_eeprom_sendbits(sc, opc, 8);
   12932 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12933 
   12934 	for (i = 0; i < wordcnt; i++) {
   12935 		wm_eeprom_recvbits(sc, &val, 16);
   12936 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12937 	}
   12938 
   12939 	/* Raise CS and clear SK. */
   12940 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12941 	CSR_WRITE(sc, WMREG_EECD, reg);
   12942 	CSR_WRITE_FLUSH(sc);
   12943 	delay(2);
   12944 
   12945 out:
   12946 	sc->nvm.release(sc);
   12947 	return rv;
   12948 }
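
/*
 * Illustrative note: the SPI part clocks each word out most significant
 * byte first, while NVM words are stored little-endian, hence the byte
 * swap above.  If the device returns the bytes 0x12 then 0x34,
 * wm_eeprom_recvbits() yields val == 0x1234 and the stored word is
 *
 *	data[i] = ((0x1234 >> 8) & 0xff) | ((0x1234 & 0xff) << 8) = 0x3412
 */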
   12949 
   12950 /* Using with EERD */
   12951 
   12952 static int
   12953 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12954 {
   12955 	uint32_t attempts = 100000;
   12956 	uint32_t i, reg = 0;
   12957 	int32_t done = -1;
   12958 
   12959 	for (i = 0; i < attempts; i++) {
   12960 		reg = CSR_READ(sc, rw);
   12961 
   12962 		if (reg & EERD_DONE) {
   12963 			done = 0;
   12964 			break;
   12965 		}
   12966 		delay(5);
   12967 	}
   12968 
   12969 	return done;
   12970 }
   12971 
   12972 static int
   12973 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12974 {
   12975 	int i, eerd = 0;
   12976 	int rv = 0;
   12977 
   12978 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12979 		device_xname(sc->sc_dev), __func__));
   12980 
   12981 	if (sc->nvm.acquire(sc) != 0)
   12982 		return -1;
   12983 
   12984 	for (i = 0; i < wordcnt; i++) {
   12985 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12986 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12987 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12988 		if (rv != 0) {
   12989 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   12990 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   12991 			break;
   12992 		}
   12993 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12994 	}
   12995 
   12996 	sc->nvm.release(sc);
   12997 	return rv;
   12998 }
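
/*
 * Informational sketch of the EERD handshake used above, in terms of
 * this file's EERD_* macros:
 *
 *	CSR_WRITE(sc, WMREG_EERD, (word << EERD_ADDR_SHIFT) | EERD_START);
 *	(poll until the register has EERD_DONE set)
 *	data = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 *
 * Address and start bit go in with a single write, and the same register
 * returns both the done flag and the 16-bit data.
 */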
   12999 
   13000 /* Flash */
   13001 
   13002 static int
   13003 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13004 {
   13005 	uint32_t eecd;
   13006 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13007 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13008 	uint32_t nvm_dword = 0;
   13009 	uint8_t sig_byte = 0;
   13010 	int rv;
   13011 
   13012 	switch (sc->sc_type) {
   13013 	case WM_T_PCH_SPT:
   13014 	case WM_T_PCH_CNP:
   13015 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13016 		act_offset = ICH_NVM_SIG_WORD * 2;
   13017 
   13018 		/* Set bank to 0 in case flash read fails. */
   13019 		*bank = 0;
   13020 
   13021 		/* Check bank 0 */
   13022 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13023 		if (rv != 0)
   13024 			return rv;
   13025 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13026 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13027 			*bank = 0;
   13028 			return 0;
   13029 		}
   13030 
   13031 		/* Check bank 1 */
    13032 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13033 		    &nvm_dword);
		if (rv != 0)
			return rv;
    13034 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13035 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13036 			*bank = 1;
   13037 			return 0;
   13038 		}
   13039 		aprint_error_dev(sc->sc_dev,
   13040 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13041 		return -1;
   13042 	case WM_T_ICH8:
   13043 	case WM_T_ICH9:
   13044 		eecd = CSR_READ(sc, WMREG_EECD);
   13045 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13046 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13047 			return 0;
   13048 		}
   13049 		/* FALLTHROUGH */
   13050 	default:
   13051 		/* Default to 0 */
   13052 		*bank = 0;
   13053 
   13054 		/* Check bank 0 */
   13055 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13056 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13057 			*bank = 0;
   13058 			return 0;
   13059 		}
   13060 
   13061 		/* Check bank 1 */
   13062 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13063 		    &sig_byte);
   13064 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13065 			*bank = 1;
   13066 			return 0;
   13067 		}
   13068 	}
   13069 
   13070 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13071 		device_xname(sc->sc_dev)));
   13072 	return -1;
   13073 }
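
/*
 * Informational note: a bank is accepted above when the signature byte
 * stored alongside ICH_NVM_SIG_WORD satisfies
 *
 *	(sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE
 *
 * On PCH_SPT/PCH_CNP the flash can only be read as dwords, so act_offset
 * is dword-aligned and the signature byte is extracted from bits 15:8 of
 * the dword instead of being read directly.
 */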
   13074 
   13075 /******************************************************************************
   13076  * This function does initial flash setup so that a new read/write/erase cycle
   13077  * can be started.
   13078  *
   13079  * sc - The pointer to the hw structure
   13080  ****************************************************************************/
   13081 static int32_t
   13082 wm_ich8_cycle_init(struct wm_softc *sc)
   13083 {
   13084 	uint16_t hsfsts;
   13085 	int32_t error = 1;
   13086 	int32_t i     = 0;
   13087 
   13088 	if (sc->sc_type >= WM_T_PCH_SPT)
   13089 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13090 	else
   13091 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13092 
    13093 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   13094 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13095 		return error;
   13096 
   13097 	/* Clear FCERR in Hw status by writing 1 */
   13098 	/* Clear DAEL in Hw status by writing a 1 */
   13099 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13100 
   13101 	if (sc->sc_type >= WM_T_PCH_SPT)
   13102 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13103 	else
   13104 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13105 
    13106 	/*
    13107 	 * Either we should have a hardware SPI cycle-in-progress bit to
    13108 	 * check against in order to start a new cycle, or the FDONE bit
    13109 	 * should be changed in the hardware so that it is 1 after a
    13110 	 * hardware reset, which can then be used to tell whether a cycle
    13111 	 * is in progress or has been completed.  We should also have some
    13112 	 * software semaphore mechanism to guard FDONE or the cycle-in-
    13113 	 * progress bit so that accesses by two threads are serialized,
    13114 	 * or a way to keep two threads from starting a cycle at once.
    13115 	 */
   13116 
   13117 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13118 		/*
   13119 		 * There is no cycle running at present, so we can start a
   13120 		 * cycle
   13121 		 */
   13122 
   13123 		/* Begin by setting Flash Cycle Done. */
   13124 		hsfsts |= HSFSTS_DONE;
   13125 		if (sc->sc_type >= WM_T_PCH_SPT)
   13126 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13127 			    hsfsts & 0xffffUL);
   13128 		else
   13129 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13130 		error = 0;
   13131 	} else {
   13132 		/*
    13133 		 * Otherwise, poll for some time so the current cycle has a
   13134 		 * chance to end before giving up.
   13135 		 */
   13136 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13137 			if (sc->sc_type >= WM_T_PCH_SPT)
   13138 				hsfsts = ICH8_FLASH_READ32(sc,
   13139 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13140 			else
   13141 				hsfsts = ICH8_FLASH_READ16(sc,
   13142 				    ICH_FLASH_HSFSTS);
   13143 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13144 				error = 0;
   13145 				break;
   13146 			}
   13147 			delay(1);
   13148 		}
   13149 		if (error == 0) {
   13150 			/*
    13151 			 * The previous cycle ended within the timeout; now
    13152 			 * set the Flash Cycle Done.
   13153 			 */
   13154 			hsfsts |= HSFSTS_DONE;
   13155 			if (sc->sc_type >= WM_T_PCH_SPT)
   13156 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13157 				    hsfsts & 0xffffUL);
   13158 			else
   13159 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13160 				    hsfsts);
   13161 		}
   13162 	}
   13163 	return error;
   13164 }
   13165 
   13166 /******************************************************************************
   13167  * This function starts a flash cycle and waits for its completion
   13168  *
   13169  * sc - The pointer to the hw structure
   13170  ****************************************************************************/
   13171 static int32_t
   13172 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13173 {
   13174 	uint16_t hsflctl;
   13175 	uint16_t hsfsts;
   13176 	int32_t error = 1;
   13177 	uint32_t i = 0;
   13178 
   13179 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13180 	if (sc->sc_type >= WM_T_PCH_SPT)
   13181 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13182 	else
   13183 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13184 	hsflctl |= HSFCTL_GO;
   13185 	if (sc->sc_type >= WM_T_PCH_SPT)
   13186 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13187 		    (uint32_t)hsflctl << 16);
   13188 	else
   13189 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13190 
   13191 	/* Wait till FDONE bit is set to 1 */
   13192 	do {
   13193 		if (sc->sc_type >= WM_T_PCH_SPT)
   13194 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13195 			    & 0xffffUL;
   13196 		else
   13197 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13198 		if (hsfsts & HSFSTS_DONE)
   13199 			break;
   13200 		delay(1);
   13201 		i++;
   13202 	} while (i < timeout);
    13203 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13204 		error = 0;
   13205 
   13206 	return error;
   13207 }
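
/*
 * Informational note: on PCH_SPT and newer the flash control register is
 * only reachable as the upper 16 bits of a combined 32-bit HSFSTS
 * register, which is why the SPT paths above shift HSFCTL values by 16
 * within HSFSTS instead of touching ICH_FLASH_HSFCTL directly.
 */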
   13208 
   13209 /******************************************************************************
   13210  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13211  *
   13212  * sc - The pointer to the hw structure
   13213  * index - The index of the byte or word to read.
   13214  * size - Size of data to read, 1=byte 2=word, 4=dword
   13215  * data - Pointer to the word to store the value read.
   13216  *****************************************************************************/
   13217 static int32_t
   13218 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13219     uint32_t size, uint32_t *data)
   13220 {
   13221 	uint16_t hsfsts;
   13222 	uint16_t hsflctl;
   13223 	uint32_t flash_linear_address;
   13224 	uint32_t flash_data = 0;
   13225 	int32_t error = 1;
   13226 	int32_t count = 0;
   13227 
   13228 	if (size < 1  || size > 4 || data == 0x0 ||
   13229 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13230 		return error;
   13231 
   13232 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13233 	    sc->sc_ich8_flash_base;
   13234 
   13235 	do {
   13236 		delay(1);
   13237 		/* Steps */
   13238 		error = wm_ich8_cycle_init(sc);
   13239 		if (error)
   13240 			break;
   13241 
   13242 		if (sc->sc_type >= WM_T_PCH_SPT)
   13243 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13244 			    >> 16;
   13245 		else
   13246 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13247 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   13248 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13249 		    & HSFCTL_BCOUNT_MASK;
   13250 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13251 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13252 			/*
   13253 			 * In SPT, This register is in Lan memory space, not
   13254 			 * flash. Therefore, only 32 bit access is supported.
   13255 			 */
   13256 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13257 			    (uint32_t)hsflctl << 16);
   13258 		} else
   13259 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13260 
   13261 		/*
   13262 		 * Write the last 24 bits of index into Flash Linear address
   13263 		 * field in Flash Address
   13264 		 */
    13265 		/* TODO: maybe check the index against the size of the flash */
   13266 
   13267 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13268 
   13269 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13270 
    13271 		/*
    13272 		 * Check if FCERR is set to 1.  If it is, clear it and try
    13273 		 * the whole sequence a few more times; otherwise read in
    13274 		 * (shift in) the Flash Data0, which returns the data least
    13275 		 * significant byte first.
    13276 		 */
   13277 		if (error == 0) {
   13278 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13279 			if (size == 1)
   13280 				*data = (uint8_t)(flash_data & 0x000000FF);
   13281 			else if (size == 2)
   13282 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13283 			else if (size == 4)
   13284 				*data = (uint32_t)flash_data;
   13285 			break;
   13286 		} else {
   13287 			/*
   13288 			 * If we've gotten here, then things are probably
   13289 			 * completely hosed, but if the error condition is
   13290 			 * detected, it won't hurt to give it another try...
   13291 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13292 			 */
   13293 			if (sc->sc_type >= WM_T_PCH_SPT)
   13294 				hsfsts = ICH8_FLASH_READ32(sc,
   13295 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13296 			else
   13297 				hsfsts = ICH8_FLASH_READ16(sc,
   13298 				    ICH_FLASH_HSFSTS);
   13299 
   13300 			if (hsfsts & HSFSTS_ERR) {
   13301 				/* Repeat for some time before giving up. */
   13302 				continue;
   13303 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13304 				break;
   13305 		}
   13306 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13307 
   13308 	return error;
   13309 }
   13310 
   13311 /******************************************************************************
   13312  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13313  *
   13314  * sc - pointer to wm_hw structure
   13315  * index - The index of the byte to read.
   13316  * data - Pointer to a byte to store the value read.
   13317  *****************************************************************************/
   13318 static int32_t
   13319 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13320 {
   13321 	int32_t status;
   13322 	uint32_t word = 0;
   13323 
   13324 	status = wm_read_ich8_data(sc, index, 1, &word);
   13325 	if (status == 0)
   13326 		*data = (uint8_t)word;
   13327 	else
   13328 		*data = 0;
   13329 
   13330 	return status;
   13331 }
   13332 
   13333 /******************************************************************************
   13334  * Reads a word from the NVM using the ICH8 flash access registers.
   13335  *
   13336  * sc - pointer to wm_hw structure
   13337  * index - The starting byte index of the word to read.
   13338  * data - Pointer to a word to store the value read.
   13339  *****************************************************************************/
   13340 static int32_t
   13341 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13342 {
   13343 	int32_t status;
   13344 	uint32_t word = 0;
   13345 
   13346 	status = wm_read_ich8_data(sc, index, 2, &word);
   13347 	if (status == 0)
   13348 		*data = (uint16_t)word;
   13349 	else
   13350 		*data = 0;
   13351 
   13352 	return status;
   13353 }
   13354 
   13355 /******************************************************************************
   13356  * Reads a dword from the NVM using the ICH8 flash access registers.
   13357  *
   13358  * sc - pointer to wm_hw structure
   13359  * index - The starting byte index of the word to read.
   13360  * data - Pointer to a word to store the value read.
   13361  *****************************************************************************/
   13362 static int32_t
   13363 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13364 {
   13365 	int32_t status;
   13366 
   13367 	status = wm_read_ich8_data(sc, index, 4, data);
   13368 	return status;
   13369 }
   13370 
   13371 /******************************************************************************
   13372  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13373  * register.
   13374  *
   13375  * sc - Struct containing variables accessed by shared code
   13376  * offset - offset of word in the EEPROM to read
   13377  * data - word read from the EEPROM
   13378  * words - number of words to read
   13379  *****************************************************************************/
   13380 static int
   13381 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13382 {
   13383 	int32_t	 rv = 0;
   13384 	uint32_t flash_bank = 0;
   13385 	uint32_t act_offset = 0;
   13386 	uint32_t bank_offset = 0;
   13387 	uint16_t word = 0;
   13388 	uint16_t i = 0;
   13389 
   13390 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13391 		device_xname(sc->sc_dev), __func__));
   13392 
   13393 	if (sc->nvm.acquire(sc) != 0)
   13394 		return -1;
   13395 
   13396 	/*
   13397 	 * We need to know which is the valid flash bank.  In the event
   13398 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13399 	 * managing flash_bank. So it cannot be trusted and needs
   13400 	 * to be updated with each read.
   13401 	 */
   13402 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13403 	if (rv) {
   13404 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13405 			device_xname(sc->sc_dev)));
   13406 		flash_bank = 0;
   13407 	}
   13408 
   13409 	/*
   13410 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13411 	 * size
   13412 	 */
   13413 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13414 
   13415 	for (i = 0; i < words; i++) {
   13416 		/* The NVM part needs a byte offset, hence * 2 */
   13417 		act_offset = bank_offset + ((offset + i) * 2);
   13418 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13419 		if (rv) {
   13420 			aprint_error_dev(sc->sc_dev,
   13421 			    "%s: failed to read NVM\n", __func__);
   13422 			break;
   13423 		}
   13424 		data[i] = word;
   13425 	}
   13426 
   13427 	sc->nvm.release(sc);
   13428 	return rv;
   13429 }
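
/*
 * Worked example (the bank size value is assumed for illustration): with
 * sc_ich8_flash_bank_size == 0x1000 words and flash_bank == 1, NVM word 3
 * is fetched from byte offset
 *
 *	act_offset = (1 * 0x1000 * 2) + (3 * 2) = 0x2006
 *
 * i.e. two bytes per NVM word, starting at the second bank.
 */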
   13430 
   13431 /******************************************************************************
   13432  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13433  * register.
   13434  *
   13435  * sc - Struct containing variables accessed by shared code
   13436  * offset - offset of word in the EEPROM to read
   13437  * data - word read from the EEPROM
   13438  * words - number of words to read
   13439  *****************************************************************************/
   13440 static int
   13441 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13442 {
   13443 	int32_t	 rv = 0;
   13444 	uint32_t flash_bank = 0;
   13445 	uint32_t act_offset = 0;
   13446 	uint32_t bank_offset = 0;
   13447 	uint32_t dword = 0;
   13448 	uint16_t i = 0;
   13449 
   13450 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13451 		device_xname(sc->sc_dev), __func__));
   13452 
   13453 	if (sc->nvm.acquire(sc) != 0)
   13454 		return -1;
   13455 
   13456 	/*
   13457 	 * We need to know which is the valid flash bank.  In the event
   13458 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13459 	 * managing flash_bank. So it cannot be trusted and needs
   13460 	 * to be updated with each read.
   13461 	 */
   13462 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13463 	if (rv) {
   13464 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13465 			device_xname(sc->sc_dev)));
   13466 		flash_bank = 0;
   13467 	}
   13468 
   13469 	/*
   13470 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13471 	 * size
   13472 	 */
   13473 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13474 
   13475 	for (i = 0; i < words; i++) {
   13476 		/* The NVM part needs a byte offset, hence * 2 */
   13477 		act_offset = bank_offset + ((offset + i) * 2);
   13478 		/* but we must read dword aligned, so mask ... */
   13479 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13480 		if (rv) {
   13481 			aprint_error_dev(sc->sc_dev,
   13482 			    "%s: failed to read NVM\n", __func__);
   13483 			break;
   13484 		}
   13485 		/* ... and pick out low or high word */
   13486 		if ((act_offset & 0x2) == 0)
   13487 			data[i] = (uint16_t)(dword & 0xFFFF);
   13488 		else
   13489 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13490 	}
   13491 
   13492 	sc->nvm.release(sc);
   13493 	return rv;
   13494 }
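
/*
 * Worked example: for an odd NVM word such as offset 5 in bank 0,
 * act_offset = 10 = 0xa.  The dword is read from 0xa & ~0x3 = 0x8 and,
 * since (0xa & 0x2) != 0, the requested word is the high half:
 *
 *	data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
 */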
   13495 
   13496 /* iNVM */
   13497 
   13498 static int
   13499 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13500 {
    13501 	int32_t	 rv = -1;	/* Error unless the word is found below */
   13502 	uint32_t invm_dword;
   13503 	uint16_t i;
   13504 	uint8_t record_type, word_address;
   13505 
   13506 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13507 		device_xname(sc->sc_dev), __func__));
   13508 
   13509 	for (i = 0; i < INVM_SIZE; i++) {
   13510 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13511 		/* Get record type */
   13512 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13513 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13514 			break;
   13515 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13516 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13517 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13518 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13519 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13520 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13521 			if (word_address == address) {
   13522 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13523 				rv = 0;
   13524 				break;
   13525 			}
   13526 		}
   13527 	}
   13528 
   13529 	return rv;
   13530 }
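
/*
 * Informational sketch: iNVM is a flat array of 32-bit records.  The scan
 * above stops at the first uninitialized record, skips the payload dwords
 * of CSR-autoload and RSA-key records, and for each word-autoload record
 * performs, in effect:
 *
 *	if (INVM_DWORD_TO_WORD_ADDRESS(invm_dword) == address)
 *		*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
 */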
   13531 
   13532 static int
   13533 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13534 {
   13535 	int rv = 0;
   13536 	int i;
   13537 
   13538 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13539 		device_xname(sc->sc_dev), __func__));
   13540 
   13541 	if (sc->nvm.acquire(sc) != 0)
   13542 		return -1;
   13543 
   13544 	for (i = 0; i < words; i++) {
   13545 		switch (offset + i) {
   13546 		case NVM_OFF_MACADDR:
   13547 		case NVM_OFF_MACADDR1:
   13548 		case NVM_OFF_MACADDR2:
   13549 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13550 			if (rv != 0) {
   13551 				data[i] = 0xffff;
   13552 				rv = -1;
   13553 			}
   13554 			break;
   13555 		case NVM_OFF_CFG2:
   13556 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13557 			if (rv != 0) {
   13558 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13559 				rv = 0;
   13560 			}
   13561 			break;
   13562 		case NVM_OFF_CFG4:
   13563 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13564 			if (rv != 0) {
   13565 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13566 				rv = 0;
   13567 			}
   13568 			break;
   13569 		case NVM_OFF_LED_1_CFG:
   13570 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13571 			if (rv != 0) {
   13572 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13573 				rv = 0;
   13574 			}
   13575 			break;
   13576 		case NVM_OFF_LED_0_2_CFG:
   13577 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13578 			if (rv != 0) {
   13579 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13580 				rv = 0;
   13581 			}
   13582 			break;
   13583 		case NVM_OFF_ID_LED_SETTINGS:
   13584 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13585 			if (rv != 0) {
   13586 				*data = ID_LED_RESERVED_FFFF;
   13587 				rv = 0;
   13588 			}
   13589 			break;
   13590 		default:
   13591 			DPRINTF(WM_DEBUG_NVM,
   13592 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13593 			*data = NVM_RESERVED_WORD;
   13594 			break;
   13595 		}
   13596 	}
   13597 
   13598 	sc->nvm.release(sc);
   13599 	return rv;
   13600 }
   13601 
   13602 /* Lock, detecting NVM type, validate checksum, version and read */
   13603 
   13604 static int
   13605 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13606 {
   13607 	uint32_t eecd = 0;
   13608 
   13609 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13610 	    || sc->sc_type == WM_T_82583) {
   13611 		eecd = CSR_READ(sc, WMREG_EECD);
   13612 
   13613 		/* Isolate bits 15 & 16 */
   13614 		eecd = ((eecd >> 15) & 0x03);
   13615 
   13616 		/* If both bits are set, device is Flash type */
   13617 		if (eecd == 0x03)
   13618 			return 0;
   13619 	}
   13620 	return 1;
   13621 }
   13622 
   13623 static int
   13624 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13625 {
   13626 	uint32_t eec;
   13627 
   13628 	eec = CSR_READ(sc, WMREG_EEC);
   13629 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13630 		return 1;
   13631 
   13632 	return 0;
   13633 }
   13634 
   13635 /*
   13636  * wm_nvm_validate_checksum
   13637  *
   13638  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13639  */
   13640 static int
   13641 wm_nvm_validate_checksum(struct wm_softc *sc)
   13642 {
   13643 	uint16_t checksum;
   13644 	uint16_t eeprom_data;
   13645 #ifdef WM_DEBUG
   13646 	uint16_t csum_wordaddr, valid_checksum;
   13647 #endif
   13648 	int i;
   13649 
   13650 	checksum = 0;
   13651 
   13652 	/* Don't check for I211 */
   13653 	if (sc->sc_type == WM_T_I211)
   13654 		return 0;
   13655 
   13656 #ifdef WM_DEBUG
   13657 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13658 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13659 		csum_wordaddr = NVM_OFF_COMPAT;
   13660 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13661 	} else {
   13662 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13663 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13664 	}
   13665 
   13666 	/* Dump EEPROM image for debug */
   13667 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13668 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13669 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13670 		/* XXX PCH_SPT? */
   13671 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13672 		if ((eeprom_data & valid_checksum) == 0)
   13673 			DPRINTF(WM_DEBUG_NVM,
   13674 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13675 				device_xname(sc->sc_dev), eeprom_data,
   13676 				    valid_checksum));
   13677 	}
   13678 
   13679 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13680 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13681 		for (i = 0; i < NVM_SIZE; i++) {
   13682 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13683 				printf("XXXX ");
   13684 			else
   13685 				printf("%04hx ", eeprom_data);
   13686 			if (i % 8 == 7)
   13687 				printf("\n");
   13688 		}
   13689 	}
   13690 
   13691 #endif /* WM_DEBUG */
   13692 
   13693 	for (i = 0; i < NVM_SIZE; i++) {
   13694 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13695 			return 1;
   13696 		checksum += eeprom_data;
   13697 	}
   13698 
   13699 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13700 #ifdef WM_DEBUG
   13701 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13702 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13703 #endif
   13704 	}
   13705 
   13706 	return 0;
   13707 }
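
/*
 * Informational sketch: the rule implemented above is simply
 *
 *	(word[0] + word[1] + ... + word[NVM_SIZE - 1]) & 0xffff
 *	    == NVM_CHECKSUM
 *
 * with NVM_SIZE = 64 words; images are generated so that a dedicated
 * word (conventionally 0x3f) makes the 16-bit sum come out to the fixed
 * NVM_CHECKSUM constant (0xBABA on this hardware family).
 */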
   13708 
   13709 static void
   13710 wm_nvm_version_invm(struct wm_softc *sc)
   13711 {
   13712 	uint32_t dword;
   13713 
   13714 	/*
    13715 	 * Linux's code to decode the version is very strange, so we don't
    13716 	 * follow that algorithm and just use word 61 as the document says.
   13717 	 * Perhaps it's not perfect though...
   13718 	 *
   13719 	 * Example:
   13720 	 *
   13721 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13722 	 */
   13723 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13724 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13725 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13726 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13727 }
   13728 
   13729 static void
   13730 wm_nvm_version(struct wm_softc *sc)
   13731 {
   13732 	uint16_t major, minor, build, patch;
   13733 	uint16_t uid0, uid1;
   13734 	uint16_t nvm_data;
   13735 	uint16_t off;
   13736 	bool check_version = false;
   13737 	bool check_optionrom = false;
   13738 	bool have_build = false;
   13739 	bool have_uid = true;
   13740 
   13741 	/*
   13742 	 * Version format:
   13743 	 *
   13744 	 * XYYZ
   13745 	 * X0YZ
   13746 	 * X0YY
   13747 	 *
   13748 	 * Example:
   13749 	 *
   13750 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13751 	 *	82571	0x50a6	5.10.6?
   13752 	 *	82572	0x506a	5.6.10?
   13753 	 *	82572EI	0x5069	5.6.9?
   13754 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13755 	 *		0x2013	2.1.3?
   13756 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13757 	 * ICH8+82567	0x0040	0.4.0?
   13758 	 * ICH9+82566	0x1040	1.4.0?
   13759 	 *ICH10+82567	0x0043	0.4.3?
   13760 	 *  PCH+82577	0x00c1	0.12.1?
   13761 	 * PCH2+82579	0x00d3	0.13.3?
   13762 	 *		0x00d4	0.13.4?
   13763 	 *  LPT+I218	0x0023	0.2.3?
   13764 	 *  SPT+I219	0x0084	0.8.4?
   13765 	 *  CNP+I219	0x0054	0.5.4?
   13766 	 */
   13767 
   13768 	/*
   13769 	 * XXX
   13770 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    13771 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13772 	 */
   13773 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13774 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13775 		have_uid = false;
   13776 
   13777 	switch (sc->sc_type) {
   13778 	case WM_T_82571:
   13779 	case WM_T_82572:
   13780 	case WM_T_82574:
   13781 	case WM_T_82583:
   13782 		check_version = true;
   13783 		check_optionrom = true;
   13784 		have_build = true;
   13785 		break;
   13786 	case WM_T_ICH8:
   13787 	case WM_T_ICH9:
   13788 	case WM_T_ICH10:
   13789 	case WM_T_PCH:
   13790 	case WM_T_PCH2:
   13791 	case WM_T_PCH_LPT:
   13792 	case WM_T_PCH_SPT:
   13793 	case WM_T_PCH_CNP:
   13794 		check_version = true;
   13795 		have_build = true;
   13796 		have_uid = false;
   13797 		break;
   13798 	case WM_T_82575:
   13799 	case WM_T_82576:
   13800 	case WM_T_82580:
   13801 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13802 			check_version = true;
   13803 		break;
   13804 	case WM_T_I211:
   13805 		wm_nvm_version_invm(sc);
   13806 		have_uid = false;
   13807 		goto printver;
   13808 	case WM_T_I210:
   13809 		if (!wm_nvm_flash_presence_i210(sc)) {
   13810 			wm_nvm_version_invm(sc);
   13811 			have_uid = false;
   13812 			goto printver;
   13813 		}
   13814 		/* FALLTHROUGH */
   13815 	case WM_T_I350:
   13816 	case WM_T_I354:
   13817 		check_version = true;
   13818 		check_optionrom = true;
   13819 		break;
   13820 	default:
   13821 		return;
   13822 	}
   13823 	if (check_version
   13824 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13825 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13826 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13827 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13828 			build = nvm_data & NVM_BUILD_MASK;
   13829 			have_build = true;
   13830 		} else
   13831 			minor = nvm_data & 0x00ff;
   13832 
   13833 		/* Decimal */
   13834 		minor = (minor / 16) * 10 + (minor % 16);
   13835 		sc->sc_nvm_ver_major = major;
   13836 		sc->sc_nvm_ver_minor = minor;
   13837 
   13838 printver:
   13839 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13840 		    sc->sc_nvm_ver_minor);
   13841 		if (have_build) {
   13842 			sc->sc_nvm_ver_build = build;
   13843 			aprint_verbose(".%d", build);
   13844 		}
   13845 	}
   13846 
    13847 	/* Assume the Option ROM area is above NVM_SIZE */
   13848 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13849 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13850 		/* Option ROM Version */
   13851 		if ((off != 0x0000) && (off != 0xffff)) {
   13852 			int rv;
   13853 
   13854 			off += NVM_COMBO_VER_OFF;
   13855 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13856 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13857 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13858 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13859 				/* 16bits */
   13860 				major = uid0 >> 8;
   13861 				build = (uid0 << 8) | (uid1 >> 8);
   13862 				patch = uid1 & 0x00ff;
   13863 				aprint_verbose(", option ROM Version %d.%d.%d",
   13864 				    major, build, patch);
   13865 			}
   13866 		}
   13867 	}
   13868 
   13869 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13870 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13871 }
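
/*
 * Worked example (the mask/shift values, 0xf000/12, 0x0ff0/4 and 0x000f,
 * are assumed from the NVM_MAJOR/MINOR/BUILD definitions): a version word
 * of 0x50a2 decodes as major = 0x5, minor = 0x0a, build = 0x2; the
 * decimal step (minor / 16) * 10 + (minor % 16) maps 0x0a to 10, giving
 * "5.10.2" and matching the 82571 row in the table above.
 */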
   13872 
   13873 /*
   13874  * wm_nvm_read:
   13875  *
   13876  *	Read data from the serial EEPROM.
   13877  */
   13878 static int
   13879 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13880 {
   13881 	int rv;
   13882 
   13883 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13884 		device_xname(sc->sc_dev), __func__));
   13885 
   13886 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13887 		return -1;
   13888 
   13889 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13890 
   13891 	return rv;
   13892 }
   13893 
   13894 /*
   13895  * Hardware semaphores.
    13896  * Very complex...
   13897  */
   13898 
   13899 static int
   13900 wm_get_null(struct wm_softc *sc)
   13901 {
   13902 
   13903 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13904 		device_xname(sc->sc_dev), __func__));
   13905 	return 0;
   13906 }
   13907 
   13908 static void
   13909 wm_put_null(struct wm_softc *sc)
   13910 {
   13911 
   13912 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13913 		device_xname(sc->sc_dev), __func__));
   13914 	return;
   13915 }
   13916 
   13917 static int
   13918 wm_get_eecd(struct wm_softc *sc)
   13919 {
   13920 	uint32_t reg;
   13921 	int x;
   13922 
   13923 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13924 		device_xname(sc->sc_dev), __func__));
   13925 
   13926 	reg = CSR_READ(sc, WMREG_EECD);
   13927 
   13928 	/* Request EEPROM access. */
   13929 	reg |= EECD_EE_REQ;
   13930 	CSR_WRITE(sc, WMREG_EECD, reg);
   13931 
   13932 	/* ..and wait for it to be granted. */
   13933 	for (x = 0; x < 1000; x++) {
   13934 		reg = CSR_READ(sc, WMREG_EECD);
   13935 		if (reg & EECD_EE_GNT)
   13936 			break;
   13937 		delay(5);
   13938 	}
   13939 	if ((reg & EECD_EE_GNT) == 0) {
   13940 		aprint_error_dev(sc->sc_dev,
   13941 		    "could not acquire EEPROM GNT\n");
   13942 		reg &= ~EECD_EE_REQ;
   13943 		CSR_WRITE(sc, WMREG_EECD, reg);
   13944 		return -1;
   13945 	}
   13946 
   13947 	return 0;
   13948 }
   13949 
   13950 static void
   13951 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13952 {
   13953 
   13954 	*eecd |= EECD_SK;
   13955 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13956 	CSR_WRITE_FLUSH(sc);
   13957 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13958 		delay(1);
   13959 	else
   13960 		delay(50);
   13961 }
   13962 
   13963 static void
   13964 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13965 {
   13966 
   13967 	*eecd &= ~EECD_SK;
   13968 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13969 	CSR_WRITE_FLUSH(sc);
   13970 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13971 		delay(1);
   13972 	else
   13973 		delay(50);
   13974 }
   13975 
   13976 static void
   13977 wm_put_eecd(struct wm_softc *sc)
   13978 {
   13979 	uint32_t reg;
   13980 
   13981 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13982 		device_xname(sc->sc_dev), __func__));
   13983 
   13984 	/* Stop nvm */
   13985 	reg = CSR_READ(sc, WMREG_EECD);
   13986 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13987 		/* Pull CS high */
   13988 		reg |= EECD_CS;
   13989 		wm_nvm_eec_clock_lower(sc, &reg);
   13990 	} else {
   13991 		/* CS on Microwire is active-high */
   13992 		reg &= ~(EECD_CS | EECD_DI);
   13993 		CSR_WRITE(sc, WMREG_EECD, reg);
   13994 		wm_nvm_eec_clock_raise(sc, &reg);
   13995 		wm_nvm_eec_clock_lower(sc, &reg);
   13996 	}
   13997 
   13998 	reg = CSR_READ(sc, WMREG_EECD);
   13999 	reg &= ~EECD_EE_REQ;
   14000 	CSR_WRITE(sc, WMREG_EECD, reg);
   14001 
   14002 	return;
   14003 }
   14004 
   14005 /*
   14006  * Get hardware semaphore.
   14007  * Same as e1000_get_hw_semaphore_generic()
   14008  */
   14009 static int
   14010 wm_get_swsm_semaphore(struct wm_softc *sc)
   14011 {
   14012 	int32_t timeout;
   14013 	uint32_t swsm;
   14014 
   14015 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14016 		device_xname(sc->sc_dev), __func__));
   14017 	KASSERT(sc->sc_nvm_wordsize > 0);
   14018 
   14019 retry:
   14020 	/* Get the SW semaphore. */
   14021 	timeout = sc->sc_nvm_wordsize + 1;
   14022 	while (timeout) {
   14023 		swsm = CSR_READ(sc, WMREG_SWSM);
   14024 
   14025 		if ((swsm & SWSM_SMBI) == 0)
   14026 			break;
   14027 
   14028 		delay(50);
   14029 		timeout--;
   14030 	}
   14031 
   14032 	if (timeout == 0) {
   14033 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14034 			/*
   14035 			 * In rare circumstances, the SW semaphore may already
   14036 			 * be held unintentionally. Clear the semaphore once
   14037 			 * before giving up.
   14038 			 */
   14039 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14040 			wm_put_swsm_semaphore(sc);
   14041 			goto retry;
   14042 		}
   14043 		aprint_error_dev(sc->sc_dev,
   14044 		    "could not acquire SWSM SMBI\n");
   14045 		return 1;
   14046 	}
   14047 
   14048 	/* Get the FW semaphore. */
   14049 	timeout = sc->sc_nvm_wordsize + 1;
   14050 	while (timeout) {
   14051 		swsm = CSR_READ(sc, WMREG_SWSM);
   14052 		swsm |= SWSM_SWESMBI;
   14053 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14054 		/* If we managed to set the bit we got the semaphore. */
   14055 		swsm = CSR_READ(sc, WMREG_SWSM);
   14056 		if (swsm & SWSM_SWESMBI)
   14057 			break;
   14058 
   14059 		delay(50);
   14060 		timeout--;
   14061 	}
   14062 
   14063 	if (timeout == 0) {
   14064 		aprint_error_dev(sc->sc_dev,
   14065 		    "could not acquire SWSM SWESMBI\n");
   14066 		/* Release semaphores */
   14067 		wm_put_swsm_semaphore(sc);
   14068 		return 1;
   14069 	}
   14070 	return 0;
   14071 }
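
/*
 * Informational note (per the usual e1000 SWSM semantics): the function
 * above is a two-stage handshake.  SMBI arbitrates among software
 * threads; claiming it happens on the read of SWSM, so it reads back
 * clear only for the winner until released.  SWESMBI arbitrates between
 * software and firmware: software writes the bit and then reads it back,
 * owning the resource only if the bit stuck.  Both bits are dropped
 * together in wm_put_swsm_semaphore().
 */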
   14072 
   14073 /*
   14074  * Put hardware semaphore.
   14075  * Same as e1000_put_hw_semaphore_generic()
   14076  */
   14077 static void
   14078 wm_put_swsm_semaphore(struct wm_softc *sc)
   14079 {
   14080 	uint32_t swsm;
   14081 
   14082 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14083 		device_xname(sc->sc_dev), __func__));
   14084 
   14085 	swsm = CSR_READ(sc, WMREG_SWSM);
   14086 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14087 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14088 }
   14089 
   14090 /*
   14091  * Get SW/FW semaphore.
   14092  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14093  */
   14094 static int
   14095 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14096 {
   14097 	uint32_t swfw_sync;
   14098 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14099 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14100 	int timeout;
   14101 
   14102 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14103 		device_xname(sc->sc_dev), __func__));
   14104 
   14105 	if (sc->sc_type == WM_T_80003)
   14106 		timeout = 50;
   14107 	else
   14108 		timeout = 200;
   14109 
   14110 	while (timeout) {
   14111 		if (wm_get_swsm_semaphore(sc)) {
   14112 			aprint_error_dev(sc->sc_dev,
   14113 			    "%s: failed to get semaphore\n",
   14114 			    __func__);
   14115 			return 1;
   14116 		}
   14117 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14118 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14119 			swfw_sync |= swmask;
   14120 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14121 			wm_put_swsm_semaphore(sc);
   14122 			return 0;
   14123 		}
   14124 		wm_put_swsm_semaphore(sc);
   14125 		delay(5000);
   14126 		timeout--;
   14127 	}
   14128 	device_printf(sc->sc_dev,
   14129 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14130 	    mask, swfw_sync);
   14131 	return 1;
   14132 }
   14133 
   14134 static void
   14135 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14136 {
   14137 	uint32_t swfw_sync;
   14138 
   14139 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14140 		device_xname(sc->sc_dev), __func__));
   14141 
   14142 	while (wm_get_swsm_semaphore(sc) != 0)
   14143 		continue;
   14144 
   14145 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14146 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14147 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14148 
   14149 	wm_put_swsm_semaphore(sc);
   14150 }
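
/*
 * Illustrative usage sketch: callers pair the two functions above around
 * access to a shared resource, e.g.
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
 *		(access the NVM)
 *		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 *	}
 *
 * The SWSM semaphore only protects the read-modify-write of SW_FW_SYNC;
 * the SW_FW_SYNC software/firmware bits are what actually lock the
 * resource.
 */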
   14151 
   14152 static int
   14153 wm_get_nvm_80003(struct wm_softc *sc)
   14154 {
   14155 	int rv;
   14156 
   14157 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14158 		device_xname(sc->sc_dev), __func__));
   14159 
   14160 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14161 		aprint_error_dev(sc->sc_dev,
   14162 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14163 		return rv;
   14164 	}
   14165 
   14166 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14167 	    && (rv = wm_get_eecd(sc)) != 0) {
   14168 		aprint_error_dev(sc->sc_dev,
   14169 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14170 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14171 		return rv;
   14172 	}
   14173 
   14174 	return 0;
   14175 }
   14176 
   14177 static void
   14178 wm_put_nvm_80003(struct wm_softc *sc)
   14179 {
   14180 
   14181 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14182 		device_xname(sc->sc_dev), __func__));
   14183 
   14184 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14185 		wm_put_eecd(sc);
   14186 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14187 }
   14188 
   14189 static int
   14190 wm_get_nvm_82571(struct wm_softc *sc)
   14191 {
   14192 	int rv;
   14193 
   14194 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14195 		device_xname(sc->sc_dev), __func__));
   14196 
   14197 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14198 		return rv;
   14199 
   14200 	switch (sc->sc_type) {
   14201 	case WM_T_82573:
   14202 		break;
   14203 	default:
   14204 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14205 			rv = wm_get_eecd(sc);
   14206 		break;
   14207 	}
   14208 
   14209 	if (rv != 0) {
   14210 		aprint_error_dev(sc->sc_dev,
   14211 		    "%s: failed to get semaphore\n",
   14212 		    __func__);
   14213 		wm_put_swsm_semaphore(sc);
   14214 	}
   14215 
   14216 	return rv;
   14217 }
   14218 
   14219 static void
   14220 wm_put_nvm_82571(struct wm_softc *sc)
   14221 {
   14222 
   14223 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14224 		device_xname(sc->sc_dev), __func__));
   14225 
   14226 	switch (sc->sc_type) {
   14227 	case WM_T_82573:
   14228 		break;
   14229 	default:
   14230 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14231 			wm_put_eecd(sc);
   14232 		break;
   14233 	}
   14234 
   14235 	wm_put_swsm_semaphore(sc);
   14236 }
   14237 
   14238 static int
   14239 wm_get_phy_82575(struct wm_softc *sc)
   14240 {
   14241 
   14242 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14243 		device_xname(sc->sc_dev), __func__));
   14244 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14245 }
   14246 
   14247 static void
   14248 wm_put_phy_82575(struct wm_softc *sc)
   14249 {
   14250 
   14251 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14252 		device_xname(sc->sc_dev), __func__));
   14253 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14254 }
   14255 
   14256 static int
   14257 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14258 {
   14259 	uint32_t ext_ctrl;
   14260 	int timeout = 200;
   14261 
   14262 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14263 		device_xname(sc->sc_dev), __func__));
   14264 
   14265 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14266 	for (timeout = 0; timeout < 200; timeout++) {
   14267 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14268 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14269 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14270 
   14271 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14272 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14273 			return 0;
   14274 		delay(5000);
   14275 	}
   14276 	device_printf(sc->sc_dev,
   14277 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14278 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14279 	return 1;
   14280 }
   14281 
   14282 static void
   14283 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14284 {
   14285 	uint32_t ext_ctrl;
   14286 
   14287 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14288 		device_xname(sc->sc_dev), __func__));
   14289 
   14290 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14291 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14292 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14293 
   14294 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14295 }
   14296 
   14297 static int
   14298 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14299 {
   14300 	uint32_t ext_ctrl;
   14301 	int timeout;
   14302 
   14303 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14304 		device_xname(sc->sc_dev), __func__));
   14305 	mutex_enter(sc->sc_ich_phymtx);
   14306 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14307 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14308 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14309 			break;
   14310 		delay(1000);
   14311 	}
   14312 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14313 		device_printf(sc->sc_dev,
   14314 		    "SW has already locked the resource\n");
   14315 		goto out;
   14316 	}
   14317 
   14318 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14319 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14320 	for (timeout = 0; timeout < 1000; timeout++) {
   14321 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14322 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14323 			break;
   14324 		delay(1000);
   14325 	}
   14326 	if (timeout >= 1000) {
   14327 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14328 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14329 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14330 		goto out;
   14331 	}
   14332 	return 0;
   14333 
   14334 out:
   14335 	mutex_exit(sc->sc_ich_phymtx);
   14336 	return 1;
   14337 }
   14338 
   14339 static void
   14340 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14341 {
   14342 	uint32_t ext_ctrl;
   14343 
   14344 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14345 		device_xname(sc->sc_dev), __func__));
   14346 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14347 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14348 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14349 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14350 	} else {
   14351 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14352 	}
   14353 
   14354 	mutex_exit(sc->sc_ich_phymtx);
   14355 }
   14356 
   14357 static int
   14358 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14359 {
   14360 
   14361 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14362 		device_xname(sc->sc_dev), __func__));
   14363 	mutex_enter(sc->sc_ich_nvmmtx);
   14364 
   14365 	return 0;
   14366 }
   14367 
   14368 static void
   14369 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14370 {
   14371 
   14372 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14373 		device_xname(sc->sc_dev), __func__));
   14374 	mutex_exit(sc->sc_ich_nvmmtx);
   14375 }
   14376 
   14377 static int
   14378 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14379 {
   14380 	int i = 0;
   14381 	uint32_t reg;
   14382 
   14383 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14384 		device_xname(sc->sc_dev), __func__));
   14385 
   14386 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14387 	do {
   14388 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14389 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14390 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14391 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14392 			break;
   14393 		delay(2*1000);
   14394 		i++;
   14395 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14396 
   14397 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14398 		wm_put_hw_semaphore_82573(sc);
   14399 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14400 		    device_xname(sc->sc_dev));
   14401 		return -1;
   14402 	}
   14403 
   14404 	return 0;
   14405 }
   14406 
   14407 static void
   14408 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14409 {
   14410 	uint32_t reg;
   14411 
   14412 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14413 		device_xname(sc->sc_dev), __func__));
   14414 
   14415 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14416 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14417 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14418 }
   14419 
   14420 /*
   14421  * Management mode and power management related subroutines.
   14422  * BMC, AMT, suspend/resume and EEE.
   14423  */
   14424 
   14425 #ifdef WM_WOL
   14426 static int
   14427 wm_check_mng_mode(struct wm_softc *sc)
   14428 {
   14429 	int rv;
   14430 
   14431 	switch (sc->sc_type) {
   14432 	case WM_T_ICH8:
   14433 	case WM_T_ICH9:
   14434 	case WM_T_ICH10:
   14435 	case WM_T_PCH:
   14436 	case WM_T_PCH2:
   14437 	case WM_T_PCH_LPT:
   14438 	case WM_T_PCH_SPT:
   14439 	case WM_T_PCH_CNP:
   14440 		rv = wm_check_mng_mode_ich8lan(sc);
   14441 		break;
   14442 	case WM_T_82574:
   14443 	case WM_T_82583:
   14444 		rv = wm_check_mng_mode_82574(sc);
   14445 		break;
   14446 	case WM_T_82571:
   14447 	case WM_T_82572:
   14448 	case WM_T_82573:
   14449 	case WM_T_80003:
   14450 		rv = wm_check_mng_mode_generic(sc);
   14451 		break;
   14452 	default:
    14453 		/* Nothing to do */
   14454 		rv = 0;
   14455 		break;
   14456 	}
   14457 
   14458 	return rv;
   14459 }
   14460 
   14461 static int
   14462 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14463 {
   14464 	uint32_t fwsm;
   14465 
   14466 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14467 
   14468 	if (((fwsm & FWSM_FW_VALID) != 0)
   14469 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14470 		return 1;
   14471 
   14472 	return 0;
   14473 }
   14474 
   14475 static int
   14476 wm_check_mng_mode_82574(struct wm_softc *sc)
   14477 {
   14478 	uint16_t data;
   14479 
   14480 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14481 
   14482 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14483 		return 1;
   14484 
   14485 	return 0;
   14486 }
   14487 
   14488 static int
   14489 wm_check_mng_mode_generic(struct wm_softc *sc)
   14490 {
   14491 	uint32_t fwsm;
   14492 
   14493 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14494 
   14495 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14496 		return 1;
   14497 
   14498 	return 0;
   14499 }
   14500 #endif /* WM_WOL */
   14501 
   14502 static int
   14503 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14504 {
   14505 	uint32_t manc, fwsm, factps;
   14506 
   14507 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14508 		return 0;
   14509 
   14510 	manc = CSR_READ(sc, WMREG_MANC);
   14511 
   14512 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14513 		device_xname(sc->sc_dev), manc));
   14514 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14515 		return 0;
   14516 
   14517 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14518 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14519 		factps = CSR_READ(sc, WMREG_FACTPS);
   14520 		if (((factps & FACTPS_MNGCG) == 0)
   14521 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14522 			return 1;
   14523 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14524 		uint16_t data;
   14525 
   14526 		factps = CSR_READ(sc, WMREG_FACTPS);
   14527 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14528 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14529 			device_xname(sc->sc_dev), factps, data));
   14530 		if (((factps & FACTPS_MNGCG) == 0)
   14531 		    && ((data & NVM_CFG2_MNGM_MASK)
   14532 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14533 			return 1;
   14534 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14535 	    && ((manc & MANC_ASF_EN) == 0))
   14536 		return 1;
   14537 
   14538 	return 0;
   14539 }
   14540 
   14541 static bool
   14542 wm_phy_resetisblocked(struct wm_softc *sc)
   14543 {
   14544 	bool blocked = false;
   14545 	uint32_t reg;
   14546 	int i = 0;
   14547 
   14548 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14549 		device_xname(sc->sc_dev), __func__));
   14550 
   14551 	switch (sc->sc_type) {
   14552 	case WM_T_ICH8:
   14553 	case WM_T_ICH9:
   14554 	case WM_T_ICH10:
   14555 	case WM_T_PCH:
   14556 	case WM_T_PCH2:
   14557 	case WM_T_PCH_LPT:
   14558 	case WM_T_PCH_SPT:
   14559 	case WM_T_PCH_CNP:
   14560 		do {
   14561 			reg = CSR_READ(sc, WMREG_FWSM);
   14562 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14563 				blocked = true;
   14564 				delay(10*1000);
   14565 				continue;
   14566 			}
   14567 			blocked = false;
   14568 		} while (blocked && (i++ < 30));
   14569 		return blocked;
   14571 	case WM_T_82571:
   14572 	case WM_T_82572:
   14573 	case WM_T_82573:
   14574 	case WM_T_82574:
   14575 	case WM_T_82583:
   14576 	case WM_T_80003:
   14577 		reg = CSR_READ(sc, WMREG_MANC);
    14578 		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   14583 	default:
   14584 		/* No problem */
   14585 		break;
   14586 	}
   14587 
   14588 	return false;
   14589 }
   14590 
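          /*
           *  wm_get_hw_control - Get control of the hardware from the firmware
           *  @sc: pointer to the HW structure
           *
           *  Set the DRV_LOAD bit to tell the management firmware that the driver
           *  has taken over control of the hardware.
           */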
   14591 static void
   14592 wm_get_hw_control(struct wm_softc *sc)
   14593 {
   14594 	uint32_t reg;
   14595 
   14596 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14597 		device_xname(sc->sc_dev), __func__));
   14598 
   14599 	if (sc->sc_type == WM_T_82573) {
   14600 		reg = CSR_READ(sc, WMREG_SWSM);
   14601 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14602 	} else if (sc->sc_type >= WM_T_82571) {
   14603 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14604 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14605 	}
   14606 }
   14607 
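          /*
           *  wm_release_hw_control - Return control of the hardware to the firmware
           *  @sc: pointer to the HW structure
           *
           *  Clear the DRV_LOAD bit to tell the management firmware that the driver
           *  has released control of the hardware.
           */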
   14608 static void
   14609 wm_release_hw_control(struct wm_softc *sc)
   14610 {
   14611 	uint32_t reg;
   14612 
   14613 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14614 		device_xname(sc->sc_dev), __func__));
   14615 
   14616 	if (sc->sc_type == WM_T_82573) {
   14617 		reg = CSR_READ(sc, WMREG_SWSM);
   14618 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14619 	} else if (sc->sc_type >= WM_T_82571) {
   14620 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14621 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14622 	}
   14623 }
   14624 
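          /*
           *  wm_gate_hw_phy_config_ich8lan - Gate automatic PHY configuration
           *  @sc: pointer to the HW structure
           *  @gate: boolean indicating whether to gate or ungate
           *
           *  Gate/ungate the automatic PHY configuration done by the hardware on
           *  PCH2 and newer; on older chips this is a no-op.
           */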
   14625 static void
   14626 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14627 {
   14628 	uint32_t reg;
   14629 
   14630 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14631 		device_xname(sc->sc_dev), __func__));
   14632 
   14633 	if (sc->sc_type < WM_T_PCH2)
   14634 		return;
   14635 
   14636 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14637 
   14638 	if (gate)
   14639 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14640 	else
   14641 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14642 
   14643 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14644 }
   14645 
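          /*
           *  wm_init_phy_workarounds_pchlan - PHY initialization workarounds
           *  @sc: pointer to the HW structure
           *
           *  Make sure the PHY is accessible before init or reset on PCH and
           *  newer: disable ULP, force the MAC-PHY interconnect out of SMBus mode
           *  (toggling LANPHYPC if needed) and reset the PHY.
           */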
   14646 static int
   14647 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14648 {
   14649 	uint32_t fwsm, reg;
   14650 	int rv = 0;
   14651 
   14652 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14653 		device_xname(sc->sc_dev), __func__));
   14654 
   14655 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14656 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14657 
   14658 	/* Disable ULP */
   14659 	wm_ulp_disable(sc);
   14660 
   14661 	/* Acquire PHY semaphore */
   14662 	rv = sc->phy.acquire(sc);
   14663 	if (rv != 0) {
   14664 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14665 		device_xname(sc->sc_dev), __func__));
   14666 		return -1;
   14667 	}
   14668 
   14669 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14670 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14671 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14672 	 */
   14673 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14674 	switch (sc->sc_type) {
   14675 	case WM_T_PCH_LPT:
   14676 	case WM_T_PCH_SPT:
   14677 	case WM_T_PCH_CNP:
   14678 		if (wm_phy_is_accessible_pchlan(sc))
   14679 			break;
   14680 
   14681 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14682 		 * forcing MAC to SMBus mode first.
   14683 		 */
   14684 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14685 		reg |= CTRL_EXT_FORCE_SMBUS;
   14686 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14687 #if 0
   14688 		/* XXX Isn't this required??? */
   14689 		CSR_WRITE_FLUSH(sc);
   14690 #endif
   14691 		/* Wait 50 milliseconds for MAC to finish any retries
   14692 		 * that it might be trying to perform from previous
   14693 		 * attempts to acknowledge any phy read requests.
   14694 		 */
   14695 		delay(50 * 1000);
   14696 		/* FALLTHROUGH */
   14697 	case WM_T_PCH2:
   14698 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14699 			break;
   14700 		/* FALLTHROUGH */
   14701 	case WM_T_PCH:
    14702 		if ((sc->sc_type == WM_T_PCH) && ((fwsm & FWSM_FW_VALID) != 0))
    14703 			break;
   14705 
   14706 		if (wm_phy_resetisblocked(sc) == true) {
   14707 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14708 			break;
   14709 		}
   14710 
   14711 		/* Toggle LANPHYPC Value bit */
   14712 		wm_toggle_lanphypc_pch_lpt(sc);
   14713 
   14714 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14715 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14716 				break;
   14717 
   14718 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14719 			 * so ensure that the MAC is also out of SMBus mode
   14720 			 */
   14721 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14722 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14723 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14724 
   14725 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14726 				break;
   14727 			rv = -1;
   14728 		}
   14729 		break;
   14730 	default:
   14731 		break;
   14732 	}
   14733 
   14734 	/* Release semaphore */
   14735 	sc->phy.release(sc);
   14736 
   14737 	if (rv == 0) {
   14738 		/* Check to see if able to reset PHY.  Print error if not */
   14739 		if (wm_phy_resetisblocked(sc)) {
   14740 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14741 			goto out;
   14742 		}
   14743 
    14744 		/* Reset the PHY before any access to it.  Doing so ensures
   14745 		 * that the PHY is in a known good state before we read/write
   14746 		 * PHY registers.  The generic reset is sufficient here,
   14747 		 * because we haven't determined the PHY type yet.
   14748 		 */
   14749 		if (wm_reset_phy(sc) != 0)
   14750 			goto out;
   14751 
   14752 		/* On a successful reset, possibly need to wait for the PHY
   14753 		 * to quiesce to an accessible state before returning control
   14754 		 * to the calling function.  If the PHY does not quiesce, then
   14755 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    14756 		 * the PHY is in.
   14757 		 */
   14758 		if (wm_phy_resetisblocked(sc))
   14759 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14760 	}
   14761 
   14762 out:
   14763 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14764 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14765 		delay(10*1000);
   14766 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14767 	}
   14768 
    14769 	return rv;
   14770 }
   14771 
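          /*
           *  wm_init_manageability - Enable management pass through
           *  @sc: pointer to the HW structure
           *
           *  If the adapter has a management interface, let the host handle ARP
           *  and, on 82571 and newer, route management packets for ports 623 and
           *  624 to the host.
           */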
   14772 static void
   14773 wm_init_manageability(struct wm_softc *sc)
   14774 {
   14775 
   14776 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14777 		device_xname(sc->sc_dev), __func__));
   14778 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14779 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14780 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14781 
   14782 		/* Disable hardware interception of ARP */
   14783 		manc &= ~MANC_ARP_EN;
   14784 
   14785 		/* Enable receiving management packets to the host */
   14786 		if (sc->sc_type >= WM_T_82571) {
   14787 			manc |= MANC_EN_MNG2HOST;
   14788 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14789 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14790 		}
   14791 
   14792 		CSR_WRITE(sc, WMREG_MANC, manc);
   14793 	}
   14794 }
   14795 
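          /*
           *  wm_release_manageability - Undo wm_init_manageability()
           *  @sc: pointer to the HW structure
           *
           *  Give ARP handling back to the management firmware and stop forwarding
           *  management packets to the host.
           */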
   14796 static void
   14797 wm_release_manageability(struct wm_softc *sc)
   14798 {
   14799 
   14800 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14801 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14802 
   14803 		manc |= MANC_ARP_EN;
   14804 		if (sc->sc_type >= WM_T_82571)
   14805 			manc &= ~MANC_EN_MNG2HOST;
   14806 
   14807 		CSR_WRITE(sc, WMREG_MANC, manc);
   14808 	}
   14809 }
   14810 
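          /*
           *  wm_get_wakeup - Set wakeup related flags
           *  @sc: pointer to the HW structure
           *
           *  Set WM_F_HAS_AMT, WM_F_ARC_SUBSYS_VALID and WM_F_ASF_FIRMWARE_PRES
           *  from the chip type and firmware state, then set WM_F_HAS_MANAGE if
           *  management pass through is enabled.
           */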
   14811 static void
   14812 wm_get_wakeup(struct wm_softc *sc)
   14813 {
   14814 
   14815 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14816 	switch (sc->sc_type) {
   14817 	case WM_T_82573:
   14818 	case WM_T_82583:
   14819 		sc->sc_flags |= WM_F_HAS_AMT;
   14820 		/* FALLTHROUGH */
   14821 	case WM_T_80003:
   14822 	case WM_T_82575:
   14823 	case WM_T_82576:
   14824 	case WM_T_82580:
   14825 	case WM_T_I350:
   14826 	case WM_T_I354:
   14827 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14828 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14829 		/* FALLTHROUGH */
   14830 	case WM_T_82541:
   14831 	case WM_T_82541_2:
   14832 	case WM_T_82547:
   14833 	case WM_T_82547_2:
   14834 	case WM_T_82571:
   14835 	case WM_T_82572:
   14836 	case WM_T_82574:
   14837 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14838 		break;
   14839 	case WM_T_ICH8:
   14840 	case WM_T_ICH9:
   14841 	case WM_T_ICH10:
   14842 	case WM_T_PCH:
   14843 	case WM_T_PCH2:
   14844 	case WM_T_PCH_LPT:
   14845 	case WM_T_PCH_SPT:
   14846 	case WM_T_PCH_CNP:
   14847 		sc->sc_flags |= WM_F_HAS_AMT;
   14848 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14849 		break;
   14850 	default:
   14851 		break;
   14852 	}
   14853 
   14854 	/* 1: HAS_MANAGE */
   14855 	if (wm_enable_mng_pass_thru(sc) != 0)
   14856 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14857 
   14858 	/*
    14859 	 * Note that the WOL flag is set after the resetting of the EEPROM
    14860 	 * stuff.
   14861 	 */
   14862 }
   14863 
   14864 /*
   14865  * Unconfigure Ultra Low Power mode.
   14866  * Only for I217 and newer (see below).
   14867  */
   14868 static int
   14869 wm_ulp_disable(struct wm_softc *sc)
   14870 {
   14871 	uint32_t reg;
   14872 	uint16_t phyreg;
   14873 	int i = 0, rv = 0;
   14874 
   14875 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14876 		device_xname(sc->sc_dev), __func__));
   14877 	/* Exclude old devices */
   14878 	if ((sc->sc_type < WM_T_PCH_LPT)
   14879 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14880 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14881 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14882 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14883 		return 0;
   14884 
   14885 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14886 		/* Request ME un-configure ULP mode in the PHY */
   14887 		reg = CSR_READ(sc, WMREG_H2ME);
   14888 		reg &= ~H2ME_ULP;
   14889 		reg |= H2ME_ENFORCE_SETTINGS;
   14890 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14891 
   14892 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14893 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14894 			if (i++ == 30) {
   14895 				device_printf(sc->sc_dev, "%s timed out\n",
   14896 				    __func__);
   14897 				return -1;
   14898 			}
   14899 			delay(10 * 1000);
   14900 		}
   14901 		reg = CSR_READ(sc, WMREG_H2ME);
   14902 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14903 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14904 
   14905 		return 0;
   14906 	}
   14907 
   14908 	/* Acquire semaphore */
   14909 	rv = sc->phy.acquire(sc);
   14910 	if (rv != 0) {
   14911 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14912 		device_xname(sc->sc_dev), __func__));
   14913 		return -1;
   14914 	}
   14915 
   14916 	/* Toggle LANPHYPC */
   14917 	wm_toggle_lanphypc_pch_lpt(sc);
   14918 
   14919 	/* Unforce SMBus mode in PHY */
   14920 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14921 	if (rv != 0) {
   14922 		uint32_t reg2;
   14923 
   14924 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14925 			__func__);
   14926 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14927 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14928 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14929 		delay(50 * 1000);
   14930 
   14931 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14932 		    &phyreg);
   14933 		if (rv != 0)
   14934 			goto release;
   14935 	}
   14936 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14937 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14938 
   14939 	/* Unforce SMBus mode in MAC */
   14940 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14941 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14942 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14943 
   14944 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14945 	if (rv != 0)
   14946 		goto release;
   14947 	phyreg |= HV_PM_CTRL_K1_ENA;
   14948 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14949 
   14950 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14951 		&phyreg);
   14952 	if (rv != 0)
   14953 		goto release;
   14954 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14955 	    | I218_ULP_CONFIG1_STICKY_ULP
   14956 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14957 	    | I218_ULP_CONFIG1_WOL_HOST
   14958 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14959 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14960 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14961 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14962 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14963 	phyreg |= I218_ULP_CONFIG1_START;
   14964 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14965 
   14966 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14967 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14968 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14969 
   14970 release:
   14971 	/* Release semaphore */
   14972 	sc->phy.release(sc);
   14973 	wm_gmii_reset(sc);
   14974 	delay(50 * 1000);
   14975 
   14976 	return rv;
   14977 }
   14978 
   14979 /* WOL in the newer chipset interfaces (pchlan) */
   14980 static int
   14981 wm_enable_phy_wakeup(struct wm_softc *sc)
   14982 {
   14983 	device_t dev = sc->sc_dev;
   14984 	uint32_t mreg, moff;
   14985 	uint16_t wuce, wuc, wufc, preg;
   14986 	int i, rv;
   14987 
   14988 	KASSERT(sc->sc_type >= WM_T_PCH);
   14989 
   14990 	/* Copy MAC RARs to PHY RARs */
   14991 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14992 
   14993 	/* Activate PHY wakeup */
   14994 	rv = sc->phy.acquire(sc);
   14995 	if (rv != 0) {
   14996 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14997 		    __func__);
   14998 		return rv;
   14999 	}
   15000 
   15001 	/*
   15002 	 * Enable access to PHY wakeup registers.
   15003 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15004 	 */
   15005 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15006 	if (rv != 0) {
   15007 		device_printf(dev,
   15008 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15009 		goto release;
   15010 	}
   15011 
   15012 	/* Copy MAC MTA to PHY MTA */
   15013 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15014 		uint16_t lo, hi;
   15015 
   15016 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15017 		lo = (uint16_t)(mreg & 0xffff);
   15018 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15019 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15020 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15021 	}
   15022 
   15023 	/* Configure PHY Rx Control register */
   15024 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15025 	mreg = CSR_READ(sc, WMREG_RCTL);
   15026 	if (mreg & RCTL_UPE)
   15027 		preg |= BM_RCTL_UPE;
   15028 	if (mreg & RCTL_MPE)
   15029 		preg |= BM_RCTL_MPE;
   15030 	preg &= ~(BM_RCTL_MO_MASK);
   15031 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15032 	if (moff != 0)
   15033 		preg |= moff << BM_RCTL_MO_SHIFT;
   15034 	if (mreg & RCTL_BAM)
   15035 		preg |= BM_RCTL_BAM;
   15036 	if (mreg & RCTL_PMCF)
   15037 		preg |= BM_RCTL_PMCF;
   15038 	mreg = CSR_READ(sc, WMREG_CTRL);
   15039 	if (mreg & CTRL_RFCE)
   15040 		preg |= BM_RCTL_RFCE;
   15041 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15042 
   15043 	wuc = WUC_APME | WUC_PME_EN;
   15044 	wufc = WUFC_MAG;
   15045 	/* Enable PHY wakeup in MAC register */
   15046 	CSR_WRITE(sc, WMREG_WUC,
   15047 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15048 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15049 
   15050 	/* Configure and enable PHY wakeup in PHY registers */
   15051 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15052 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15053 
   15054 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15055 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15056 
   15057 release:
   15058 	sc->phy.release(sc);
   15059 
    15060 	return rv;
   15061 }
   15062 
   15063 /* Power down workaround on D3 */
   15064 static void
   15065 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15066 {
   15067 	uint32_t reg;
   15068 	uint16_t phyreg;
   15069 	int i;
   15070 
   15071 	for (i = 0; i < 2; i++) {
   15072 		/* Disable link */
   15073 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15074 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15075 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15076 
   15077 		/*
   15078 		 * Call gig speed drop workaround on Gig disable before
   15079 		 * accessing any PHY registers
   15080 		 */
   15081 		if (sc->sc_type == WM_T_ICH8)
   15082 			wm_gig_downshift_workaround_ich8lan(sc);
   15083 
   15084 		/* Write VR power-down enable */
   15085 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15086 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15087 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15088 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15089 
   15090 		/* Read it back and test */
   15091 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15092 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15093 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15094 			break;
   15095 
   15096 		/* Issue PHY reset and repeat at most one more time */
   15097 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15098 	}
   15099 }
   15100 
   15101 /*
   15102  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15103  *  @sc: pointer to the HW structure
   15104  *
   15105  *  During S0 to Sx transition, it is possible the link remains at gig
   15106  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15107  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15108  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15109  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15110  *  needs to be written.
    15111  *  Parts that support (and are linked to a partner which supports) EEE in
   15112  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15113  *  than 10Mbps w/o EEE.
   15114  */
   15115 static void
   15116 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15117 {
   15118 	device_t dev = sc->sc_dev;
   15119 	struct ethercom *ec = &sc->sc_ethercom;
   15120 	uint32_t phy_ctrl;
   15121 	int rv;
   15122 
   15123 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15124 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15125 
   15126 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15127 
   15128 	if (sc->sc_phytype == WMPHY_I217) {
   15129 		uint16_t devid = sc->sc_pcidevid;
   15130 
   15131 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15132 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15133 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15134 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15135 		    (sc->sc_type >= WM_T_PCH_SPT))
   15136 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15137 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15138 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15139 
   15140 		if (sc->phy.acquire(sc) != 0)
   15141 			goto out;
   15142 
   15143 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15144 			uint16_t eee_advert;
   15145 
   15146 			rv = wm_read_emi_reg_locked(dev,
   15147 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15148 			if (rv)
   15149 				goto release;
   15150 
   15151 			/*
   15152 			 * Disable LPLU if both link partners support 100BaseT
   15153 			 * EEE and 100Full is advertised on both ends of the
   15154 			 * link, and enable Auto Enable LPI since there will
   15155 			 * be no driver to enable LPI while in Sx.
   15156 			 */
   15157 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15158 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15159 				uint16_t anar, phy_reg;
   15160 
   15161 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15162 				    &anar);
   15163 				if (anar & ANAR_TX_FD) {
   15164 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15165 					    PHY_CTRL_NOND0A_LPLU);
   15166 
   15167 					/* Set Auto Enable LPI after link up */
   15168 					sc->phy.readreg_locked(dev, 2,
   15169 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15170 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15171 					sc->phy.writereg_locked(dev, 2,
   15172 					    I217_LPI_GPIO_CTRL, phy_reg);
   15173 				}
   15174 			}
   15175 		}
   15176 
   15177 		/*
   15178 		 * For i217 Intel Rapid Start Technology support,
   15179 		 * when the system is going into Sx and no manageability engine
   15180 		 * is present, the driver must configure proxy to reset only on
   15181 		 * power good.	LPI (Low Power Idle) state must also reset only
   15182 		 * on power good, as well as the MTA (Multicast table array).
   15183 		 * The SMBus release must also be disabled on LCD reset.
   15184 		 */
   15185 
   15186 		/*
   15187 		 * Enable MTA to reset for Intel Rapid Start Technology
   15188 		 * Support
   15189 		 */
   15190 
   15191 release:
   15192 		sc->phy.release(sc);
   15193 	}
   15194 out:
   15195 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15196 
   15197 	if (sc->sc_type == WM_T_ICH8)
   15198 		wm_gig_downshift_workaround_ich8lan(sc);
   15199 
   15200 	if (sc->sc_type >= WM_T_PCH) {
   15201 		wm_oem_bits_config_ich8lan(sc, false);
   15202 
   15203 		/* Reset PHY to activate OEM bits on 82577/8 */
   15204 		if (sc->sc_type == WM_T_PCH)
   15205 			wm_reset_phy(sc);
   15206 
   15207 		if (sc->phy.acquire(sc) != 0)
   15208 			return;
   15209 		wm_write_smbus_addr(sc);
   15210 		sc->phy.release(sc);
   15211 	}
   15212 }
   15213 
   15214 /*
   15215  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15216  *  @sc: pointer to the HW structure
   15217  *
   15218  *  During Sx to S0 transitions on non-managed devices or managed devices
   15219  *  on which PHY resets are not blocked, if the PHY registers cannot be
    15220  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   15221  *  the PHY.
   15222  *  On i217, setup Intel Rapid Start Technology.
   15223  */
   15224 static int
   15225 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15226 {
   15227 	device_t dev = sc->sc_dev;
   15228 	int rv;
   15229 
   15230 	if (sc->sc_type < WM_T_PCH2)
   15231 		return 0;
   15232 
   15233 	rv = wm_init_phy_workarounds_pchlan(sc);
   15234 	if (rv != 0)
   15235 		return -1;
   15236 
   15237 	/* For i217 Intel Rapid Start Technology support when the system
   15238 	 * is transitioning from Sx and no manageability engine is present
   15239 	 * configure SMBus to restore on reset, disable proxy, and enable
   15240 	 * the reset on MTA (Multicast table array).
   15241 	 */
   15242 	if (sc->sc_phytype == WMPHY_I217) {
   15243 		uint16_t phy_reg;
   15244 
   15245 		if (sc->phy.acquire(sc) != 0)
   15246 			return -1;
   15247 
   15248 		/* Clear Auto Enable LPI after link up */
   15249 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15250 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15251 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15252 
   15253 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15254 			/* Restore clear on SMB if no manageability engine
   15255 			 * is present
   15256 			 */
   15257 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15258 			    &phy_reg);
   15259 			if (rv != 0)
   15260 				goto release;
   15261 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15262 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15263 
   15264 			/* Disable Proxy */
   15265 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15266 		}
   15267 		/* Enable reset on MTA */
    15268 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15269 		if (rv != 0)
   15270 			goto release;
   15271 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15272 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15273 
   15274 release:
   15275 		sc->phy.release(sc);
   15276 		return rv;
   15277 	}
   15278 
   15279 	return 0;
   15280 }
   15281 
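          /*
           *  wm_enable_wakeup - Arm the device for wakeup
           *  @sc: pointer to the HW structure
           *
           *  Program the wakeup filters (magic packet) in the MAC or, on PCH and
           *  newer, in the PHY, then enable PME in the PCI power management
           *  capability; if WOL is not enabled, clear the PME enable bit instead.
           */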
   15282 static void
   15283 wm_enable_wakeup(struct wm_softc *sc)
   15284 {
    15285 	uint32_t reg;
    15286 	pcireg_t pmode;
    15287 	int pmreg, rv = 0;
   15288 
   15289 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15290 		device_xname(sc->sc_dev), __func__));
   15291 
   15292 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15293 	    &pmreg, NULL) == 0)
   15294 		return;
   15295 
   15296 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15297 		goto pme;
   15298 
   15299 	/* Advertise the wakeup capability */
   15300 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15301 	    | CTRL_SWDPIN(3));
   15302 
   15303 	/* Keep the laser running on fiber adapters */
   15304 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15305 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15306 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15307 		reg |= CTRL_EXT_SWDPIN(3);
   15308 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15309 	}
   15310 
   15311 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15312 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15313 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15314 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15315 		wm_suspend_workarounds_ich8lan(sc);
   15316 
   15317 #if 0	/* For the multicast packet */
   15318 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15319 	reg |= WUFC_MC;
   15320 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15321 #endif
   15322 
   15323 	if (sc->sc_type >= WM_T_PCH) {
   15324 		rv = wm_enable_phy_wakeup(sc);
   15325 		if (rv != 0)
   15326 			goto pme;
   15327 	} else {
   15328 		/* Enable wakeup by the MAC */
   15329 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15330 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15331 	}
   15332 
   15333 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15334 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15335 		|| (sc->sc_type == WM_T_PCH2))
   15336 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15337 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15338 
   15339 pme:
   15340 	/* Request PME */
   15341 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15342 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15343 		/* For WOL */
   15344 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15345 	} else {
   15346 		/* Disable WOL */
   15347 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15348 	}
   15349 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15350 }
   15351 
   15352 /* Disable ASPM L0s and/or L1 for workaround */
   15353 static void
   15354 wm_disable_aspm(struct wm_softc *sc)
   15355 {
   15356 	pcireg_t reg, mask = 0;
    15357 	const char *str = "";
   15358 
   15359 	/*
    15360 	 * Only for PCIe devices which have the PCIe capability in the PCI
    15361 	 * config space.
   15362 	 */
   15363 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15364 		return;
   15365 
   15366 	switch (sc->sc_type) {
   15367 	case WM_T_82571:
   15368 	case WM_T_82572:
   15369 		/*
   15370 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15371 		 * State Power management L1 State (ASPM L1).
   15372 		 */
   15373 		mask = PCIE_LCSR_ASPM_L1;
   15374 		str = "L1 is";
   15375 		break;
   15376 	case WM_T_82573:
   15377 	case WM_T_82574:
   15378 	case WM_T_82583:
   15379 		/*
   15380 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15381 		 *
    15382 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15383 		 * some chipsets.  The documents for the 82574 and 82583 say
    15384 		 * that disabling L0s with those specific chipsets is
    15385 		 * sufficient, but we do the same as the Intel em driver does.
   15386 		 *
   15387 		 * References:
   15388 		 * Errata 8 of the Specification Update of i82573.
   15389 		 * Errata 20 of the Specification Update of i82574.
   15390 		 * Errata 9 of the Specification Update of i82583.
   15391 		 */
   15392 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15393 		str = "L0s and L1 are";
   15394 		break;
   15395 	default:
   15396 		return;
   15397 	}
   15398 
   15399 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15400 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15401 	reg &= ~mask;
   15402 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15403 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15404 
   15405 	/* Print only in wm_attach() */
   15406 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15407 		aprint_verbose_dev(sc->sc_dev,
   15408 		    "ASPM %s disabled to workaround the errata.\n", str);
   15409 }
   15410 
   15411 /* LPLU */
   15412 
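          /*
           *  wm_lplu_d0_disable - Disable Low Power Link Up in the D0 power state
           *  @sc: pointer to the HW structure
           *
           *  LPLU negotiates the link at the lowest supported speed to save power.
           *  The register used to disable it depends on the chip generation.
           */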
   15413 static void
   15414 wm_lplu_d0_disable(struct wm_softc *sc)
   15415 {
   15416 	struct mii_data *mii = &sc->sc_mii;
   15417 	uint32_t reg;
   15418 	uint16_t phyval;
   15419 
   15420 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15421 		device_xname(sc->sc_dev), __func__));
   15422 
   15423 	if (sc->sc_phytype == WMPHY_IFE)
   15424 		return;
   15425 
   15426 	switch (sc->sc_type) {
   15427 	case WM_T_82571:
   15428 	case WM_T_82572:
   15429 	case WM_T_82573:
   15430 	case WM_T_82575:
   15431 	case WM_T_82576:
   15432 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15433 		phyval &= ~PMR_D0_LPLU;
   15434 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15435 		break;
   15436 	case WM_T_82580:
   15437 	case WM_T_I350:
   15438 	case WM_T_I210:
   15439 	case WM_T_I211:
   15440 		reg = CSR_READ(sc, WMREG_PHPM);
   15441 		reg &= ~PHPM_D0A_LPLU;
   15442 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15443 		break;
   15444 	case WM_T_82574:
   15445 	case WM_T_82583:
   15446 	case WM_T_ICH8:
   15447 	case WM_T_ICH9:
   15448 	case WM_T_ICH10:
   15449 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15450 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15451 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15452 		CSR_WRITE_FLUSH(sc);
   15453 		break;
   15454 	case WM_T_PCH:
   15455 	case WM_T_PCH2:
   15456 	case WM_T_PCH_LPT:
   15457 	case WM_T_PCH_SPT:
   15458 	case WM_T_PCH_CNP:
   15459 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15460 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15461 		if (wm_phy_resetisblocked(sc) == false)
   15462 			phyval |= HV_OEM_BITS_ANEGNOW;
   15463 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15464 		break;
   15465 	default:
   15466 		break;
   15467 	}
   15468 }
   15469 
   15470 /* EEE */
   15471 
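          /*
           *  wm_set_eee_i350 - Enable/disable Energy Efficient Ethernet
           *  @sc: pointer to the HW structure
           *
           *  Set the IPCNFG and EEER registers on I350 and newer according to the
           *  ETHERCAP_EEE user setting.
           */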
   15472 static int
   15473 wm_set_eee_i350(struct wm_softc *sc)
   15474 {
   15475 	struct ethercom *ec = &sc->sc_ethercom;
   15476 	uint32_t ipcnfg, eeer;
   15477 	uint32_t ipcnfg_mask
   15478 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15479 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15480 
   15481 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15482 
   15483 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15484 	eeer = CSR_READ(sc, WMREG_EEER);
   15485 
   15486 	/* Enable or disable per user setting */
   15487 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15488 		ipcnfg |= ipcnfg_mask;
   15489 		eeer |= eeer_mask;
   15490 	} else {
   15491 		ipcnfg &= ~ipcnfg_mask;
   15492 		eeer &= ~eeer_mask;
   15493 	}
   15494 
   15495 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15496 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15497 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15498 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15499 
   15500 	return 0;
   15501 }
   15502 
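          /*
           *  wm_set_eee_pchlan - Enable/disable Energy Efficient Ethernet (PCH)
           *  @sc: pointer to the HW structure
           *
           *  For 82579 and I217 PHYs, enable EEE only for the speeds in which the
           *  link partner is EEE capable and for which we advertise EEE.
           */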
   15503 static int
   15504 wm_set_eee_pchlan(struct wm_softc *sc)
   15505 {
   15506 	device_t dev = sc->sc_dev;
   15507 	struct ethercom *ec = &sc->sc_ethercom;
   15508 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15509 	int rv = 0;
   15510 
   15511 	switch (sc->sc_phytype) {
   15512 	case WMPHY_82579:
   15513 		lpa = I82579_EEE_LP_ABILITY;
   15514 		pcs_status = I82579_EEE_PCS_STATUS;
   15515 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15516 		break;
   15517 	case WMPHY_I217:
   15518 		lpa = I217_EEE_LP_ABILITY;
   15519 		pcs_status = I217_EEE_PCS_STATUS;
   15520 		adv_addr = I217_EEE_ADVERTISEMENT;
   15521 		break;
   15522 	default:
   15523 		return 0;
   15524 	}
   15525 
   15526 	if (sc->phy.acquire(sc)) {
   15527 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15528 		return 0;
   15529 	}
   15530 
   15531 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15532 	if (rv != 0)
   15533 		goto release;
   15534 
   15535 	/* Clear bits that enable EEE in various speeds */
   15536 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15537 
   15538 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15539 		/* Save off link partner's EEE ability */
   15540 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15541 		if (rv != 0)
   15542 			goto release;
   15543 
   15544 		/* Read EEE advertisement */
   15545 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15546 			goto release;
   15547 
   15548 		/*
   15549 		 * Enable EEE only for speeds in which the link partner is
   15550 		 * EEE capable and for which we advertise EEE.
   15551 		 */
   15552 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15553 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15554 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15555 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15556 			if ((data & ANLPAR_TX_FD) != 0)
   15557 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15558 			else {
   15559 				/*
   15560 				 * EEE is not supported in 100Half, so ignore
   15561 				 * partner's EEE in 100 ability if full-duplex
   15562 				 * is not advertised.
   15563 				 */
   15564 				sc->eee_lp_ability
   15565 				    &= ~AN_EEEADVERT_100_TX;
   15566 			}
   15567 		}
   15568 	}
   15569 
   15570 	if (sc->sc_phytype == WMPHY_82579) {
   15571 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15572 		if (rv != 0)
   15573 			goto release;
   15574 
   15575 		data &= ~I82579_LPI_PLL_SHUT_100;
   15576 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15577 	}
   15578 
   15579 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15580 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15581 		goto release;
   15582 
   15583 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15584 release:
   15585 	sc->phy.release(sc);
   15586 
   15587 	return rv;
   15588 }
   15589 
   15590 static int
   15591 wm_set_eee(struct wm_softc *sc)
   15592 {
   15593 	struct ethercom *ec = &sc->sc_ethercom;
   15594 
   15595 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15596 		return 0;
   15597 
   15598 	if (sc->sc_type == WM_T_I354) {
   15599 		/* I354 uses an external PHY */
   15600 		return 0; /* not yet */
   15601 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15602 		return wm_set_eee_i350(sc);
   15603 	else if (sc->sc_type >= WM_T_PCH2)
   15604 		return wm_set_eee_pchlan(sc);
   15605 
   15606 	return 0;
   15607 }
   15608 
   15609 /*
   15610  * Workarounds (mainly PHY related).
   15611  * Basically, PHY's workarounds are in the PHY drivers.
   15612  */
   15613 
   15614 /* Work-around for 82566 Kumeran PCS lock loss */
   15615 static int
   15616 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15617 {
   15618 	struct mii_data *mii = &sc->sc_mii;
   15619 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15620 	int i, reg, rv;
   15621 	uint16_t phyreg;
   15622 
   15623 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15624 		device_xname(sc->sc_dev), __func__));
   15625 
   15626 	/* If the link is not up, do nothing */
   15627 	if ((status & STATUS_LU) == 0)
   15628 		return 0;
   15629 
   15630 	/* Nothing to do if the link is other than 1Gbps */
   15631 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15632 		return 0;
   15633 
   15634 	for (i = 0; i < 10; i++) {
   15635 		/* read twice */
   15636 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15637 		if (rv != 0)
   15638 			return rv;
   15639 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15640 		if (rv != 0)
   15641 			return rv;
   15642 
   15643 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15644 			goto out;	/* GOOD! */
   15645 
   15646 		/* Reset the PHY */
   15647 		wm_reset_phy(sc);
   15648 		delay(5*1000);
   15649 	}
   15650 
   15651 	/* Disable GigE link negotiation */
   15652 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15653 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15654 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15655 
   15656 	/*
   15657 	 * Call gig speed drop workaround on Gig disable before accessing
   15658 	 * any PHY registers.
   15659 	 */
   15660 	wm_gig_downshift_workaround_ich8lan(sc);
   15661 
   15662 out:
   15663 	return 0;
   15664 }
   15665 
   15666 /*
   15667  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15668  *  @sc: pointer to the HW structure
   15669  *
    15670  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15671  *  LPLU, Gig disable, MDIC PHY reset):
   15672  *    1) Set Kumeran Near-end loopback
   15673  *    2) Clear Kumeran Near-end loopback
   15674  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15675  */
   15676 static void
   15677 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15678 {
   15679 	uint16_t kmreg;
   15680 
   15681 	/* Only for igp3 */
   15682 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15683 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15684 			return;
   15685 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15686 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15687 			return;
   15688 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15689 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15690 	}
   15691 }
   15692 
   15693 /*
   15694  * Workaround for pch's PHYs
   15695  * XXX should be moved to new PHY driver?
   15696  */
   15697 static int
   15698 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15699 {
   15700 	device_t dev = sc->sc_dev;
   15701 	struct mii_data *mii = &sc->sc_mii;
   15702 	struct mii_softc *child;
   15703 	uint16_t phy_data, phyrev = 0;
   15704 	int phytype = sc->sc_phytype;
   15705 	int rv;
   15706 
   15707 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15708 		device_xname(dev), __func__));
   15709 	KASSERT(sc->sc_type == WM_T_PCH);
   15710 
   15711 	/* Set MDIO slow mode before any other MDIO access */
   15712 	if (phytype == WMPHY_82577)
   15713 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15714 			return rv;
   15715 
   15716 	child = LIST_FIRST(&mii->mii_phys);
   15717 	if (child != NULL)
   15718 		phyrev = child->mii_mpd_rev;
   15719 
    15720 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15721 	if ((child != NULL) &&
   15722 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15723 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15724 		/* Disable generation of early preamble (0x4431) */
   15725 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15726 		    &phy_data);
   15727 		if (rv != 0)
   15728 			return rv;
   15729 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15730 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15731 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15732 		    phy_data);
   15733 		if (rv != 0)
   15734 			return rv;
   15735 
   15736 		/* Preamble tuning for SSC */
   15737 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15738 		if (rv != 0)
   15739 			return rv;
   15740 	}
   15741 
   15742 	/* 82578 */
   15743 	if (phytype == WMPHY_82578) {
   15744 		/*
   15745 		 * Return registers to default by doing a soft reset then
   15746 		 * writing 0x3140 to the control register
   15747 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15748 		 */
   15749 		if ((child != NULL) && (phyrev < 2)) {
   15750 			PHY_RESET(child);
   15751 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15752 			if (rv != 0)
   15753 				return rv;
   15754 		}
   15755 	}
   15756 
   15757 	/* Select page 0 */
   15758 	if ((rv = sc->phy.acquire(sc)) != 0)
   15759 		return rv;
   15760 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15761 	sc->phy.release(sc);
   15762 	if (rv != 0)
   15763 		return rv;
   15764 
   15765 	/*
   15766 	 * Configure the K1 Si workaround during phy reset assuming there is
   15767 	 * link so that it disables K1 if link is in 1Gbps.
   15768 	 */
   15769 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15770 		return rv;
   15771 
   15772 	/* Workaround for link disconnects on a busy hub in half duplex */
   15773 	rv = sc->phy.acquire(sc);
   15774 	if (rv)
   15775 		return rv;
   15776 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15777 	if (rv)
   15778 		goto release;
   15779 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15780 	    phy_data & 0x00ff);
   15781 	if (rv)
   15782 		goto release;
   15783 
   15784 	/* Set MSE higher to enable link to stay up when noise is high */
   15785 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15786 release:
   15787 	sc->phy.release(sc);
   15788 
   15789 	return rv;
   15790 }
   15791 
   15792 /*
   15793  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15794  *  @sc:   pointer to the HW structure
   15795  */
   15796 static void
   15797 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15798 {
   15799 	device_t dev = sc->sc_dev;
   15800 	uint32_t mac_reg;
   15801 	uint16_t i, wuce;
   15802 	int count;
   15803 
   15804 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15805 		device_xname(sc->sc_dev), __func__));
   15806 
   15807 	if (sc->phy.acquire(sc) != 0)
   15808 		return;
   15809 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15810 		goto release;
   15811 
   15812 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15813 	count = wm_rar_count(sc);
   15814 	for (i = 0; i < count; i++) {
   15815 		uint16_t lo, hi;
   15816 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15817 		lo = (uint16_t)(mac_reg & 0xffff);
   15818 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15819 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15820 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15821 
   15822 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15823 		lo = (uint16_t)(mac_reg & 0xffff);
   15824 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15825 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15826 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15827 	}
   15828 
   15829 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15830 
   15831 release:
   15832 	sc->phy.release(sc);
   15833 }
   15834 
   15835 /*
   15836  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15837  *  done after every PHY reset.
   15838  */
   15839 static int
   15840 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15841 {
   15842 	device_t dev = sc->sc_dev;
   15843 	int rv;
   15844 
   15845 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15846 		device_xname(dev), __func__));
   15847 	KASSERT(sc->sc_type == WM_T_PCH2);
   15848 
   15849 	/* Set MDIO slow mode before any other MDIO access */
   15850 	rv = wm_set_mdio_slow_mode_hv(sc);
   15851 	if (rv != 0)
   15852 		return rv;
   15853 
   15854 	rv = sc->phy.acquire(sc);
   15855 	if (rv != 0)
   15856 		return rv;
   15857 	/* Set MSE higher to enable link to stay up when noise is high */
   15858 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15859 	if (rv != 0)
   15860 		goto release;
   15861 	/* Drop link after 5 times MSE threshold was reached */
   15862 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15863 release:
   15864 	sc->phy.release(sc);
   15865 
   15866 	return rv;
   15867 }
   15868 
   15869 /**
   15870  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
    15871  *  @sc:   pointer to the HW structure
           *  @link: link up bool flag
   15872  *
   15873  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15874  *  preventing further DMA write requests.  Workaround the issue by disabling
    15875  *  the de-assertion of the clock request when in 1Gbps mode.
   15876  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15877  *  speeds in order to avoid Tx hangs.
   15878  **/
   15879 static int
   15880 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15881 {
   15882 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15883 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15884 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15885 	uint16_t phyreg;
   15886 
   15887 	if (link && (speed == STATUS_SPEED_1000)) {
    15888 		int rv = sc->phy.acquire(sc);
          		if (rv != 0)
          			return rv;
    15889 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    15890 		    &phyreg);
   15891 		if (rv != 0)
   15892 			goto release;
   15893 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15894 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15895 		if (rv != 0)
   15896 			goto release;
   15897 		delay(20);
   15898 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15899 
   15900 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15901 		    &phyreg);
   15902 release:
   15903 		sc->phy.release(sc);
   15904 		return rv;
   15905 	}
   15906 
   15907 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15908 
   15909 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15910 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15911 	    || !link
   15912 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15913 		goto update_fextnvm6;
   15914 
   15915 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15916 
   15917 	/* Clear link status transmit timeout */
   15918 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15919 	if (speed == STATUS_SPEED_100) {
   15920 		/* Set inband Tx timeout to 5x10us for 100Half */
   15921 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15922 
   15923 		/* Do not extend the K1 entry latency for 100Half */
   15924 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15925 	} else {
   15926 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15927 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15928 
   15929 		/* Extend the K1 entry latency for 10 Mbps */
   15930 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15931 	}
   15932 
   15933 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15934 
   15935 update_fextnvm6:
   15936 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15937 	return 0;
   15938 }
   15939 
   15940 /*
   15941  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15942  *  @sc:   pointer to the HW structure
   15943  *  @link: link up bool flag
   15944  *
   15945  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    15946  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15947  *  If link is down, the function will restore the default K1 setting located
   15948  *  in the NVM.
   15949  */
   15950 static int
   15951 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15952 {
   15953 	int k1_enable = sc->sc_nvm_k1_enabled;
   15954 
   15955 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15956 		device_xname(sc->sc_dev), __func__));
   15957 
   15958 	if (sc->phy.acquire(sc) != 0)
   15959 		return -1;
   15960 
   15961 	if (link) {
   15962 		k1_enable = 0;
   15963 
   15964 		/* Link stall fix for link up */
   15965 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15966 		    0x0100);
   15967 	} else {
   15968 		/* Link stall fix for link down */
   15969 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15970 		    0x4100);
   15971 	}
   15972 
   15973 	wm_configure_k1_ich8lan(sc, k1_enable);
   15974 	sc->phy.release(sc);
   15975 
   15976 	return 0;
   15977 }
   15978 
   15979 /*
   15980  *  wm_k1_workaround_lv - K1 Si workaround
   15981  *  @sc:   pointer to the HW structure
   15982  *
    15983  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
    15984  *  Disable K1 for 1000 and 100 speeds.
   15985  */
   15986 static int
   15987 wm_k1_workaround_lv(struct wm_softc *sc)
   15988 {
   15989 	uint32_t reg;
   15990 	uint16_t phyreg;
   15991 	int rv;
   15992 
   15993 	if (sc->sc_type != WM_T_PCH2)
   15994 		return 0;
   15995 
   15996 	/* Set K1 beacon duration based on 10Mbps speed */
   15997 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15998 	if (rv != 0)
   15999 		return rv;
   16000 
   16001 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16002 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16003 		if (phyreg &
   16004 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    16005 			/* LV 1G/100 packet drop issue workaround */
   16006 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16007 			    &phyreg);
   16008 			if (rv != 0)
   16009 				return rv;
   16010 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16011 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16012 			    phyreg);
   16013 			if (rv != 0)
   16014 				return rv;
   16015 		} else {
   16016 			/* For 10Mbps */
   16017 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16018 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16019 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16020 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16021 		}
   16022 	}
   16023 
   16024 	return 0;
   16025 }
   16026 
   16027 /*
   16028  *  wm_link_stall_workaround_hv - Si workaround
   16029  *  @sc: pointer to the HW structure
   16030  *
   16031  *  This function works around a Si bug where the link partner can get
    16032  *  a link up indication before the PHY does.  If small packets are sent
    16033  *  by the link partner, they can be placed in the packet buffer without
    16034  *  being properly accounted for by the PHY and will stall, preventing
    16035  *  further packets from being received.  The workaround is to clear the
   16036  *  packet buffer after the PHY detects link up.
   16037  */
   16038 static int
   16039 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16040 {
   16041 	uint16_t phyreg;
   16042 
   16043 	if (sc->sc_phytype != WMPHY_82578)
   16044 		return 0;
   16045 
    16046 	/* Do not apply the workaround if PHY loopback (bit 14) is set */
   16047 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16048 	if ((phyreg & BMCR_LOOP) != 0)
   16049 		return 0;
   16050 
   16051 	/* Check if link is up and at 1Gbps */
   16052 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16053 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16054 	    | BM_CS_STATUS_SPEED_MASK;
   16055 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16056 		| BM_CS_STATUS_SPEED_1000))
   16057 		return 0;
   16058 
   16059 	delay(200 * 1000);	/* XXX too big */
   16060 
   16061 	/* Flush the packets in the fifo buffer */
   16062 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16063 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16064 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16065 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16066 
   16067 	return 0;
   16068 }
   16069 
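          /*
           *  wm_set_mdio_slow_mode_hv - Set slow MDIO access mode
           *  @sc: pointer to the HW structure
           *
           *  Some PHYs require the MDIO slow mode bit to be set before any other
           *  MDIO access.
           */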
   16070 static int
   16071 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16072 {
   16073 	int rv;
   16074 	uint16_t reg;
   16075 
   16076 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16077 	if (rv != 0)
   16078 		return rv;
   16079 
   16080 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16081 	    reg | HV_KMRN_MDIO_SLOW);
   16082 }
   16083 
   16084 /*
   16085  *  wm_configure_k1_ich8lan - Configure K1 power state
   16086  *  @sc: pointer to the HW structure
   16087  *  @enable: K1 state to configure
   16088  *
   16089  *  Configure the K1 power state based on the provided parameter.
   16090  *  Assumes semaphore already acquired.
   16091  */
   16092 static void
   16093 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16094 {
   16095 	uint32_t ctrl, ctrl_ext, tmp;
   16096 	uint16_t kmreg;
   16097 	int rv;
   16098 
   16099 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16100 
   16101 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16102 	if (rv != 0)
   16103 		return;
   16104 
   16105 	if (k1_enable)
   16106 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16107 	else
   16108 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16109 
   16110 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16111 	if (rv != 0)
   16112 		return;
   16113 
   16114 	delay(20);
   16115 
   16116 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16117 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16118 
   16119 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16120 	tmp |= CTRL_FRCSPD;
   16121 
   16122 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16123 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16124 	CSR_WRITE_FLUSH(sc);
   16125 	delay(20);
   16126 
   16127 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16128 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16129 	CSR_WRITE_FLUSH(sc);
   16130 	delay(20);
   16133 }
   16134 
   16135 /* special case - for 82575 - need to do manual init ... */
   16136 static void
   16137 wm_reset_init_script_82575(struct wm_softc *sc)
   16138 {
   16139 	/*
    16140 	 * Remark: this is untested code - we have no board without EEPROM;
    16141 	 * same setup as mentioned in the FreeBSD driver for the i82575.
   16142 	 */
   16143 
   16144 	/* SerDes configuration via SERDESCTRL */
   16145 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16146 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16147 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16148 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16149 
   16150 	/* CCM configuration via CCMCTL register */
   16151 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16152 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16153 
   16154 	/* PCIe lanes configuration */
   16155 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16156 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16157 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16158 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16159 
   16160 	/* PCIe PLL Configuration */
   16161 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16162 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16163 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16164 }
   16165 
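          /*
           *  wm_reset_mdicnfg_82580 - Restore the MDICNFG register (82580)
           *  @sc: pointer to the HW structure
           *
           *  On 82580 SGMII ports, restore the external/combined MDIO settings in
           *  the MDICNFG register from the NVM's CFG3 word.
           */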
   16166 static void
   16167 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16168 {
   16169 	uint32_t reg;
   16170 	uint16_t nvmword;
   16171 	int rv;
   16172 
   16173 	if (sc->sc_type != WM_T_82580)
   16174 		return;
   16175 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16176 		return;
   16177 
   16178 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16179 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16180 	if (rv != 0) {
   16181 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16182 		    __func__);
   16183 		return;
   16184 	}
   16185 
   16186 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16187 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16188 		reg |= MDICNFG_DEST;
   16189 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16190 		reg |= MDICNFG_COM_MDIO;
   16191 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16192 }
   16193 
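/* A PHY ID of all zeros or all ones means no (responding) PHY. */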
   16194 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16195 
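/*
 *  wm_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @sc: pointer to the HW structure
 *
 *  Try to read the PHY ID registers, retrying in MDIO slow mode if
 *  necessary. On PCH_LPT and newer, also un-force SMBus mode once
 *  the PHY proves accessible and the ME firmware is not active.
 *  Assumes semaphore already acquired.
 */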
   16196 static bool
   16197 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16198 {
   16199 	uint32_t reg;
   16200 	uint16_t id1, id2;
   16201 	int i, rv;
   16202 
   16203 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16204 		device_xname(sc->sc_dev), __func__));
   16205 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16206 
   16207 	id1 = id2 = 0xffff;
   16208 	for (i = 0; i < 2; i++) {
   16209 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16210 		    &id1);
   16211 		if ((rv != 0) || MII_INVALIDID(id1))
   16212 			continue;
   16213 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16214 		    &id2);
   16215 		if ((rv != 0) || MII_INVALIDID(id2))
   16216 			continue;
   16217 		break;
   16218 	}
   16219 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16220 		goto out;
   16221 
   16222 	/*
   16223 	 * In case the PHY needs to be in mdio slow mode,
   16224 	 * set slow mode and try to get the PHY id again.
   16225 	 */
   16226 	rv = 0;
   16227 	if (sc->sc_type < WM_T_PCH_LPT) {
   16228 		sc->phy.release(sc);
   16229 		wm_set_mdio_slow_mode_hv(sc);
   16230 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16231 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16232 		sc->phy.acquire(sc);
   16233 	}
   16234 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "PHY is not accessible\n");
   16236 		return false;
   16237 	}
   16238 out:
   16239 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16240 		/* Only unforce SMBus if ME is not active */
   16241 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16242 			uint16_t phyreg;
   16243 
   16244 			/* Unforce SMBus mode in PHY */
   16245 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16246 			    CV_SMB_CTRL, &phyreg);
   16247 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16248 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16249 			    CV_SMB_CTRL, phyreg);
   16250 
   16251 			/* Unforce SMBus mode in MAC */
   16252 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16253 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16254 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16255 		}
   16256 	}
   16257 	return true;
   16258 }
   16259 
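/*
 *  wm_toggle_lanphypc_pch_lpt - Toggle the LANPHYPC pin value
 *  @sc: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY.
 *  On PCH_LPT and newer the LPCD bit is polled to detect completion;
 *  on older devices a fixed 50 ms delay is used instead.
 */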
   16260 static void
   16261 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16262 {
   16263 	uint32_t reg;
   16264 	int i;
   16265 
   16266 	/* Set PHY Config Counter to 50msec */
   16267 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16268 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16269 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16270 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16271 
   16272 	/* Toggle LANPHYPC */
   16273 	reg = CSR_READ(sc, WMREG_CTRL);
   16274 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16275 	reg &= ~CTRL_LANPHYPC_VALUE;
   16276 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16277 	CSR_WRITE_FLUSH(sc);
   16278 	delay(1000);
   16279 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16280 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16281 	CSR_WRITE_FLUSH(sc);
   16282 
   16283 	if (sc->sc_type < WM_T_PCH_LPT)
   16284 		delay(50 * 1000);
   16285 	else {
   16286 		i = 20;
   16287 
   16288 		do {
   16289 			delay(5 * 1000);
   16290 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16291 		    && i--);
   16292 
   16293 		delay(30 * 1000);
   16294 	}
   16295 }
   16296 
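/*
 *  wm_platform_pm_pch_lpt - Set platform power management values
 *  @sc: pointer to the HW structure
 *  @link: bool indicating link status
 *
 *  Set the Latency Tolerance Reporting (LTR) values and the OBFF
 *  high water mark based on the link speed and the Rx packet buffer
 *  size.
 */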
   16297 static int
   16298 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16299 {
   16300 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16301 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16302 	uint32_t rxa;
   16303 	uint16_t scale = 0, lat_enc = 0;
   16304 	int32_t obff_hwm = 0;
   16305 	int64_t lat_ns, value;
   16306 
   16307 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16308 		device_xname(sc->sc_dev), __func__));
   16309 
   16310 	if (link) {
   16311 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16312 		uint32_t status;
   16313 		uint16_t speed;
   16314 		pcireg_t preg;
   16315 
   16316 		status = CSR_READ(sc, WMREG_STATUS);
   16317 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16318 		case STATUS_SPEED_10:
   16319 			speed = 10;
   16320 			break;
   16321 		case STATUS_SPEED_100:
   16322 			speed = 100;
   16323 			break;
   16324 		case STATUS_SPEED_1000:
   16325 			speed = 1000;
   16326 			break;
   16327 		default:
   16328 			device_printf(sc->sc_dev, "Unknown speed "
   16329 			    "(status = %08x)\n", status);
   16330 			return -1;
   16331 		}
   16332 
   16333 		/* Rx Packet Buffer Allocation size (KB) */
   16334 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16335 
   16336 		/*
   16337 		 * Determine the maximum latency tolerated by the device.
   16338 		 *
   16339 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16340 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16341 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16342 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16343 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16344 		 */
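		/*
		 * The worst case is the time to drain the Rx packet
		 * buffer, less two maximum-sized frames, at the current
		 * link speed: rxa is in KB and speed is in Mb/s, so the
		 * result is in ns.
		 */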
   16345 		lat_ns = ((int64_t)rxa * 1024 -
   16346 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16347 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16348 		if (lat_ns < 0)
   16349 			lat_ns = 0;
   16350 		else
   16351 			lat_ns /= speed;
   16352 		value = lat_ns;
   16353 
   16354 		while (value > LTRV_VALUE) {
			scale++;
   16356 			value = howmany(value, __BIT(5));
   16357 		}
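		/*
		 * E.g., assuming LTRV_VALUE is the 10-bit maximum (1023),
		 * lat_ns = 100000 (100us) loops twice, giving
		 * value = howmany(howmany(100000, 32), 32) = 98 and
		 * scale = 2, i.e. 98 units of 2^10 ns.
		 */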
   16358 		if (scale > LTRV_SCALE_MAX) {
   16359 			device_printf(sc->sc_dev,
   16360 			    "Invalid LTR latency scale %d\n", scale);
   16361 			return -1;
   16362 		}
   16363 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16364 
   16365 		/* Determine the maximum latency tolerated by the platform */
   16366 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16367 		    WM_PCI_LTR_CAP_LPT);
   16368 		max_snoop = preg & 0xffff;
   16369 		max_nosnoop = preg >> 16;
   16370 
   16371 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16372 
   16373 		if (lat_enc > max_ltr_enc) {
   16374 			lat_enc = max_ltr_enc;
   16375 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16376 			    * PCI_LTR_SCALETONS(
   16377 				    __SHIFTOUT(lat_enc,
   16378 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16379 		}
   16380 
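		/*
		 * Convert the (possibly clamped) latency back into the
		 * amount of buffer (in KB) filled at link speed during
		 * that time; the OBFF high water mark is what remains
		 * of the Rx buffer allocation.
		 */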
   16381 		if (lat_ns) {
   16382 			lat_ns *= speed * 1000;
   16383 			lat_ns /= 8;
   16384 			lat_ns /= 1000000000;
   16385 			obff_hwm = (int32_t)(rxa - lat_ns);
   16386 		}
   16387 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
			    "(rxa = %d, lat_ns = %d)\n",
   16390 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16391 			return -1;
   16392 		}
   16393 	}
	/* Use the same latency for Snoop and No-Snoop requests */
   16395 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16396 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16397 
   16398 	/* Set OBFF high water mark */
   16399 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16400 	reg |= obff_hwm;
   16401 	CSR_WRITE(sc, WMREG_SVT, reg);
   16402 
   16403 	/* Enable OBFF */
   16404 	reg = CSR_READ(sc, WMREG_SVCR);
   16405 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16406 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16407 
   16408 	return 0;
   16409 }
   16410 
   16411 /*
   16412  * I210 Errata 25 and I211 Errata 10
   16413  * Slow System Clock.
   16414  */
   16415 static int
   16416 wm_pll_workaround_i210(struct wm_softc *sc)
   16417 {
   16418 	uint32_t mdicnfg, wuc;
   16419 	uint32_t reg;
   16420 	pcireg_t pcireg;
   16421 	uint32_t pmreg;
   16422 	uint16_t nvmword, tmp_nvmword;
   16423 	uint16_t phyval;
   16424 	bool wa_done = false;
   16425 	int i, rv = 0;
   16426 
   16427 	/* Get Power Management cap offset */
   16428 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16429 	    &pmreg, NULL) == 0)
   16430 		return -1;
   16431 
   16432 	/* Save WUC and MDICNFG registers */
   16433 	wuc = CSR_READ(sc, WMREG_WUC);
   16434 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16435 
   16436 	reg = mdicnfg & ~MDICNFG_DEST;
   16437 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16438 
   16439 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16440 		nvmword = INVM_DEFAULT_AL;
   16441 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16442 
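	/*
	 * Check whether the PHY PLL has locked (i.e. the PLL frequency
	 * register no longer reads as unconfigured). If it hasn't,
	 * reset the internal PHY, bounce the device through D3hot with
	 * the workaround autoload value set, and retry.
	 */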
   16443 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16444 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16445 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16446 
   16447 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16448 			rv = 0;
   16449 			break; /* OK */
   16450 		} else
   16451 			rv = -1;
   16452 
   16453 		wa_done = true;
   16454 		/* Directly reset the internal PHY */
   16455 		reg = CSR_READ(sc, WMREG_CTRL);
   16456 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16457 
   16458 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16459 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16460 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16461 
   16462 		CSR_WRITE(sc, WMREG_WUC, 0);
   16463 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16464 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16465 
   16466 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16467 		    pmreg + PCI_PMCSR);
   16468 		pcireg |= PCI_PMCSR_STATE_D3;
   16469 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16470 		    pmreg + PCI_PMCSR, pcireg);
   16471 		delay(1000);
   16472 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16473 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16474 		    pmreg + PCI_PMCSR, pcireg);
   16475 
   16476 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16477 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16478 
   16479 		/* Restore WUC register */
   16480 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16481 	}
   16482 
   16483 	/* Restore MDICNFG setting */
   16484 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16485 	if (wa_done)
   16486 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16487 	return rv;
   16488 }
   16489 
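/*
 *  wm_legacy_irq_quirk_spt - Apply the legacy (INTx) interrupt quirk
 *  @sc: pointer to the HW structure
 *
 *  On PCH_SPT and PCH_CNP, ungate the side clock and disable IOSF
 *  sideband clock gating, which is needed when using legacy INTx
 *  interrupts.
 */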
   16490 static void
   16491 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16492 {
   16493 	uint32_t reg;
   16494 
   16495 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16496 		device_xname(sc->sc_dev), __func__));
   16497 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16498 	    || (sc->sc_type == WM_T_PCH_CNP));
   16499 
   16500 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16501 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16502 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16503 
   16504 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16505 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16506 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16507 }
   16508