/*	$NetBSD: if_wm.c,v 1.662 2020/01/24 02:50:41 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.662 2020/01/24 02:50:41 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
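
/*
 * A usage sketch for DPRINTF() (illustrative, not compiled): the second
 * argument carries its own parentheses so the macro can forward a whole
 * printf(9) argument list, e.g.
 *
 *	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
 *	    device_xname(sc->sc_dev), __func__));
 */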

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define CALLOUT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

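/*
 * A minimal sketch of how the ring macros above wrap: both the descriptor
 * and job counts are powers of two, so "& (count - 1)" is a cheap modulo.
 * E.g. with txq_ndesc == 4096:
 *
 *	WM_NEXTTX(txq, 4094) == 4095
 *	WM_NEXTTX(txq, 4095) == 0	(wraps back to the ring start)
 */
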
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

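/*
 * The "50 jumbo packets" figure above is simple arithmetic (assuming the
 * usual ~9018 byte jumbo frame): ceil(9018 / 2048) = 5 buffers per frame,
 * and 256 / 5 = 51, so roughly 50 full-sized jumbo packets fit in the ring.
 */
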
#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
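
/*
 * A sketch of what the token pasting above produces, for qname = txq,
 * evname = txdw, qnum = 0: WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[...];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_EVCNT_ATTACH() formats the counter name as "txq00txdw" before
 * registering it with evcnt_attach_dynamic(9).
 */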

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
	krndsource_t rnd_source;	/* random source */
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

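/*
 * A minimal sketch of the tail-pointer idiom above: rxq_tailp always points
 * at the m_next slot where the next fragment belongs, so appending is O(1)
 * with no "empty chain" special case:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_tailp = &rxq_head, head = NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head = m1, tailp = &m1->m_next
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next = m2, tailp = &m2->m_next
 */
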
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
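
/*
 * A note on CSR_WRITE_FLUSH(): PCI writes are posted, so reading back any
 * register (STATUS is a harmless choice) forces pending writes out to the
 * device.  A typical (illustrative) sequence:
 *
 *	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);	mask all interrupts
 *	CSR_WRITE_FLUSH(sc);			ensure the write reached the chip
 */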

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

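/*
 * A sketch of the LO/HI split above: the descriptor base address is
 * programmed into a pair of 32-bit registers, so a 64-bit bus_addr_t must
 * be split.  E.g. for a descriptor at bus address 0x123456000:
 *
 *	WM_CDTXADDR_LO() == 0x23456000	(low 32 bits)
 *	WM_CDTXADDR_HI() == 0x1		(high 32 bits; 0 with 32-bit DMA)
 */
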
/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softhandler_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1326 	  "82801I mobile (AMT) LAN Controller",
   1327 	  WM_T_ICH9,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1329 	  "82567LM-4 LAN Controller",
   1330 	  WM_T_ICH9,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1332 	  "82567LM-2 LAN Controller",
   1333 	  WM_T_ICH10,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1335 	  "82567LF-2 LAN Controller",
   1336 	  WM_T_ICH10,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1338 	  "82567LM-3 LAN Controller",
   1339 	  WM_T_ICH10,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1341 	  "82567LF-3 LAN Controller",
   1342 	  WM_T_ICH10,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1344 	  "82567V-2 LAN Controller",
   1345 	  WM_T_ICH10,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1347 	  "82567V-3? LAN Controller",
   1348 	  WM_T_ICH10,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1350 	  "HANKSVILLE LAN Controller",
   1351 	  WM_T_ICH10,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1353 	  "PCH LAN (82577LM) Controller",
   1354 	  WM_T_PCH,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1356 	  "PCH LAN (82577LC) Controller",
   1357 	  WM_T_PCH,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1359 	  "PCH LAN (82578DM) Controller",
   1360 	  WM_T_PCH,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1362 	  "PCH LAN (82578DC) Controller",
   1363 	  WM_T_PCH,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1365 	  "PCH2 LAN (82579LM) Controller",
   1366 	  WM_T_PCH2,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1368 	  "PCH2 LAN (82579V) Controller",
   1369 	  WM_T_PCH2,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1371 	  "82575EB dual-1000baseT Ethernet",
   1372 	  WM_T_82575,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1374 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1375 	  WM_T_82575,		WMP_F_SERDES },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1377 	  "82575GB quad-1000baseT Ethernet",
   1378 	  WM_T_82575,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1380 	  "82575GB quad-1000baseT Ethernet (PM)",
   1381 	  WM_T_82575,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1383 	  "82576 1000BaseT Ethernet",
   1384 	  WM_T_82576,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1386 	  "82576 1000BaseX Ethernet",
   1387 	  WM_T_82576,		WMP_F_FIBER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1390 	  "82576 gigabit Ethernet (SERDES)",
   1391 	  WM_T_82576,		WMP_F_SERDES },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1394 	  "82576 quad-1000BaseT Ethernet",
   1395 	  WM_T_82576,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1398 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1399 	  WM_T_82576,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1402 	  "82576 gigabit Ethernet",
   1403 	  WM_T_82576,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1406 	  "82576 gigabit Ethernet (SERDES)",
   1407 	  WM_T_82576,		WMP_F_SERDES },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1409 	  "82576 quad-gigabit Ethernet (SERDES)",
   1410 	  WM_T_82576,		WMP_F_SERDES },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1413 	  "82580 1000BaseT Ethernet",
   1414 	  WM_T_82580,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1416 	  "82580 1000BaseX Ethernet",
   1417 	  WM_T_82580,		WMP_F_FIBER },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1420 	  "82580 1000BaseT Ethernet (SERDES)",
   1421 	  WM_T_82580,		WMP_F_SERDES },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1424 	  "82580 gigabit Ethernet (SGMII)",
   1425 	  WM_T_82580,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1427 	  "82580 dual-1000BaseT Ethernet",
   1428 	  WM_T_82580,		WMP_F_COPPER },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1431 	  "82580 quad-1000BaseX Ethernet",
   1432 	  WM_T_82580,		WMP_F_FIBER },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1435 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1436 	  WM_T_82580,		WMP_F_COPPER },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1439 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1440 	  WM_T_82580,		WMP_F_SERDES },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1443 	  "DH89XXCC 1000BASE-KX Ethernet",
   1444 	  WM_T_82580,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1447 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1448 	  WM_T_82580,		WMP_F_SERDES },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1451 	  "I350 Gigabit Network Connection",
   1452 	  WM_T_I350,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1455 	  "I350 Gigabit Fiber Network Connection",
   1456 	  WM_T_I350,		WMP_F_FIBER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1459 	  "I350 Gigabit Backplane Connection",
   1460 	  WM_T_I350,		WMP_F_SERDES },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1463 	  "I350 Quad Port Gigabit Ethernet",
   1464 	  WM_T_I350,		WMP_F_SERDES },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1467 	  "I350 Gigabit Connection",
   1468 	  WM_T_I350,		WMP_F_COPPER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1471 	  "I354 Gigabit Ethernet (KX)",
   1472 	  WM_T_I354,		WMP_F_SERDES },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1475 	  "I354 Gigabit Ethernet (SGMII)",
   1476 	  WM_T_I354,		WMP_F_COPPER },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1479 	  "I354 Gigabit Ethernet (2.5G)",
   1480 	  WM_T_I354,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1483 	  "I210-T1 Ethernet Server Adapter",
   1484 	  WM_T_I210,		WMP_F_COPPER },
   1485 
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1487 	  "I210 Ethernet (Copper OEM)",
   1488 	  WM_T_I210,		WMP_F_COPPER },
   1489 
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1491 	  "I210 Ethernet (Copper IT)",
   1492 	  WM_T_I210,		WMP_F_COPPER },
   1493 
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1495 	  "I210 Ethernet (Copper, FLASH less)",
   1496 	  WM_T_I210,		WMP_F_COPPER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1499 	  "I210 Gigabit Ethernet (Fiber)",
   1500 	  WM_T_I210,		WMP_F_FIBER },
   1501 
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1503 	  "I210 Gigabit Ethernet (SERDES)",
   1504 	  WM_T_I210,		WMP_F_SERDES },
   1505 
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1507 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1508 	  WM_T_I210,		WMP_F_SERDES },
   1509 
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1511 	  "I210 Gigabit Ethernet (SGMII)",
   1512 	  WM_T_I210,		WMP_F_COPPER },
   1513 
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1515 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1516 	  WM_T_I210,		WMP_F_COPPER },
   1517 
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1519 	  "I211 Ethernet (COPPER)",
   1520 	  WM_T_I211,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1522 	  "I217 V Ethernet Connection",
   1523 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1525 	  "I217 LM Ethernet Connection",
   1526 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1528 	  "I218 V Ethernet Connection",
   1529 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1531 	  "I218 V Ethernet Connection",
   1532 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1534 	  "I218 V Ethernet Connection",
   1535 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1537 	  "I218 LM Ethernet Connection",
   1538 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1540 	  "I218 LM Ethernet Connection",
   1541 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1543 	  "I218 LM Ethernet Connection",
   1544 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1546 	  "I219 LM Ethernet Connection",
   1547 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1549 	  "I219 LM Ethernet Connection",
   1550 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1552 	  "I219 LM Ethernet Connection",
   1553 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1555 	  "I219 LM Ethernet Connection",
   1556 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1558 	  "I219 LM Ethernet Connection",
   1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1561 	  "I219 LM Ethernet Connection",
   1562 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1564 	  "I219 LM Ethernet Connection",
   1565 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1567 	  "I219 LM Ethernet Connection",
   1568 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1570 	  "I219 LM Ethernet Connection",
   1571 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1573 	  "I219 LM Ethernet Connection",
   1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1576 	  "I219 LM Ethernet Connection",
   1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1579 	  "I219 LM Ethernet Connection",
   1580 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1581 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1582 	  "I219 LM Ethernet Connection",
   1583 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1585 	  "I219 LM Ethernet Connection",
   1586 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1588 	  "I219 LM Ethernet Connection",
   1589 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1591 	  "I219 V Ethernet Connection",
   1592 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1593 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1594 	  "I219 V Ethernet Connection",
   1595 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1597 	  "I219 V Ethernet Connection",
   1598 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1600 	  "I219 V Ethernet Connection",
   1601 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1603 	  "I219 V Ethernet Connection",
   1604 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1606 	  "I219 V Ethernet Connection",
   1607 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1609 	  "I219 V Ethernet Connection",
   1610 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1612 	  "I219 V Ethernet Connection",
   1613 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1615 	  "I219 V Ethernet Connection",
   1616 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1618 	  "I219 V Ethernet Connection",
   1619 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1621 	  "I219 V Ethernet Connection",
   1622 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1624 	  "I219 V Ethernet Connection",
   1625 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1627 	  "I219 V Ethernet Connection",
   1628 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1629 	{ 0,			0,
   1630 	  NULL,
   1631 	  0,			0 },
   1632 };
   1633 
   1634 /*
1635  * Register read/write functions, other than CSR_{READ|WRITE}().
   1637  */
   1638 
   1639 #if 0 /* Not currently used */
   1640 static inline uint32_t
   1641 wm_io_read(struct wm_softc *sc, int reg)
   1642 {
   1643 
   1644 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1645 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1646 }
   1647 #endif
   1648 
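/*
 * Indirect register access through the I/O BAR: the register offset
 * is written to the address window at offset 0, and the value is then
 * transferred through the data window at offset 4.
 */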
   1649 static inline void
   1650 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1651 {
   1652 
   1653 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1654 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1655 }
   1656 
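/*
 * Write an 8-bit value to an indirect controller register on 82575 and
 * newer: the data and the target offset are packed into a single
 * register write, which is then polled until the hardware reports
 * ready.
 */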
   1657 static inline void
   1658 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1659     uint32_t data)
   1660 {
   1661 	uint32_t regval;
   1662 	int i;
   1663 
   1664 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1665 
   1666 	CSR_WRITE(sc, reg, regval);
   1667 
   1668 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1669 		delay(5);
   1670 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1671 			break;
   1672 	}
   1673 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1674 		aprint_error("%s: WARNING:"
   1675 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1676 		    device_xname(sc->sc_dev), reg);
   1677 	}
   1678 }
   1679 
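/*
 * Store a DMA address into the two little-endian 32-bit words of a
 * descriptor address field.  The high word is zero on platforms with
 * a 32-bit bus_addr_t.
 */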
   1680 static inline void
   1681 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1682 {
   1683 	wa->wa_low = htole32(v & 0xffffffffU);
   1684 	if (sizeof(bus_addr_t) == 8)
   1685 		wa->wa_high = htole32((uint64_t) v >> 32);
   1686 	else
   1687 		wa->wa_high = 0;
   1688 }
   1689 
   1690 /*
   1691  * Descriptor sync/init functions.
   1692  */
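/*
 * Sync [start, start + num) Tx descriptors with the device.  A range
 * that wraps past the end of the ring is synced in two pieces.
 */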
   1693 static inline void
   1694 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1695 {
   1696 	struct wm_softc *sc = txq->txq_sc;
   1697 
   1698 	/* If it will wrap around, sync to the end of the ring. */
   1699 	if ((start + num) > WM_NTXDESC(txq)) {
   1700 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1701 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1702 		    (WM_NTXDESC(txq) - start), ops);
   1703 		num -= (WM_NTXDESC(txq) - start);
   1704 		start = 0;
   1705 	}
   1706 
   1707 	/* Now sync whatever is left. */
   1708 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1709 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1710 }
   1711 
   1712 static inline void
   1713 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1714 {
   1715 	struct wm_softc *sc = rxq->rxq_sc;
   1716 
   1717 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1718 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1719 }
   1720 
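/*
 * (Re)initialize the Rx descriptor at "start" to point at its mbuf's
 * buffer and hand it back to the hardware.  Three descriptor layouts
 * are handled: the 82574 extended format, the 82575-and-newer "new
 * queue" format and the legacy wiseman format.
 */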
   1721 static inline void
   1722 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1723 {
   1724 	struct wm_softc *sc = rxq->rxq_sc;
   1725 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1726 	struct mbuf *m = rxs->rxs_mbuf;
   1727 
   1728 	/*
   1729 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1730 	 * so that the payload after the Ethernet header is aligned
   1731 	 * to a 4-byte boundary.
1732 	 *
   1733 	 * XXX BRAINDAMAGE ALERT!
   1734 	 * The stupid chip uses the same size for every buffer, which
   1735 	 * is set in the Receive Control register.  We are using the 2K
   1736 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1737 	 * reason, we can't "scoot" packets longer than the standard
   1738 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1739 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1740 	 * the upper layer copy the headers.
   1741 	 */
   1742 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1743 
   1744 	if (sc->sc_type == WM_T_82574) {
   1745 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1746 		rxd->erx_data.erxd_addr =
   1747 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1748 		rxd->erx_data.erxd_dd = 0;
   1749 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1750 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1751 
   1752 		rxd->nqrx_data.nrxd_paddr =
   1753 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1754 		/* Currently, split header is not supported. */
   1755 		rxd->nqrx_data.nrxd_haddr = 0;
   1756 	} else {
   1757 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1758 
   1759 		wm_set_dma_addr(&rxd->wrx_addr,
   1760 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1761 		rxd->wrx_len = 0;
   1762 		rxd->wrx_cksum = 0;
   1763 		rxd->wrx_status = 0;
   1764 		rxd->wrx_errors = 0;
   1765 		rxd->wrx_special = 0;
   1766 	}
   1767 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1768 
   1769 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1770 }
   1771 
   1772 /*
   1773  * Device driver interface functions and commonly used functions.
   1774  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1775  */
   1776 
   1777 /* Lookup supported device table */
   1778 static const struct wm_product *
   1779 wm_lookup(const struct pci_attach_args *pa)
   1780 {
   1781 	const struct wm_product *wmp;
   1782 
   1783 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1784 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1785 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1786 			return wmp;
   1787 	}
   1788 	return NULL;
   1789 }
   1790 
   1791 /* The match function (ca_match) */
   1792 static int
   1793 wm_match(device_t parent, cfdata_t cf, void *aux)
   1794 {
   1795 	struct pci_attach_args *pa = aux;
   1796 
   1797 	if (wm_lookup(pa) != NULL)
   1798 		return 1;
   1799 
   1800 	return 0;
   1801 }
   1802 
   1803 /* The attach function (ca_attach) */
   1804 static void
   1805 wm_attach(device_t parent, device_t self, void *aux)
   1806 {
   1807 	struct wm_softc *sc = device_private(self);
   1808 	struct pci_attach_args *pa = aux;
   1809 	prop_dictionary_t dict;
   1810 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1811 	pci_chipset_tag_t pc = pa->pa_pc;
   1812 	int counts[PCI_INTR_TYPE_SIZE];
   1813 	pci_intr_type_t max_type;
   1814 	const char *eetype, *xname;
   1815 	bus_space_tag_t memt;
   1816 	bus_space_handle_t memh;
   1817 	bus_size_t memsize;
   1818 	int memh_valid;
   1819 	int i, error;
   1820 	const struct wm_product *wmp;
   1821 	prop_data_t ea;
   1822 	prop_number_t pn;
   1823 	uint8_t enaddr[ETHER_ADDR_LEN];
   1824 	char buf[256];
   1825 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1826 	pcireg_t preg, memtype;
   1827 	uint16_t eeprom_data, apme_mask;
   1828 	bool force_clear_smbi;
   1829 	uint32_t link_mode;
   1830 	uint32_t reg;
   1831 
   1832 	sc->sc_dev = self;
   1833 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1834 	sc->sc_core_stopping = false;
   1835 
   1836 	wmp = wm_lookup(pa);
   1837 #ifdef DIAGNOSTIC
   1838 	if (wmp == NULL) {
   1839 		printf("\n");
   1840 		panic("wm_attach: impossible");
   1841 	}
   1842 #endif
   1843 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1844 
   1845 	sc->sc_pc = pa->pa_pc;
   1846 	sc->sc_pcitag = pa->pa_tag;
   1847 
   1848 	if (pci_dma64_available(pa))
   1849 		sc->sc_dmat = pa->pa_dmat64;
   1850 	else
   1851 		sc->sc_dmat = pa->pa_dmat;
   1852 
   1853 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1854 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1855 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1856 
   1857 	sc->sc_type = wmp->wmp_type;
   1858 
   1859 	/* Set default function pointers */
   1860 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1861 	sc->phy.release = sc->nvm.release = wm_put_null;
   1862 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1863 
   1864 	if (sc->sc_type < WM_T_82543) {
   1865 		if (sc->sc_rev < 2) {
   1866 			aprint_error_dev(sc->sc_dev,
   1867 			    "i82542 must be at least rev. 2\n");
   1868 			return;
   1869 		}
   1870 		if (sc->sc_rev < 3)
   1871 			sc->sc_type = WM_T_82542_2_0;
   1872 	}
   1873 
   1874 	/*
   1875 	 * Disable MSI for Errata:
   1876 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1877 	 *
   1878 	 *  82544: Errata 25
   1879 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1880 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1881 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1882 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1883 	 *
   1884 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1885 	 *
   1886 	 *  82571 & 82572: Errata 63
   1887 	 */
   1888 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1889 	    || (sc->sc_type == WM_T_82572))
   1890 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1891 
   1892 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1893 	    || (sc->sc_type == WM_T_82580)
   1894 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1895 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1896 		sc->sc_flags |= WM_F_NEWQUEUE;
   1897 
   1898 	/* Set device properties (mactype) */
   1899 	dict = device_properties(sc->sc_dev);
   1900 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1901 
   1902 	/*
1903 	 * Map the device.  All devices support memory-mapped access,
   1904 	 * and it is really required for normal operation.
   1905 	 */
   1906 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1907 	switch (memtype) {
   1908 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1909 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1910 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1911 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1912 		break;
   1913 	default:
   1914 		memh_valid = 0;
   1915 		break;
   1916 	}
   1917 
   1918 	if (memh_valid) {
   1919 		sc->sc_st = memt;
   1920 		sc->sc_sh = memh;
   1921 		sc->sc_ss = memsize;
   1922 	} else {
   1923 		aprint_error_dev(sc->sc_dev,
   1924 		    "unable to map device registers\n");
   1925 		return;
   1926 	}
   1927 
   1928 	/*
   1929 	 * In addition, i82544 and later support I/O mapped indirect
   1930 	 * register access.  It is not desirable (nor supported in
   1931 	 * this driver) to use it for normal operation, though it is
   1932 	 * required to work around bugs in some chip versions.
   1933 	 */
   1934 	if (sc->sc_type >= WM_T_82544) {
   1935 		/* First we have to find the I/O BAR. */
   1936 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1937 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1938 			if (memtype == PCI_MAPREG_TYPE_IO)
   1939 				break;
   1940 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1941 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1942 				i += 4;	/* skip high bits, too */
   1943 		}
   1944 		if (i < PCI_MAPREG_END) {
   1945 			/*
1946 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1947 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1948 			 * That's not a problem, because newer chips don't
1949 			 * have this bug.
1950 			 *
1951 			 * The i8254x apparently doesn't respond when the
1952 			 * I/O BAR is 0, which looks somewhat like it hasn't
1953 			 * been configured.
   1954 			 */
   1955 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1956 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1957 				aprint_error_dev(sc->sc_dev,
   1958 				    "WARNING: I/O BAR at zero.\n");
   1959 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1960 					0, &sc->sc_iot, &sc->sc_ioh,
   1961 					NULL, &sc->sc_ios) == 0) {
   1962 				sc->sc_flags |= WM_F_IOH_VALID;
   1963 			} else
   1964 				aprint_error_dev(sc->sc_dev,
   1965 				    "WARNING: unable to map I/O space\n");
   1966 		}
   1967 
   1968 	}
   1969 
   1970 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1971 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1972 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1973 	if (sc->sc_type < WM_T_82542_2_1)
   1974 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1975 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1976 
   1977 	/* Power up chip */
   1978 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1979 	    && error != EOPNOTSUPP) {
   1980 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1981 		return;
   1982 	}
   1983 
   1984 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1985 	/*
1986 	 * To save interrupt resources, don't use MSI-X when we can use
1987 	 * only one queue.
1988 	 */
   1989 	if (sc->sc_nqueues > 1) {
   1990 		max_type = PCI_INTR_TYPE_MSIX;
   1991 		/*
1992 		 * The 82583 has an MSI-X capability in its PCI configuration
1993 		 * space, but it doesn't actually support it. At least the
1994 		 * documentation says nothing about MSI-X.
   1995 		 */
   1996 		counts[PCI_INTR_TYPE_MSIX]
   1997 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1998 	} else {
   1999 		max_type = PCI_INTR_TYPE_MSI;
   2000 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2001 	}
   2002 
   2003 	/* Allocation settings */
   2004 	counts[PCI_INTR_TYPE_MSI] = 1;
   2005 	counts[PCI_INTR_TYPE_INTX] = 1;
2006 	/* Overridden by the wm_disable_msi(x) flags */
   2007 	if (wm_disable_msi != 0) {
   2008 		counts[PCI_INTR_TYPE_MSI] = 0;
   2009 		if (wm_disable_msix != 0) {
   2010 			max_type = PCI_INTR_TYPE_INTX;
   2011 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2012 		}
   2013 	} else if (wm_disable_msix != 0) {
   2014 		max_type = PCI_INTR_TYPE_MSI;
   2015 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2016 	}
   2017 
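	/*
	 * Interrupt allocation fallback: start with the best type allowed
	 * by max_type; if setting it up fails, release the vectors and
	 * retry with the next one down (MSI-X -> MSI -> INTx).
	 */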
   2018 alloc_retry:
   2019 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2020 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2021 		return;
   2022 	}
   2023 
   2024 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2025 		error = wm_setup_msix(sc);
   2026 		if (error) {
   2027 			pci_intr_release(pc, sc->sc_intrs,
   2028 			    counts[PCI_INTR_TYPE_MSIX]);
   2029 
   2030 			/* Setup for MSI: Disable MSI-X */
   2031 			max_type = PCI_INTR_TYPE_MSI;
   2032 			counts[PCI_INTR_TYPE_MSI] = 1;
   2033 			counts[PCI_INTR_TYPE_INTX] = 1;
   2034 			goto alloc_retry;
   2035 		}
   2036 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2037 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2038 		error = wm_setup_legacy(sc);
   2039 		if (error) {
   2040 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2041 			    counts[PCI_INTR_TYPE_MSI]);
   2042 
   2043 			/* The next try is for INTx: Disable MSI */
   2044 			max_type = PCI_INTR_TYPE_INTX;
   2045 			counts[PCI_INTR_TYPE_INTX] = 1;
   2046 			goto alloc_retry;
   2047 		}
   2048 	} else {
   2049 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2050 		error = wm_setup_legacy(sc);
   2051 		if (error) {
   2052 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2053 			    counts[PCI_INTR_TYPE_INTX]);
   2054 			return;
   2055 		}
   2056 	}
   2057 
   2058 	/*
   2059 	 * Check the function ID (unit number of the chip).
   2060 	 */
   2061 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2062 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2063 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2064 	    || (sc->sc_type == WM_T_82580)
   2065 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2066 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2067 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2068 	else
   2069 		sc->sc_funcid = 0;
   2070 
   2071 	/*
   2072 	 * Determine a few things about the bus we're connected to.
   2073 	 */
   2074 	if (sc->sc_type < WM_T_82543) {
   2075 		/* We don't really know the bus characteristics here. */
   2076 		sc->sc_bus_speed = 33;
   2077 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2078 		/*
2079 		 * CSA (Communication Streaming Architecture) is about as fast
2080 		 * as a 32-bit 66MHz PCI bus.
   2081 		 */
   2082 		sc->sc_flags |= WM_F_CSA;
   2083 		sc->sc_bus_speed = 66;
   2084 		aprint_verbose_dev(sc->sc_dev,
   2085 		    "Communication Streaming Architecture\n");
   2086 		if (sc->sc_type == WM_T_82547) {
   2087 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2088 			callout_setfunc(&sc->sc_txfifo_ch,
   2089 			    wm_82547_txfifo_stall, sc);
   2090 			aprint_verbose_dev(sc->sc_dev,
   2091 			    "using 82547 Tx FIFO stall work-around\n");
   2092 		}
   2093 	} else if (sc->sc_type >= WM_T_82571) {
   2094 		sc->sc_flags |= WM_F_PCIE;
   2095 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2096 		    && (sc->sc_type != WM_T_ICH10)
   2097 		    && (sc->sc_type != WM_T_PCH)
   2098 		    && (sc->sc_type != WM_T_PCH2)
   2099 		    && (sc->sc_type != WM_T_PCH_LPT)
   2100 		    && (sc->sc_type != WM_T_PCH_SPT)
   2101 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2102 			/* ICH* and PCH* have no PCIe capability registers */
   2103 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2104 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2105 				NULL) == 0)
   2106 				aprint_error_dev(sc->sc_dev,
   2107 				    "unable to find PCIe capability\n");
   2108 		}
   2109 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2110 	} else {
   2111 		reg = CSR_READ(sc, WMREG_STATUS);
   2112 		if (reg & STATUS_BUS64)
   2113 			sc->sc_flags |= WM_F_BUS64;
   2114 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2115 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2116 
   2117 			sc->sc_flags |= WM_F_PCIX;
   2118 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2119 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2120 				aprint_error_dev(sc->sc_dev,
   2121 				    "unable to find PCIX capability\n");
   2122 			else if (sc->sc_type != WM_T_82545_3 &&
   2123 				 sc->sc_type != WM_T_82546_3) {
   2124 				/*
   2125 				 * Work around a problem caused by the BIOS
   2126 				 * setting the max memory read byte count
   2127 				 * incorrectly.
   2128 				 */
   2129 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2130 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2131 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2132 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2133 
   2134 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2135 				    PCIX_CMD_BYTECNT_SHIFT;
   2136 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2137 				    PCIX_STATUS_MAXB_SHIFT;
   2138 				if (bytecnt > maxb) {
   2139 					aprint_verbose_dev(sc->sc_dev,
   2140 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2141 					    512 << bytecnt, 512 << maxb);
   2142 					pcix_cmd = (pcix_cmd &
   2143 					    ~PCIX_CMD_BYTECNT_MASK) |
   2144 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2145 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2146 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2147 					    pcix_cmd);
   2148 				}
   2149 			}
   2150 		}
   2151 		/*
   2152 		 * The quad port adapter is special; it has a PCIX-PCIX
   2153 		 * bridge on the board, and can run the secondary bus at
   2154 		 * a higher speed.
   2155 		 */
   2156 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2157 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2158 								      : 66;
   2159 		} else if (sc->sc_flags & WM_F_PCIX) {
   2160 			switch (reg & STATUS_PCIXSPD_MASK) {
   2161 			case STATUS_PCIXSPD_50_66:
   2162 				sc->sc_bus_speed = 66;
   2163 				break;
   2164 			case STATUS_PCIXSPD_66_100:
   2165 				sc->sc_bus_speed = 100;
   2166 				break;
   2167 			case STATUS_PCIXSPD_100_133:
   2168 				sc->sc_bus_speed = 133;
   2169 				break;
   2170 			default:
   2171 				aprint_error_dev(sc->sc_dev,
   2172 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2173 				    reg & STATUS_PCIXSPD_MASK);
   2174 				sc->sc_bus_speed = 66;
   2175 				break;
   2176 			}
   2177 		} else
   2178 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2179 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2180 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2181 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2182 	}
   2183 
   2184 	/* clear interesting stat counters */
   2185 	CSR_READ(sc, WMREG_COLC);
   2186 	CSR_READ(sc, WMREG_RXERRC);
   2187 
   2188 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2189 	    || (sc->sc_type >= WM_T_ICH8))
   2190 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2191 	if (sc->sc_type >= WM_T_ICH8)
   2192 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2193 
2194 	/* Set up NVM access and PHY/NVM locking functions */
   2195 	switch (sc->sc_type) {
   2196 	case WM_T_82542_2_0:
   2197 	case WM_T_82542_2_1:
   2198 	case WM_T_82543:
   2199 	case WM_T_82544:
   2200 		/* Microwire */
   2201 		sc->nvm.read = wm_nvm_read_uwire;
   2202 		sc->sc_nvm_wordsize = 64;
   2203 		sc->sc_nvm_addrbits = 6;
   2204 		break;
   2205 	case WM_T_82540:
   2206 	case WM_T_82545:
   2207 	case WM_T_82545_3:
   2208 	case WM_T_82546:
   2209 	case WM_T_82546_3:
   2210 		/* Microwire */
   2211 		sc->nvm.read = wm_nvm_read_uwire;
   2212 		reg = CSR_READ(sc, WMREG_EECD);
   2213 		if (reg & EECD_EE_SIZE) {
   2214 			sc->sc_nvm_wordsize = 256;
   2215 			sc->sc_nvm_addrbits = 8;
   2216 		} else {
   2217 			sc->sc_nvm_wordsize = 64;
   2218 			sc->sc_nvm_addrbits = 6;
   2219 		}
   2220 		sc->sc_flags |= WM_F_LOCK_EECD;
   2221 		sc->nvm.acquire = wm_get_eecd;
   2222 		sc->nvm.release = wm_put_eecd;
   2223 		break;
   2224 	case WM_T_82541:
   2225 	case WM_T_82541_2:
   2226 	case WM_T_82547:
   2227 	case WM_T_82547_2:
   2228 		reg = CSR_READ(sc, WMREG_EECD);
   2229 		/*
2230 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
2231 		 * the 8254[17], so set the flags and functions before
2232 		 * calling it.
   2232 		 */
   2233 		sc->sc_flags |= WM_F_LOCK_EECD;
   2234 		sc->nvm.acquire = wm_get_eecd;
   2235 		sc->nvm.release = wm_put_eecd;
   2236 		if (reg & EECD_EE_TYPE) {
   2237 			/* SPI */
   2238 			sc->nvm.read = wm_nvm_read_spi;
   2239 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2240 			wm_nvm_set_addrbits_size_eecd(sc);
   2241 		} else {
   2242 			/* Microwire */
   2243 			sc->nvm.read = wm_nvm_read_uwire;
   2244 			if ((reg & EECD_EE_ABITS) != 0) {
   2245 				sc->sc_nvm_wordsize = 256;
   2246 				sc->sc_nvm_addrbits = 8;
   2247 			} else {
   2248 				sc->sc_nvm_wordsize = 64;
   2249 				sc->sc_nvm_addrbits = 6;
   2250 			}
   2251 		}
   2252 		break;
   2253 	case WM_T_82571:
   2254 	case WM_T_82572:
   2255 		/* SPI */
   2256 		sc->nvm.read = wm_nvm_read_eerd;
2257 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2258 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2259 		wm_nvm_set_addrbits_size_eecd(sc);
   2260 		sc->phy.acquire = wm_get_swsm_semaphore;
   2261 		sc->phy.release = wm_put_swsm_semaphore;
   2262 		sc->nvm.acquire = wm_get_nvm_82571;
   2263 		sc->nvm.release = wm_put_nvm_82571;
   2264 		break;
   2265 	case WM_T_82573:
   2266 	case WM_T_82574:
   2267 	case WM_T_82583:
   2268 		sc->nvm.read = wm_nvm_read_eerd;
2269 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2270 		if (sc->sc_type == WM_T_82573) {
   2271 			sc->phy.acquire = wm_get_swsm_semaphore;
   2272 			sc->phy.release = wm_put_swsm_semaphore;
   2273 			sc->nvm.acquire = wm_get_nvm_82571;
   2274 			sc->nvm.release = wm_put_nvm_82571;
   2275 		} else {
   2276 			/* Both PHY and NVM use the same semaphore. */
   2277 			sc->phy.acquire = sc->nvm.acquire
   2278 			    = wm_get_swfwhw_semaphore;
   2279 			sc->phy.release = sc->nvm.release
   2280 			    = wm_put_swfwhw_semaphore;
   2281 		}
   2282 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2283 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2284 			sc->sc_nvm_wordsize = 2048;
   2285 		} else {
   2286 			/* SPI */
   2287 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2288 			wm_nvm_set_addrbits_size_eecd(sc);
   2289 		}
   2290 		break;
   2291 	case WM_T_82575:
   2292 	case WM_T_82576:
   2293 	case WM_T_82580:
   2294 	case WM_T_I350:
   2295 	case WM_T_I354:
   2296 	case WM_T_80003:
   2297 		/* SPI */
   2298 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2299 		wm_nvm_set_addrbits_size_eecd(sc);
   2300 		if ((sc->sc_type == WM_T_80003)
   2301 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2302 			sc->nvm.read = wm_nvm_read_eerd;
   2303 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2304 		} else {
   2305 			sc->nvm.read = wm_nvm_read_spi;
   2306 			sc->sc_flags |= WM_F_LOCK_EECD;
   2307 		}
   2308 		sc->phy.acquire = wm_get_phy_82575;
   2309 		sc->phy.release = wm_put_phy_82575;
   2310 		sc->nvm.acquire = wm_get_nvm_80003;
   2311 		sc->nvm.release = wm_put_nvm_80003;
   2312 		break;
   2313 	case WM_T_ICH8:
   2314 	case WM_T_ICH9:
   2315 	case WM_T_ICH10:
   2316 	case WM_T_PCH:
   2317 	case WM_T_PCH2:
   2318 	case WM_T_PCH_LPT:
   2319 		sc->nvm.read = wm_nvm_read_ich8;
   2320 		/* FLASH */
   2321 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2322 		sc->sc_nvm_wordsize = 2048;
   2323 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2324 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2325 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2326 			aprint_error_dev(sc->sc_dev,
   2327 			    "can't map FLASH registers\n");
   2328 			goto out;
   2329 		}
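		/*
		 * Compute the NVM layout from GFPREG: the low bits hold the
		 * first sector of the flash region and the bits above 16 the
		 * last one.  The sector count is converted to bytes, then to
		 * 16-bit words, and halved again for the two flash banks.
		 */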
   2330 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2331 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2332 		    ICH_FLASH_SECTOR_SIZE;
   2333 		sc->sc_ich8_flash_bank_size =
   2334 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2335 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2336 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2337 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2338 		sc->sc_flashreg_offset = 0;
   2339 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2340 		sc->phy.release = wm_put_swflag_ich8lan;
   2341 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2342 		sc->nvm.release = wm_put_nvm_ich8lan;
   2343 		break;
   2344 	case WM_T_PCH_SPT:
   2345 	case WM_T_PCH_CNP:
   2346 		sc->nvm.read = wm_nvm_read_spt;
   2347 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2348 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2349 		sc->sc_flasht = sc->sc_st;
   2350 		sc->sc_flashh = sc->sc_sh;
   2351 		sc->sc_ich8_flash_base = 0;
   2352 		sc->sc_nvm_wordsize =
   2353 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2354 		    * NVM_SIZE_MULTIPLIER;
2355 		/* It is the size in bytes; we want words */
   2356 		sc->sc_nvm_wordsize /= 2;
   2357 		/* Assume 2 banks */
   2358 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2359 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2360 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2361 		sc->phy.release = wm_put_swflag_ich8lan;
   2362 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2363 		sc->nvm.release = wm_put_nvm_ich8lan;
   2364 		break;
   2365 	case WM_T_I210:
   2366 	case WM_T_I211:
2367 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2368 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2369 		if (wm_nvm_flash_presence_i210(sc)) {
   2370 			sc->nvm.read = wm_nvm_read_eerd;
   2371 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2372 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2373 			wm_nvm_set_addrbits_size_eecd(sc);
   2374 		} else {
   2375 			sc->nvm.read = wm_nvm_read_invm;
   2376 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2377 			sc->sc_nvm_wordsize = INVM_SIZE;
   2378 		}
   2379 		sc->phy.acquire = wm_get_phy_82575;
   2380 		sc->phy.release = wm_put_phy_82575;
   2381 		sc->nvm.acquire = wm_get_nvm_80003;
   2382 		sc->nvm.release = wm_put_nvm_80003;
   2383 		break;
   2384 	default:
   2385 		break;
   2386 	}
   2387 
   2388 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2389 	switch (sc->sc_type) {
   2390 	case WM_T_82571:
   2391 	case WM_T_82572:
   2392 		reg = CSR_READ(sc, WMREG_SWSM2);
   2393 		if ((reg & SWSM2_LOCK) == 0) {
   2394 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2395 			force_clear_smbi = true;
   2396 		} else
   2397 			force_clear_smbi = false;
   2398 		break;
   2399 	case WM_T_82573:
   2400 	case WM_T_82574:
   2401 	case WM_T_82583:
   2402 		force_clear_smbi = true;
   2403 		break;
   2404 	default:
   2405 		force_clear_smbi = false;
   2406 		break;
   2407 	}
   2408 	if (force_clear_smbi) {
   2409 		reg = CSR_READ(sc, WMREG_SWSM);
   2410 		if ((reg & SWSM_SMBI) != 0)
   2411 			aprint_error_dev(sc->sc_dev,
   2412 			    "Please update the Bootagent\n");
   2413 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2414 	}
   2415 
   2416 	/*
2417 	 * Defer printing the EEPROM type until after verifying the checksum.
   2418 	 * This allows the EEPROM type to be printed correctly in the case
   2419 	 * that no EEPROM is attached.
   2420 	 */
   2421 	/*
   2422 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2423 	 * this for later, so we can fail future reads from the EEPROM.
   2424 	 */
   2425 	if (wm_nvm_validate_checksum(sc)) {
   2426 		/*
2427 		 * Check the checksum again, because some PCI-e parts fail
2428 		 * the first time due to the link being in a sleep state.
   2429 		 */
   2430 		if (wm_nvm_validate_checksum(sc))
   2431 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2432 	}
   2433 
   2434 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2435 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2436 	else {
   2437 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2438 		    sc->sc_nvm_wordsize);
   2439 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2440 			aprint_verbose("iNVM");
   2441 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2442 			aprint_verbose("FLASH(HW)");
   2443 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2444 			aprint_verbose("FLASH");
   2445 		else {
   2446 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2447 				eetype = "SPI";
   2448 			else
   2449 				eetype = "MicroWire";
   2450 			aprint_verbose("(%d address bits) %s EEPROM",
   2451 			    sc->sc_nvm_addrbits, eetype);
   2452 		}
   2453 	}
   2454 	wm_nvm_version(sc);
   2455 	aprint_verbose("\n");
   2456 
   2457 	/*
2458 	 * XXX This is the first call of wm_gmii_setup_phytype(); the
2459 	 * result might be incorrect.
   2460 	 */
   2461 	wm_gmii_setup_phytype(sc, 0, 0);
   2462 
   2463 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2464 	switch (sc->sc_type) {
   2465 	case WM_T_ICH8:
   2466 	case WM_T_ICH9:
   2467 	case WM_T_ICH10:
   2468 	case WM_T_PCH:
   2469 	case WM_T_PCH2:
   2470 	case WM_T_PCH_LPT:
   2471 	case WM_T_PCH_SPT:
   2472 	case WM_T_PCH_CNP:
   2473 		apme_mask = WUC_APME;
   2474 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2475 		if ((eeprom_data & apme_mask) != 0)
   2476 			sc->sc_flags |= WM_F_WOL;
   2477 		break;
   2478 	default:
   2479 		break;
   2480 	}
   2481 
   2482 	/* Reset the chip to a known state. */
   2483 	wm_reset(sc);
   2484 
   2485 	/*
   2486 	 * Check for I21[01] PLL workaround.
   2487 	 *
   2488 	 * Three cases:
   2489 	 * a) Chip is I211.
   2490 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2491 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2492 	 */
   2493 	if (sc->sc_type == WM_T_I211)
   2494 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2495 	if (sc->sc_type == WM_T_I210) {
   2496 		if (!wm_nvm_flash_presence_i210(sc))
   2497 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2498 		else if ((sc->sc_nvm_ver_major < 3)
   2499 		    || ((sc->sc_nvm_ver_major == 3)
   2500 			&& (sc->sc_nvm_ver_minor < 25))) {
   2501 			aprint_verbose_dev(sc->sc_dev,
   2502 			    "ROM image version %d.%d is older than 3.25\n",
   2503 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2504 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2505 		}
   2506 	}
   2507 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2508 		wm_pll_workaround_i210(sc);
   2509 
   2510 	wm_get_wakeup(sc);
   2511 
   2512 	/* Non-AMT based hardware can now take control from firmware */
   2513 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2514 		wm_get_hw_control(sc);
   2515 
   2516 	/*
2517 	 * Read the Ethernet address from the EEPROM, unless it was
2518 	 * already found in the device properties.
   2519 	 */
   2520 	ea = prop_dictionary_get(dict, "mac-address");
   2521 	if (ea != NULL) {
   2522 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2523 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2524 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2525 	} else {
   2526 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2527 			aprint_error_dev(sc->sc_dev,
   2528 			    "unable to read Ethernet address\n");
   2529 			goto out;
   2530 		}
   2531 	}
   2532 
   2533 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2534 	    ether_sprintf(enaddr));
   2535 
   2536 	/*
   2537 	 * Read the config info from the EEPROM, and set up various
   2538 	 * bits in the control registers based on their contents.
   2539 	 */
   2540 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2541 	if (pn != NULL) {
   2542 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2543 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2544 	} else {
   2545 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2546 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2547 			goto out;
   2548 		}
   2549 	}
   2550 
   2551 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2552 	if (pn != NULL) {
   2553 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2554 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2555 	} else {
   2556 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2557 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2558 			goto out;
   2559 		}
   2560 	}
   2561 
2562 	/* Check for WM_F_WOL */
   2563 	switch (sc->sc_type) {
   2564 	case WM_T_82542_2_0:
   2565 	case WM_T_82542_2_1:
   2566 	case WM_T_82543:
   2567 		/* dummy? */
   2568 		eeprom_data = 0;
   2569 		apme_mask = NVM_CFG3_APME;
   2570 		break;
   2571 	case WM_T_82544:
   2572 		apme_mask = NVM_CFG2_82544_APM_EN;
   2573 		eeprom_data = cfg2;
   2574 		break;
   2575 	case WM_T_82546:
   2576 	case WM_T_82546_3:
   2577 	case WM_T_82571:
   2578 	case WM_T_82572:
   2579 	case WM_T_82573:
   2580 	case WM_T_82574:
   2581 	case WM_T_82583:
   2582 	case WM_T_80003:
   2583 	case WM_T_82575:
   2584 	case WM_T_82576:
   2585 		apme_mask = NVM_CFG3_APME;
   2586 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2587 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2588 		break;
   2589 	case WM_T_82580:
   2590 	case WM_T_I350:
   2591 	case WM_T_I354:
   2592 	case WM_T_I210:
   2593 	case WM_T_I211:
   2594 		apme_mask = NVM_CFG3_APME;
   2595 		wm_nvm_read(sc,
   2596 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2597 		    1, &eeprom_data);
   2598 		break;
   2599 	case WM_T_ICH8:
   2600 	case WM_T_ICH9:
   2601 	case WM_T_ICH10:
   2602 	case WM_T_PCH:
   2603 	case WM_T_PCH2:
   2604 	case WM_T_PCH_LPT:
   2605 	case WM_T_PCH_SPT:
   2606 	case WM_T_PCH_CNP:
2607 		/* Already checked before wm_reset() */
   2608 		apme_mask = eeprom_data = 0;
   2609 		break;
   2610 	default: /* XXX 82540 */
   2611 		apme_mask = NVM_CFG3_APME;
   2612 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2613 		break;
   2614 	}
2615 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2616 	if ((eeprom_data & apme_mask) != 0)
   2617 		sc->sc_flags |= WM_F_WOL;
   2618 
   2619 	/*
2620 	 * We have the EEPROM settings; now apply the special cases
2621 	 * where the EEPROM may be wrong or the board simply won't
2622 	 * support wake on LAN on a particular port.
   2623 	 */
   2624 	switch (sc->sc_pcidevid) {
   2625 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2626 		sc->sc_flags &= ~WM_F_WOL;
   2627 		break;
   2628 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2629 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2630 		/* Wake events only supported on port A for dual fiber
   2631 		 * regardless of eeprom setting */
   2632 		if (sc->sc_funcid == 1)
   2633 			sc->sc_flags &= ~WM_F_WOL;
   2634 		break;
   2635 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2636 		/* If quad port adapter, disable WoL on all but port A */
   2637 		if (sc->sc_funcid != 0)
   2638 			sc->sc_flags &= ~WM_F_WOL;
   2639 		break;
   2640 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2641 		/* Wake events only supported on port A for dual fiber
   2642 		 * regardless of eeprom setting */
   2643 		if (sc->sc_funcid == 1)
   2644 			sc->sc_flags &= ~WM_F_WOL;
   2645 		break;
   2646 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2647 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2648 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2649 		/* If quad port adapter, disable WoL on all but port A */
   2650 		if (sc->sc_funcid != 0)
   2651 			sc->sc_flags &= ~WM_F_WOL;
   2652 		break;
   2653 	}
   2654 
   2655 	if (sc->sc_type >= WM_T_82575) {
   2656 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2657 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2658 			    nvmword);
   2659 			if ((sc->sc_type == WM_T_82575) ||
   2660 			    (sc->sc_type == WM_T_82576)) {
   2661 				/* Check NVM for autonegotiation */
   2662 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2663 				    != 0)
   2664 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2665 			}
   2666 			if ((sc->sc_type == WM_T_82575) ||
   2667 			    (sc->sc_type == WM_T_I350)) {
   2668 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2669 					sc->sc_flags |= WM_F_MAS;
   2670 			}
   2671 		}
   2672 	}
   2673 
   2674 	/*
2675 	 * XXX Need special handling for some multi-port cards
2676 	 * to disable a particular port.
   2677 	 */
   2678 
   2679 	if (sc->sc_type >= WM_T_82544) {
   2680 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2681 		if (pn != NULL) {
   2682 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2683 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2684 		} else {
   2685 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2686 				aprint_error_dev(sc->sc_dev,
   2687 				    "unable to read SWDPIN\n");
   2688 				goto out;
   2689 			}
   2690 		}
   2691 	}
   2692 
   2693 	if (cfg1 & NVM_CFG1_ILOS)
   2694 		sc->sc_ctrl |= CTRL_ILOS;
   2695 
   2696 	/*
   2697 	 * XXX
2698 	 * This code isn't correct because pins 2 and 3 are located
2699 	 * in different positions on newer chips. Check all the
2700 	 * datasheets.
2701 	 *
2702 	 * Until this is resolved, only do this on chips up to the 82580.
   2702 	 */
   2703 	if (sc->sc_type <= WM_T_82580) {
   2704 		if (sc->sc_type >= WM_T_82544) {
   2705 			sc->sc_ctrl |=
   2706 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2707 			    CTRL_SWDPIO_SHIFT;
   2708 			sc->sc_ctrl |=
   2709 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2710 			    CTRL_SWDPINS_SHIFT;
   2711 		} else {
   2712 			sc->sc_ctrl |=
   2713 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2714 			    CTRL_SWDPIO_SHIFT;
   2715 		}
   2716 	}
   2717 
   2718 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2719 		wm_nvm_read(sc,
   2720 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2721 		    1, &nvmword);
   2722 		if (nvmword & NVM_CFG3_ILOS)
   2723 			sc->sc_ctrl |= CTRL_ILOS;
   2724 	}
   2725 
   2726 #if 0
   2727 	if (sc->sc_type >= WM_T_82544) {
   2728 		if (cfg1 & NVM_CFG1_IPS0)
   2729 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2730 		if (cfg1 & NVM_CFG1_IPS1)
   2731 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2732 		sc->sc_ctrl_ext |=
   2733 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2734 		    CTRL_EXT_SWDPIO_SHIFT;
   2735 		sc->sc_ctrl_ext |=
   2736 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2737 		    CTRL_EXT_SWDPINS_SHIFT;
   2738 	} else {
   2739 		sc->sc_ctrl_ext |=
   2740 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2741 		    CTRL_EXT_SWDPIO_SHIFT;
   2742 	}
   2743 #endif
   2744 
   2745 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2746 #if 0
   2747 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2748 #endif
   2749 
   2750 	if (sc->sc_type == WM_T_PCH) {
   2751 		uint16_t val;
   2752 
   2753 		/* Save the NVM K1 bit setting */
   2754 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2755 
   2756 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2757 			sc->sc_nvm_k1_enabled = 1;
   2758 		else
   2759 			sc->sc_nvm_k1_enabled = 0;
   2760 	}
   2761 
    2762 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2763 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2764 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2765 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2766 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2767 	    || sc->sc_type == WM_T_82573
   2768 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2769 		/* Copper only */
   2770 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2771 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2772 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2773 	    || (sc->sc_type == WM_T_I211)) {
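         		/*
         		 * For these chips the media type is derived from the
         		 * link mode field of the CTRL_EXT register.
         		 */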
   2774 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2775 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2776 		switch (link_mode) {
   2777 		case CTRL_EXT_LINK_MODE_1000KX:
   2778 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2779 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2780 			break;
   2781 		case CTRL_EXT_LINK_MODE_SGMII:
   2782 			if (wm_sgmii_uses_mdio(sc)) {
   2783 				aprint_normal_dev(sc->sc_dev,
   2784 				    "SGMII(MDIO)\n");
   2785 				sc->sc_flags |= WM_F_SGMII;
   2786 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2787 				break;
   2788 			}
   2789 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2790 			/*FALLTHROUGH*/
   2791 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2792 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2793 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2794 				if (link_mode
   2795 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2796 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2797 					sc->sc_flags |= WM_F_SGMII;
   2798 					aprint_verbose_dev(sc->sc_dev,
   2799 					    "SGMII\n");
   2800 				} else {
   2801 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2802 					aprint_verbose_dev(sc->sc_dev,
   2803 					    "SERDES\n");
   2804 				}
   2805 				break;
   2806 			}
   2807 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2808 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2809 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2810 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2811 				sc->sc_flags |= WM_F_SGMII;
   2812 			}
   2813 			/* Do not change link mode for 100BaseFX */
   2814 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2815 				break;
   2816 
   2817 			/* Change current link mode setting */
   2818 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2819 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2820 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2821 			else
   2822 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2823 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2824 			break;
   2825 		case CTRL_EXT_LINK_MODE_GMII:
   2826 		default:
   2827 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2828 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2829 			break;
   2830 		}
   2831 
   2833 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2834 			reg |= CTRL_EXT_I2C_ENA;
   2835 		else
   2836 			reg &= ~CTRL_EXT_I2C_ENA;
   2837 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2838 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2839 			wm_gmii_setup_phytype(sc, 0, 0);
   2840 			wm_reset_mdicnfg_82580(sc);
   2841 		}
   2842 	} else if (sc->sc_type < WM_T_82543 ||
   2843 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2844 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2845 			aprint_error_dev(sc->sc_dev,
   2846 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2847 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2848 		}
   2849 	} else {
   2850 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2851 			aprint_error_dev(sc->sc_dev,
   2852 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2853 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2854 		}
   2855 	}
   2856 
   2857 	if (sc->sc_type >= WM_T_PCH2)
   2858 		sc->sc_flags |= WM_F_EEE;
   2859 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2860 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2861 		/* XXX: Need special handling for I354. (not yet) */
   2862 		if (sc->sc_type != WM_T_I354)
   2863 			sc->sc_flags |= WM_F_EEE;
   2864 	}
   2865 
   2866 	/* Set device properties (macflags) */
   2867 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2868 
   2869 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2870 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2871 
   2872 	/* Initialize the media structures accordingly. */
   2873 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2874 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2875 	else
   2876 		wm_tbi_mediainit(sc); /* All others */
   2877 
   2878 	ifp = &sc->sc_ethercom.ec_if;
   2879 	xname = device_xname(sc->sc_dev);
   2880 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2881 	ifp->if_softc = sc;
   2882 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2883 #ifdef WM_MPSAFE
   2884 	ifp->if_extflags = IFEF_MPSAFE;
   2885 #endif
   2886 	ifp->if_ioctl = wm_ioctl;
   2887 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2888 		ifp->if_start = wm_nq_start;
   2889 		/*
    2890 		 * When the number of CPUs is one and the controller can use
    2891 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2892 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2893 		 * other for link status changes.
    2894 		 * In this situation, wm_nq_transmit() is disadvantageous
    2895 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2896 		 */
   2897 		if (wm_is_using_multiqueue(sc))
   2898 			ifp->if_transmit = wm_nq_transmit;
   2899 	} else {
   2900 		ifp->if_start = wm_start;
   2901 		/*
    2902 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2903 		 */
   2904 		if (wm_is_using_multiqueue(sc))
   2905 			ifp->if_transmit = wm_transmit;
   2906 	}
    2907 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2908 	ifp->if_init = wm_init;
   2909 	ifp->if_stop = wm_stop;
   2910 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2911 	IFQ_SET_READY(&ifp->if_snd);
   2912 
   2913 	/* Check for jumbo frame */
   2914 	switch (sc->sc_type) {
   2915 	case WM_T_82573:
   2916 		/* XXX limited to 9234 if ASPM is disabled */
   2917 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2918 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2919 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2920 		break;
   2921 	case WM_T_82571:
   2922 	case WM_T_82572:
   2923 	case WM_T_82574:
   2924 	case WM_T_82583:
   2925 	case WM_T_82575:
   2926 	case WM_T_82576:
   2927 	case WM_T_82580:
   2928 	case WM_T_I350:
   2929 	case WM_T_I354:
   2930 	case WM_T_I210:
   2931 	case WM_T_I211:
   2932 	case WM_T_80003:
   2933 	case WM_T_ICH9:
   2934 	case WM_T_ICH10:
   2935 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2936 	case WM_T_PCH_LPT:
   2937 	case WM_T_PCH_SPT:
   2938 	case WM_T_PCH_CNP:
   2939 		/* XXX limited to 9234 */
   2940 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2941 		break;
   2942 	case WM_T_PCH:
   2943 		/* XXX limited to 4096 */
   2944 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2945 		break;
   2946 	case WM_T_82542_2_0:
   2947 	case WM_T_82542_2_1:
   2948 	case WM_T_ICH8:
   2949 		/* No support for jumbo frame */
   2950 		break;
   2951 	default:
   2952 		/* ETHER_MAX_LEN_JUMBO */
   2953 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2954 		break;
   2955 	}
   2956 
    2957 	/* If we're an i82543 or greater, we can support VLANs. */
   2958 	if (sc->sc_type >= WM_T_82543) {
   2959 		sc->sc_ethercom.ec_capabilities |=
   2960 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2961 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2962 	}
   2963 
   2964 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2965 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2966 
   2967 	/*
    2968 	 * We can perform TCPv4 and UDPv4 checksums in both directions, and
    2969 	 * TCPv6 and UDPv6 checksums on transmit. Only on i82543 and later.
   2970 	 */
   2971 	if (sc->sc_type >= WM_T_82543) {
   2972 		ifp->if_capabilities |=
   2973 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2974 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2975 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2976 		    IFCAP_CSUM_TCPv6_Tx |
   2977 		    IFCAP_CSUM_UDPv6_Tx;
   2978 	}
   2979 
   2980 	/*
    2981 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2982 	 *
   2983 	 *	82541GI (8086:1076) ... no
   2984 	 *	82572EI (8086:10b9) ... yes
   2985 	 */
   2986 	if (sc->sc_type >= WM_T_82571) {
   2987 		ifp->if_capabilities |=
   2988 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2989 	}
   2990 
   2991 	/*
   2992 	 * If we're a i82544 or greater (except i82547), we can do
   2993 	 * TCP segmentation offload.
   2994 	 */
   2995 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2996 		ifp->if_capabilities |= IFCAP_TSOv4;
   2997 	}
   2998 
   2999 	if (sc->sc_type >= WM_T_82571) {
   3000 		ifp->if_capabilities |= IFCAP_TSOv6;
   3001 	}
   3002 
   3003 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3004 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3005 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3006 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3007 
   3008 #ifdef WM_MPSAFE
   3009 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3010 #else
   3011 	sc->sc_core_lock = NULL;
   3012 #endif
   3013 
   3014 	/* Attach the interface. */
   3015 	error = if_initialize(ifp);
   3016 	if (error != 0) {
   3017 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3018 		    error);
   3019 		return; /* Error */
   3020 	}
   3021 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3022 	ether_ifattach(ifp, enaddr);
   3023 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3024 	if_register(ifp);
   3025 
   3026 #ifdef WM_EVENT_COUNTERS
   3027 	/* Attach event counters. */
   3028 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3029 	    NULL, xname, "linkintr");
   3030 
   3031 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3032 	    NULL, xname, "tx_xoff");
   3033 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3034 	    NULL, xname, "tx_xon");
   3035 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3036 	    NULL, xname, "rx_xoff");
   3037 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3038 	    NULL, xname, "rx_xon");
   3039 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3040 	    NULL, xname, "rx_macctl");
   3041 #endif /* WM_EVENT_COUNTERS */
   3042 
   3043 	sc->sc_txrx_use_workqueue = false;
   3044 
   3045 	wm_init_sysctls(sc);
   3046 
   3047 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3048 		pmf_class_network_register(self, ifp);
   3049 	else
   3050 		aprint_error_dev(self, "couldn't establish power handler\n");
   3051 
   3052 	sc->sc_flags |= WM_F_ATTACHED;
   3053 out:
   3054 	return;
   3055 }
   3056 
   3057 /* The detach function (ca_detach) */
   3058 static int
   3059 wm_detach(device_t self, int flags __unused)
   3060 {
   3061 	struct wm_softc *sc = device_private(self);
   3062 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3063 	int i;
   3064 
   3065 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3066 		return 0;
   3067 
   3068 	/* Stop the interface. Callouts are stopped in it. */
   3069 	wm_stop(ifp, 1);
   3070 
   3071 	pmf_device_deregister(self);
   3072 
   3073 	sysctl_teardown(&sc->sc_sysctllog);
   3074 
   3075 #ifdef WM_EVENT_COUNTERS
   3076 	evcnt_detach(&sc->sc_ev_linkintr);
   3077 
   3078 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3079 	evcnt_detach(&sc->sc_ev_tx_xon);
   3080 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3081 	evcnt_detach(&sc->sc_ev_rx_xon);
   3082 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3083 #endif /* WM_EVENT_COUNTERS */
   3084 
   3085 	/* Tell the firmware about the release */
   3086 	WM_CORE_LOCK(sc);
   3087 	wm_release_manageability(sc);
   3088 	wm_release_hw_control(sc);
   3089 	wm_enable_wakeup(sc);
   3090 	WM_CORE_UNLOCK(sc);
   3091 
   3092 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3093 
   3094 	/* Delete all remaining media. */
   3095 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3096 
   3097 	ether_ifdetach(ifp);
   3098 	if_detach(ifp);
   3099 	if_percpuq_destroy(sc->sc_ipq);
   3100 
   3101 	/* Unload RX dmamaps and free mbufs */
   3102 	for (i = 0; i < sc->sc_nqueues; i++) {
   3103 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3104 		mutex_enter(rxq->rxq_lock);
   3105 		wm_rxdrain(rxq);
   3106 		mutex_exit(rxq->rxq_lock);
   3107 	}
   3108 	/* Must unlock here */
   3109 
   3110 	/* Disestablish the interrupt handler */
   3111 	for (i = 0; i < sc->sc_nintrs; i++) {
   3112 		if (sc->sc_ihs[i] != NULL) {
   3113 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3114 			sc->sc_ihs[i] = NULL;
   3115 		}
   3116 	}
   3117 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3118 
   3119 	for (i = 0; i < sc->sc_nqueues; i++)
   3120 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3121 
   3122 	wm_free_txrx_queues(sc);
   3123 
   3124 	/* Unmap the registers */
   3125 	if (sc->sc_ss) {
   3126 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3127 		sc->sc_ss = 0;
   3128 	}
   3129 	if (sc->sc_ios) {
   3130 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3131 		sc->sc_ios = 0;
   3132 	}
   3133 	if (sc->sc_flashs) {
   3134 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3135 		sc->sc_flashs = 0;
   3136 	}
   3137 
   3138 	if (sc->sc_core_lock)
   3139 		mutex_obj_free(sc->sc_core_lock);
   3140 	if (sc->sc_ich_phymtx)
   3141 		mutex_obj_free(sc->sc_ich_phymtx);
   3142 	if (sc->sc_ich_nvmmtx)
   3143 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3144 
   3145 	return 0;
   3146 }
   3147 
   3148 static bool
   3149 wm_suspend(device_t self, const pmf_qual_t *qual)
   3150 {
   3151 	struct wm_softc *sc = device_private(self);
   3152 
   3153 	wm_release_manageability(sc);
   3154 	wm_release_hw_control(sc);
   3155 	wm_enable_wakeup(sc);
   3156 
   3157 	return true;
   3158 }
   3159 
   3160 static bool
   3161 wm_resume(device_t self, const pmf_qual_t *qual)
   3162 {
   3163 	struct wm_softc *sc = device_private(self);
   3164 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3165 	pcireg_t reg;
   3166 	char buf[256];
   3167 
   3168 	reg = CSR_READ(sc, WMREG_WUS);
   3169 	if (reg != 0) {
   3170 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3171 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3172 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3173 	}
   3174 
   3175 	if (sc->sc_type >= WM_T_PCH2)
   3176 		wm_resume_workarounds_pchlan(sc);
   3177 	if ((ifp->if_flags & IFF_UP) == 0) {
   3178 		wm_reset(sc);
   3179 		/* Non-AMT based hardware can now take control from firmware */
   3180 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3181 			wm_get_hw_control(sc);
   3182 		wm_init_manageability(sc);
   3183 	} else {
   3184 		/*
   3185 		 * We called pmf_class_network_register(), so if_init() is
   3186 		 * automatically called when IFF_UP. wm_reset(),
   3187 		 * wm_get_hw_control() and wm_init_manageability() are called
   3188 		 * via wm_init().
   3189 		 */
   3190 	}
   3191 
   3192 	return true;
   3193 }
   3194 
   3195 /*
   3196  * wm_watchdog:		[ifnet interface function]
   3197  *
   3198  *	Watchdog timer handler.
   3199  */
   3200 static void
   3201 wm_watchdog(struct ifnet *ifp)
   3202 {
   3203 	int qid;
   3204 	struct wm_softc *sc = ifp->if_softc;
    3205 	uint16_t hang_queue = 0; /* Max queue count of wm(4) is 16 (82576). */
   3206 
   3207 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3208 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3209 
   3210 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3211 	}
   3212 
    3213 	/* If any of the queues hung up, reset the interface. */
   3214 	if (hang_queue != 0) {
   3215 		(void)wm_init(ifp);
   3216 
   3217 		/*
    3218 		 * There is still some upper layer processing which calls
    3219 		 * ifp->if_start(), e.g. ALTQ or a one-CPU system.
   3220 		 */
   3221 		/* Try to get more packets going. */
   3222 		ifp->if_start(ifp);
   3223 	}
   3224 }
   3225 
   3226 
   3227 static void
   3228 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3229 {
   3230 
   3231 	mutex_enter(txq->txq_lock);
   3232 	if (txq->txq_sending &&
   3233 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3234 		wm_watchdog_txq_locked(ifp, txq, hang);
   3235 
   3236 	mutex_exit(txq->txq_lock);
   3237 }
   3238 
   3239 static void
   3240 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3241     uint16_t *hang)
   3242 {
   3243 	struct wm_softc *sc = ifp->if_softc;
   3244 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3245 
   3246 	KASSERT(mutex_owned(txq->txq_lock));
   3247 
   3248 	/*
   3249 	 * Since we're using delayed interrupts, sweep up
   3250 	 * before we report an error.
   3251 	 */
   3252 	wm_txeof(txq, UINT_MAX);
   3253 
   3254 	if (txq->txq_sending)
   3255 		*hang |= __BIT(wmq->wmq_id);
   3256 
   3257 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3258 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3259 		    device_xname(sc->sc_dev));
   3260 	} else {
   3261 #ifdef WM_DEBUG
   3262 		int i, j;
   3263 		struct wm_txsoft *txs;
   3264 #endif
   3265 		log(LOG_ERR,
   3266 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3267 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3268 		    txq->txq_next);
   3269 		ifp->if_oerrors++;
   3270 #ifdef WM_DEBUG
   3271 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3272 		    i = WM_NEXTTXS(txq, i)) {
   3273 			txs = &txq->txq_soft[i];
   3274 			printf("txs %d tx %d -> %d\n",
   3275 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3276 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3277 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3278 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3279 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3280 					printf("\t %#08x%08x\n",
   3281 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3282 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3283 				} else {
   3284 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3285 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3286 					    txq->txq_descs[j].wtx_addr.wa_low);
   3287 					printf("\t %#04x%02x%02x%08x\n",
   3288 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3289 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3290 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3291 					    txq->txq_descs[j].wtx_cmdlen);
   3292 				}
   3293 				if (j == txs->txs_lastdesc)
   3294 					break;
   3295 			}
   3296 		}
   3297 #endif
   3298 	}
   3299 }
   3300 
   3301 /*
   3302  * wm_tick:
   3303  *
   3304  *	One second timer, used to check link status, sweep up
   3305  *	completed transmit jobs, etc.
   3306  */
   3307 static void
   3308 wm_tick(void *arg)
   3309 {
   3310 	struct wm_softc *sc = arg;
   3311 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3312 #ifndef WM_MPSAFE
   3313 	int s = splnet();
   3314 #endif
   3315 
   3316 	WM_CORE_LOCK(sc);
   3317 
   3318 	if (sc->sc_core_stopping) {
   3319 		WM_CORE_UNLOCK(sc);
   3320 #ifndef WM_MPSAFE
   3321 		splx(s);
   3322 #endif
   3323 		return;
   3324 	}
   3325 
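         	/* Flow control (XON/XOFF pause frame) statistics. */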
   3326 	if (sc->sc_type >= WM_T_82542_2_1) {
   3327 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3328 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3329 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3330 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3331 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3332 	}
   3333 
   3334 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3335 	ifp->if_ierrors += 0ULL /* ensure quad_t */
    3336 	    + CSR_READ(sc, WMREG_CRCERRS)	/* CRC errors */
    3337 	    + CSR_READ(sc, WMREG_ALGNERRC)	/* alignment errors */
    3338 	    + CSR_READ(sc, WMREG_SYMERRC)	/* symbol errors */
    3339 	    + CSR_READ(sc, WMREG_RXERRC)	/* RX errors */
    3340 	    + CSR_READ(sc, WMREG_SEC)		/* sequence errors */
    3341 	    + CSR_READ(sc, WMREG_CEXTERR)	/* carrier extension errors */
    3342 	    + CSR_READ(sc, WMREG_RLEC);		/* receive length errors */
   3343 	/*
    3344 	 * WMREG_RNBC is incremented when there are no available buffers
    3345 	 * in host memory. It does not count dropped packets, because the
    3346 	 * ethernet controller can still receive packets in that case as
    3347 	 * long as there is space in the PHY's FIFO.
    3348 	 *
    3349 	 * If you want to track WMREG_RNBC, use your own EVCNT instead of
    3350 	 * if_iqdrops.
   3351 	 */
   3352 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3353 
   3354 	if (sc->sc_flags & WM_F_HAS_MII)
   3355 		mii_tick(&sc->sc_mii);
   3356 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3357 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3358 		wm_serdes_tick(sc);
   3359 	else
   3360 		wm_tbi_tick(sc);
   3361 
   3362 	WM_CORE_UNLOCK(sc);
   3363 
   3364 	wm_watchdog(ifp);
   3365 
   3366 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3367 }
   3368 
   3369 static int
   3370 wm_ifflags_cb(struct ethercom *ec)
   3371 {
   3372 	struct ifnet *ifp = &ec->ec_if;
   3373 	struct wm_softc *sc = ifp->if_softc;
   3374 	u_short iffchange;
   3375 	int ecchange;
   3376 	bool needreset = false;
   3377 	int rc = 0;
   3378 
   3379 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3380 		device_xname(sc->sc_dev), __func__));
   3381 
   3382 	WM_CORE_LOCK(sc);
   3383 
   3384 	/*
   3385 	 * Check for if_flags.
   3386 	 * Main usage is to prevent linkdown when opening bpf.
   3387 	 */
   3388 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3389 	sc->sc_if_flags = ifp->if_flags;
   3390 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3391 		needreset = true;
   3392 		goto ec;
   3393 	}
   3394 
   3395 	/* iff related updates */
   3396 	if ((iffchange & IFF_PROMISC) != 0)
   3397 		wm_set_filter(sc);
   3398 
   3399 	wm_set_vlan(sc);
   3400 
   3401 ec:
   3402 	/* Check for ec_capenable. */
   3403 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3404 	sc->sc_ec_capenable = ec->ec_capenable;
   3405 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3406 		needreset = true;
   3407 		goto out;
   3408 	}
   3409 
   3410 	/* ec related updates */
   3411 	wm_set_eee(sc);
   3412 
   3413 out:
   3414 	if (needreset)
   3415 		rc = ENETRESET;
   3416 	WM_CORE_UNLOCK(sc);
   3417 
   3418 	return rc;
   3419 }
   3420 
   3421 /*
   3422  * wm_ioctl:		[ifnet interface function]
   3423  *
   3424  *	Handle control requests from the operator.
   3425  */
   3426 static int
   3427 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3428 {
   3429 	struct wm_softc *sc = ifp->if_softc;
   3430 	struct ifreq *ifr = (struct ifreq *)data;
   3431 	struct ifaddr *ifa = (struct ifaddr *)data;
   3432 	struct sockaddr_dl *sdl;
   3433 	int s, error;
   3434 
   3435 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3436 		device_xname(sc->sc_dev), __func__));
   3437 
   3438 #ifndef WM_MPSAFE
   3439 	s = splnet();
   3440 #endif
   3441 	switch (cmd) {
   3442 	case SIOCSIFMEDIA:
   3443 		WM_CORE_LOCK(sc);
   3444 		/* Flow control requires full-duplex mode. */
   3445 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3446 		    (ifr->ifr_media & IFM_FDX) == 0)
   3447 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3448 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3449 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3450 				/* We can do both TXPAUSE and RXPAUSE. */
   3451 				ifr->ifr_media |=
   3452 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3453 			}
   3454 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3455 		}
   3456 		WM_CORE_UNLOCK(sc);
   3457 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3458 		break;
   3459 	case SIOCINITIFADDR:
   3460 		WM_CORE_LOCK(sc);
   3461 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3462 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3463 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3464 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3465 			/* Unicast address is the first multicast entry */
   3466 			wm_set_filter(sc);
   3467 			error = 0;
   3468 			WM_CORE_UNLOCK(sc);
   3469 			break;
   3470 		}
   3471 		WM_CORE_UNLOCK(sc);
   3472 		/*FALLTHROUGH*/
   3473 	default:
   3474 #ifdef WM_MPSAFE
   3475 		s = splnet();
   3476 #endif
   3477 		/* It may call wm_start, so unlock here */
   3478 		error = ether_ioctl(ifp, cmd, data);
   3479 #ifdef WM_MPSAFE
   3480 		splx(s);
   3481 #endif
   3482 		if (error != ENETRESET)
   3483 			break;
   3484 
   3485 		error = 0;
   3486 
   3487 		if (cmd == SIOCSIFCAP)
   3488 			error = (*ifp->if_init)(ifp);
   3489 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3490 			;
   3491 		else if (ifp->if_flags & IFF_RUNNING) {
   3492 			/*
   3493 			 * Multicast list has changed; set the hardware filter
   3494 			 * accordingly.
   3495 			 */
   3496 			WM_CORE_LOCK(sc);
   3497 			wm_set_filter(sc);
   3498 			WM_CORE_UNLOCK(sc);
   3499 		}
   3500 		break;
   3501 	}
   3502 
   3503 #ifndef WM_MPSAFE
   3504 	splx(s);
   3505 #endif
   3506 	return error;
   3507 }
   3508 
   3509 /* MAC address related */
   3510 
   3511 /*
    3512  * Get the offset of the MAC address and return it.
    3513  * If an error occurs, use offset 0.
   3514  */
   3515 static uint16_t
   3516 wm_check_alt_mac_addr(struct wm_softc *sc)
   3517 {
   3518 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3519 	uint16_t offset = NVM_OFF_MACADDR;
   3520 
   3521 	/* Try to read alternative MAC address pointer */
   3522 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3523 		return 0;
   3524 
    3525 	/* Check whether the pointer is valid or not. */
   3526 	if ((offset == 0x0000) || (offset == 0xffff))
   3527 		return 0;
   3528 
   3529 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3530 	/*
    3531 	 * Check whether the alternative MAC address is valid or not.
    3532 	 * Some cards have a non-0xffff pointer but don't actually use
    3533 	 * an alternative MAC address.
    3534 	 *
    3535 	 * A valid address has the broadcast bit (bit 0 of octet 0) clear.
   3536 	 */
   3537 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3538 		if (((myea[0] & 0xff) & 0x01) == 0)
   3539 			return offset; /* Found */
   3540 
   3541 	/* Not found */
   3542 	return 0;
   3543 }
   3544 
   3545 static int
   3546 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3547 {
   3548 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3549 	uint16_t offset = NVM_OFF_MACADDR;
   3550 	int do_invert = 0;
   3551 
   3552 	switch (sc->sc_type) {
   3553 	case WM_T_82580:
   3554 	case WM_T_I350:
   3555 	case WM_T_I354:
   3556 		/* EEPROM Top Level Partitioning */
   3557 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3558 		break;
   3559 	case WM_T_82571:
   3560 	case WM_T_82575:
   3561 	case WM_T_82576:
   3562 	case WM_T_80003:
   3563 	case WM_T_I210:
   3564 	case WM_T_I211:
   3565 		offset = wm_check_alt_mac_addr(sc);
   3566 		if (offset == 0)
   3567 			if ((sc->sc_funcid & 0x01) == 1)
   3568 				do_invert = 1;
   3569 		break;
   3570 	default:
   3571 		if ((sc->sc_funcid & 0x01) == 1)
   3572 			do_invert = 1;
   3573 		break;
   3574 	}
   3575 
   3576 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3577 		goto bad;
   3578 
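         	/*
         	 * The MAC address is stored in the NVM as three little-endian
         	 * 16-bit words; unpack it into bytes.
         	 */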
   3579 	enaddr[0] = myea[0] & 0xff;
   3580 	enaddr[1] = myea[0] >> 8;
   3581 	enaddr[2] = myea[1] & 0xff;
   3582 	enaddr[3] = myea[1] >> 8;
   3583 	enaddr[4] = myea[2] & 0xff;
   3584 	enaddr[5] = myea[2] >> 8;
   3585 
   3586 	/*
   3587 	 * Toggle the LSB of the MAC address on the second port
   3588 	 * of some dual port cards.
   3589 	 */
   3590 	if (do_invert != 0)
   3591 		enaddr[5] ^= 1;
   3592 
   3593 	return 0;
   3594 
   3595  bad:
   3596 	return -1;
   3597 }
   3598 
   3599 /*
   3600  * wm_set_ral:
   3601  *
    3602  *	Set an entry in the receive address list.
   3603  */
   3604 static void
   3605 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3606 {
   3607 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3608 	uint32_t wlock_mac;
   3609 	int rv;
   3610 
   3611 	if (enaddr != NULL) {
   3612 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3613 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3614 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3615 		ral_hi |= RAL_AV;
   3616 	} else {
   3617 		ral_lo = 0;
   3618 		ral_hi = 0;
   3619 	}
   3620 
   3621 	switch (sc->sc_type) {
   3622 	case WM_T_82542_2_0:
   3623 	case WM_T_82542_2_1:
   3624 	case WM_T_82543:
   3625 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3626 		CSR_WRITE_FLUSH(sc);
   3627 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3628 		CSR_WRITE_FLUSH(sc);
   3629 		break;
   3630 	case WM_T_PCH2:
   3631 	case WM_T_PCH_LPT:
   3632 	case WM_T_PCH_SPT:
   3633 	case WM_T_PCH_CNP:
   3634 		if (idx == 0) {
   3635 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3636 			CSR_WRITE_FLUSH(sc);
   3637 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3638 			CSR_WRITE_FLUSH(sc);
   3639 			return;
   3640 		}
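         		/*
         		 * Entries other than RAR[0] live in the SHRA registers.
         		 * On non-PCH2 chips the FWSM WLOCK_MAC field limits how
         		 * many of those entries software may write.
         		 */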
   3641 		if (sc->sc_type != WM_T_PCH2) {
   3642 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3643 			    FWSM_WLOCK_MAC);
   3644 			addrl = WMREG_SHRAL(idx - 1);
   3645 			addrh = WMREG_SHRAH(idx - 1);
   3646 		} else {
   3647 			wlock_mac = 0;
   3648 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3649 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3650 		}
   3651 
   3652 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3653 			rv = wm_get_swflag_ich8lan(sc);
   3654 			if (rv != 0)
   3655 				return;
   3656 			CSR_WRITE(sc, addrl, ral_lo);
   3657 			CSR_WRITE_FLUSH(sc);
   3658 			CSR_WRITE(sc, addrh, ral_hi);
   3659 			CSR_WRITE_FLUSH(sc);
   3660 			wm_put_swflag_ich8lan(sc);
   3661 		}
   3662 
   3663 		break;
   3664 	default:
   3665 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3666 		CSR_WRITE_FLUSH(sc);
   3667 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3668 		CSR_WRITE_FLUSH(sc);
   3669 		break;
   3670 	}
   3671 }
   3672 
   3673 /*
   3674  * wm_mchash:
   3675  *
    3676  *	Compute the hash of the multicast address for the 4096-bit
    3677  *	multicast filter (1024-bit on ICH and PCH variants).
   3678  */
   3679 static uint32_t
   3680 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3681 {
   3682 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3683 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3684 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3685 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3686 	uint32_t hash;
   3687 
   3688 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3689 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3690 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
    3691 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
   3692 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3693 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3694 		return (hash & 0x3ff);
   3695 	}
   3696 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3697 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
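         	/*
         	 * e.g. with sc_mchash_type 0 this is
         	 * (enaddr[4] >> 4) | (enaddr[5] << 4): the upper four bits of
         	 * octet 4 and all eight bits of octet 5 form the 12-bit hash.
         	 */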
   3698 
   3699 	return (hash & 0xfff);
   3700 }
   3701 
    3702 /*
    3703  * wm_rar_count:
    3704  *	Return the number of entries in the receive address list.
    3705  */
   3706 static int
   3707 wm_rar_count(struct wm_softc *sc)
   3708 {
   3709 	int size;
   3710 
   3711 	switch (sc->sc_type) {
   3712 	case WM_T_ICH8:
    3713 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3714 		break;
   3715 	case WM_T_ICH9:
   3716 	case WM_T_ICH10:
   3717 	case WM_T_PCH:
   3718 		size = WM_RAL_TABSIZE_ICH8;
   3719 		break;
   3720 	case WM_T_PCH2:
   3721 		size = WM_RAL_TABSIZE_PCH2;
   3722 		break;
   3723 	case WM_T_PCH_LPT:
   3724 	case WM_T_PCH_SPT:
   3725 	case WM_T_PCH_CNP:
   3726 		size = WM_RAL_TABSIZE_PCH_LPT;
   3727 		break;
   3728 	case WM_T_82575:
   3729 	case WM_T_I210:
   3730 	case WM_T_I211:
   3731 		size = WM_RAL_TABSIZE_82575;
   3732 		break;
   3733 	case WM_T_82576:
   3734 	case WM_T_82580:
   3735 		size = WM_RAL_TABSIZE_82576;
   3736 		break;
   3737 	case WM_T_I350:
   3738 	case WM_T_I354:
   3739 		size = WM_RAL_TABSIZE_I350;
   3740 		break;
   3741 	default:
   3742 		size = WM_RAL_TABSIZE;
   3743 	}
   3744 
   3745 	return size;
   3746 }
   3747 
   3748 /*
   3749  * wm_set_filter:
   3750  *
   3751  *	Set up the receive filter.
   3752  */
   3753 static void
   3754 wm_set_filter(struct wm_softc *sc)
   3755 {
   3756 	struct ethercom *ec = &sc->sc_ethercom;
   3757 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3758 	struct ether_multi *enm;
   3759 	struct ether_multistep step;
   3760 	bus_addr_t mta_reg;
   3761 	uint32_t hash, reg, bit;
   3762 	int i, size, ralmax;
   3763 
   3764 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3765 		device_xname(sc->sc_dev), __func__));
   3766 
   3767 	if (sc->sc_type >= WM_T_82544)
   3768 		mta_reg = WMREG_CORDOVA_MTA;
   3769 	else
   3770 		mta_reg = WMREG_MTA;
   3771 
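         	/*
         	 * RCTL_BAM accepts broadcast frames; RCTL_UPE and RCTL_MPE
         	 * are the unicast and multicast promiscuous modes.
         	 */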
   3772 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3773 
   3774 	if (ifp->if_flags & IFF_BROADCAST)
   3775 		sc->sc_rctl |= RCTL_BAM;
   3776 	if (ifp->if_flags & IFF_PROMISC) {
   3777 		sc->sc_rctl |= RCTL_UPE;
   3778 		ETHER_LOCK(ec);
   3779 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3780 		ETHER_UNLOCK(ec);
   3781 		goto allmulti;
   3782 	}
   3783 
   3784 	/*
   3785 	 * Set the station address in the first RAL slot, and
   3786 	 * clear the remaining slots.
   3787 	 */
   3788 	size = wm_rar_count(sc);
   3789 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3790 
   3791 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3792 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3793 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3794 		switch (i) {
   3795 		case 0:
   3796 			/* We can use all entries */
   3797 			ralmax = size;
   3798 			break;
   3799 		case 1:
   3800 			/* Only RAR[0] */
   3801 			ralmax = 1;
   3802 			break;
   3803 		default:
   3804 			/* Available SHRA + RAR[0] */
   3805 			ralmax = i + 1;
   3806 		}
   3807 	} else
   3808 		ralmax = size;
   3809 	for (i = 1; i < size; i++) {
   3810 		if (i < ralmax)
   3811 			wm_set_ral(sc, NULL, i);
   3812 	}
   3813 
   3814 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3815 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3816 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3817 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3818 		size = WM_ICH8_MC_TABSIZE;
   3819 	else
   3820 		size = WM_MC_TABSIZE;
   3821 	/* Clear out the multicast table. */
   3822 	for (i = 0; i < size; i++) {
   3823 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3824 		CSR_WRITE_FLUSH(sc);
   3825 	}
   3826 
   3827 	ETHER_LOCK(ec);
   3828 	ETHER_FIRST_MULTI(step, ec, enm);
   3829 	while (enm != NULL) {
   3830 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3831 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3832 			ETHER_UNLOCK(ec);
   3833 			/*
   3834 			 * We must listen to a range of multicast addresses.
   3835 			 * For now, just accept all multicasts, rather than
   3836 			 * trying to set only those filter bits needed to match
   3837 			 * the range.  (At this time, the only use of address
   3838 			 * ranges is for IP multicast routing, for which the
   3839 			 * range is big enough to require all bits set.)
   3840 			 */
   3841 			goto allmulti;
   3842 		}
   3843 
   3844 		hash = wm_mchash(sc, enm->enm_addrlo);
   3845 
   3846 		reg = (hash >> 5);
   3847 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3848 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3849 		    || (sc->sc_type == WM_T_PCH2)
   3850 		    || (sc->sc_type == WM_T_PCH_LPT)
   3851 		    || (sc->sc_type == WM_T_PCH_SPT)
   3852 		    || (sc->sc_type == WM_T_PCH_CNP))
   3853 			reg &= 0x1f;
   3854 		else
   3855 			reg &= 0x7f;
   3856 		bit = hash & 0x1f;
   3857 
   3858 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3859 		hash |= 1U << bit;
   3860 
   3861 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3862 			/*
   3863 			 * 82544 Errata 9: Certain register cannot be written
   3864 			 * with particular alignments in PCI-X bus operation
   3865 			 * (FCAH, MTA and VFTA).
   3866 			 */
   3867 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3868 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3869 			CSR_WRITE_FLUSH(sc);
   3870 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3871 			CSR_WRITE_FLUSH(sc);
   3872 		} else {
   3873 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3874 			CSR_WRITE_FLUSH(sc);
   3875 		}
   3876 
   3877 		ETHER_NEXT_MULTI(step, enm);
   3878 	}
   3879 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3880 	ETHER_UNLOCK(ec);
   3881 
   3882 	goto setit;
   3883 
   3884  allmulti:
   3885 	sc->sc_rctl |= RCTL_MPE;
   3886 
   3887  setit:
   3888 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3889 }
   3890 
   3891 /* Reset and init related */
   3892 
   3893 static void
   3894 wm_set_vlan(struct wm_softc *sc)
   3895 {
   3896 
   3897 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3898 		device_xname(sc->sc_dev), __func__));
   3899 
   3900 	/* Deal with VLAN enables. */
   3901 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3902 		sc->sc_ctrl |= CTRL_VME;
   3903 	else
   3904 		sc->sc_ctrl &= ~CTRL_VME;
   3905 
   3906 	/* Write the control registers. */
   3907 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3908 }
   3909 
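         /*
          * wm_set_pcie_completion_timeout:
          *
          *	Set the PCIe completion timeout if it is still at the hardware
          *	default, and disable resending on completion timeout.
          */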
   3910 static void
   3911 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3912 {
   3913 	uint32_t gcr;
   3914 	pcireg_t ctrl2;
   3915 
   3916 	gcr = CSR_READ(sc, WMREG_GCR);
   3917 
   3918 	/* Only take action if timeout value is defaulted to 0 */
   3919 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3920 		goto out;
   3921 
   3922 	if ((gcr & GCR_CAP_VER2) == 0) {
   3923 		gcr |= GCR_CMPL_TMOUT_10MS;
   3924 		goto out;
   3925 	}
   3926 
   3927 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3928 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3929 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3930 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3931 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3932 
   3933 out:
   3934 	/* Disable completion timeout resend */
   3935 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3936 
   3937 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3938 }
   3939 
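         /*
          * wm_get_auto_rd_done:
          *
          *	Wait for the EEPROM auto read (EECD_EE_AUTORD) to complete.
          */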
   3940 void
   3941 wm_get_auto_rd_done(struct wm_softc *sc)
   3942 {
   3943 	int i;
   3944 
    3945 	/* Wait for eeprom to reload */
   3946 	switch (sc->sc_type) {
   3947 	case WM_T_82571:
   3948 	case WM_T_82572:
   3949 	case WM_T_82573:
   3950 	case WM_T_82574:
   3951 	case WM_T_82583:
   3952 	case WM_T_82575:
   3953 	case WM_T_82576:
   3954 	case WM_T_82580:
   3955 	case WM_T_I350:
   3956 	case WM_T_I354:
   3957 	case WM_T_I210:
   3958 	case WM_T_I211:
   3959 	case WM_T_80003:
   3960 	case WM_T_ICH8:
   3961 	case WM_T_ICH9:
   3962 		for (i = 0; i < 10; i++) {
   3963 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3964 				break;
   3965 			delay(1000);
   3966 		}
   3967 		if (i == 10) {
   3968 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3969 			    "complete\n", device_xname(sc->sc_dev));
   3970 		}
   3971 		break;
   3972 	default:
   3973 		break;
   3974 	}
   3975 }
   3976 
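         /*
          * wm_lan_init_done:
          *
          *	Wait for the STATUS_LAN_INIT_DONE bit to be set, then clear it.
          */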
   3977 void
   3978 wm_lan_init_done(struct wm_softc *sc)
   3979 {
   3980 	uint32_t reg = 0;
   3981 	int i;
   3982 
   3983 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3984 		device_xname(sc->sc_dev), __func__));
   3985 
   3986 	/* Wait for eeprom to reload */
   3987 	switch (sc->sc_type) {
   3988 	case WM_T_ICH10:
   3989 	case WM_T_PCH:
   3990 	case WM_T_PCH2:
   3991 	case WM_T_PCH_LPT:
   3992 	case WM_T_PCH_SPT:
   3993 	case WM_T_PCH_CNP:
   3994 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3995 			reg = CSR_READ(sc, WMREG_STATUS);
   3996 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3997 				break;
   3998 			delay(100);
   3999 		}
   4000 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4001 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4002 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4003 		}
   4004 		break;
   4005 	default:
   4006 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4007 		    __func__);
   4008 		break;
   4009 	}
   4010 
   4011 	reg &= ~STATUS_LAN_INIT_DONE;
   4012 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4013 }
   4014 
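         /*
          * wm_get_cfg_done:
          *
          *	Wait for the post-reset hardware configuration to complete.
          *	The wait mechanism differs by chip generation.
          */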
   4015 void
   4016 wm_get_cfg_done(struct wm_softc *sc)
   4017 {
   4018 	int mask;
   4019 	uint32_t reg;
   4020 	int i;
   4021 
   4022 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4023 		device_xname(sc->sc_dev), __func__));
   4024 
   4025 	/* Wait for eeprom to reload */
   4026 	switch (sc->sc_type) {
   4027 	case WM_T_82542_2_0:
   4028 	case WM_T_82542_2_1:
   4029 		/* null */
   4030 		break;
   4031 	case WM_T_82543:
   4032 	case WM_T_82544:
   4033 	case WM_T_82540:
   4034 	case WM_T_82545:
   4035 	case WM_T_82545_3:
   4036 	case WM_T_82546:
   4037 	case WM_T_82546_3:
   4038 	case WM_T_82541:
   4039 	case WM_T_82541_2:
   4040 	case WM_T_82547:
   4041 	case WM_T_82547_2:
   4042 	case WM_T_82573:
   4043 	case WM_T_82574:
   4044 	case WM_T_82583:
   4045 		/* generic */
   4046 		delay(10*1000);
   4047 		break;
   4048 	case WM_T_80003:
   4049 	case WM_T_82571:
   4050 	case WM_T_82572:
   4051 	case WM_T_82575:
   4052 	case WM_T_82576:
   4053 	case WM_T_82580:
   4054 	case WM_T_I350:
   4055 	case WM_T_I354:
   4056 	case WM_T_I210:
   4057 	case WM_T_I211:
   4058 		if (sc->sc_type == WM_T_82571) {
    4059 			/* On the 82571 all functions share the port 0 bit */
   4060 			mask = EEMNGCTL_CFGDONE_0;
   4061 		} else
   4062 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4063 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4064 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4065 				break;
   4066 			delay(1000);
   4067 		}
   4068 		if (i >= WM_PHY_CFG_TIMEOUT)
   4069 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4070 				device_xname(sc->sc_dev), __func__));
   4071 		break;
   4072 	case WM_T_ICH8:
   4073 	case WM_T_ICH9:
   4074 	case WM_T_ICH10:
   4075 	case WM_T_PCH:
   4076 	case WM_T_PCH2:
   4077 	case WM_T_PCH_LPT:
   4078 	case WM_T_PCH_SPT:
   4079 	case WM_T_PCH_CNP:
   4080 		delay(10*1000);
   4081 		if (sc->sc_type >= WM_T_ICH10)
   4082 			wm_lan_init_done(sc);
   4083 		else
   4084 			wm_get_auto_rd_done(sc);
   4085 
   4086 		/* Clear PHY Reset Asserted bit */
   4087 		reg = CSR_READ(sc, WMREG_STATUS);
   4088 		if ((reg & STATUS_PHYRA) != 0)
   4089 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4090 		break;
   4091 	default:
   4092 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4093 		    __func__);
   4094 		break;
   4095 	}
   4096 }
   4097 
   4098 int
   4099 wm_phy_post_reset(struct wm_softc *sc)
   4100 {
   4101 	device_t dev = sc->sc_dev;
   4102 	uint16_t reg;
   4103 	int rv = 0;
   4104 
   4105 	/* This function is only for ICH8 and newer. */
   4106 	if (sc->sc_type < WM_T_ICH8)
   4107 		return 0;
   4108 
   4109 	if (wm_phy_resetisblocked(sc)) {
   4110 		/* XXX */
   4111 		device_printf(dev, "PHY is blocked\n");
   4112 		return -1;
   4113 	}
   4114 
   4115 	/* Allow time for h/w to get to quiescent state after reset */
   4116 	delay(10*1000);
   4117 
   4118 	/* Perform any necessary post-reset workarounds */
   4119 	if (sc->sc_type == WM_T_PCH)
   4120 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4121 	else if (sc->sc_type == WM_T_PCH2)
   4122 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4123 	if (rv != 0)
   4124 		return rv;
   4125 
   4126 	/* Clear the host wakeup bit after lcd reset */
   4127 	if (sc->sc_type >= WM_T_PCH) {
   4128 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4129 		reg &= ~BM_WUC_HOST_WU_BIT;
   4130 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4131 	}
   4132 
   4133 	/* Configure the LCD with the extended configuration region in NVM */
   4134 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4135 		return rv;
   4136 
   4137 	/* Configure the LCD with the OEM bits in NVM */
   4138 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4139 
   4140 	if (sc->sc_type == WM_T_PCH2) {
   4141 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4142 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4143 			delay(10 * 1000);
   4144 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4145 		}
   4146 		/* Set EEE LPI Update Timer to 200usec */
   4147 		rv = sc->phy.acquire(sc);
   4148 		if (rv)
   4149 			return rv;
   4150 		rv = wm_write_emi_reg_locked(dev,
   4151 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4152 		sc->phy.release(sc);
   4153 	}
   4154 
   4155 	return rv;
   4156 }
   4157 
   4158 /* Only for PCH and newer */
   4159 static int
   4160 wm_write_smbus_addr(struct wm_softc *sc)
   4161 {
   4162 	uint32_t strap, freq;
   4163 	uint16_t phy_data;
   4164 	int rv;
   4165 
   4166 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4167 		device_xname(sc->sc_dev), __func__));
   4168 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4169 
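         	/* The SMBus address and frequency come from the STRAP register. */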
   4170 	strap = CSR_READ(sc, WMREG_STRAP);
   4171 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4172 
   4173 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4174 	if (rv != 0)
   4175 		return -1;
   4176 
   4177 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4178 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4179 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4180 
   4181 	if (sc->sc_phytype == WMPHY_I217) {
   4182 		/* Restore SMBus frequency */
    4183 		if (freq--) {
   4184 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4185 			    | HV_SMB_ADDR_FREQ_HIGH);
   4186 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4187 			    HV_SMB_ADDR_FREQ_LOW);
   4188 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4189 			    HV_SMB_ADDR_FREQ_HIGH);
   4190 		} else
   4191 			DPRINTF(WM_DEBUG_INIT,
   4192 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4193 				device_xname(sc->sc_dev), __func__));
   4194 	}
   4195 
   4196 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4197 	    phy_data);
   4198 }
   4199 
   4200 static int
   4201 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4202 {
   4203 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4204 	uint16_t phy_page = 0;
   4205 	int rv = 0;
   4206 
   4207 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4208 		device_xname(sc->sc_dev), __func__));
   4209 
   4210 	switch (sc->sc_type) {
   4211 	case WM_T_ICH8:
   4212 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4213 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4214 			return 0;
   4215 
   4216 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4217 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4218 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4219 			break;
   4220 		}
   4221 		/* FALLTHROUGH */
   4222 	case WM_T_PCH:
   4223 	case WM_T_PCH2:
   4224 	case WM_T_PCH_LPT:
   4225 	case WM_T_PCH_SPT:
   4226 	case WM_T_PCH_CNP:
   4227 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4228 		break;
   4229 	default:
   4230 		return 0;
   4231 	}
   4232 
   4233 	if ((rv = sc->phy.acquire(sc)) != 0)
   4234 		return rv;
   4235 
   4236 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4237 	if ((reg & sw_cfg_mask) == 0)
   4238 		goto release;
   4239 
   4240 	/*
   4241 	 * Make sure HW does not configure LCD from PHY extended configuration
   4242 	 * before SW configuration
   4243 	 */
   4244 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4245 	if ((sc->sc_type < WM_T_PCH2)
   4246 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4247 		goto release;
   4248 
   4249 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4250 		device_xname(sc->sc_dev), __func__));
   4251 	/* word_addr is in DWORD */
   4252 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4253 
   4254 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4255 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4256 	if (cnf_size == 0)
   4257 		goto release;
   4258 
   4259 	if (((sc->sc_type == WM_T_PCH)
   4260 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4261 	    || (sc->sc_type > WM_T_PCH)) {
   4262 		/*
   4263 		 * HW configures the SMBus address and LEDs when the OEM and
   4264 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4265 		 * are cleared, SW will configure them instead.
   4266 		 */
   4267 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4268 			device_xname(sc->sc_dev), __func__));
   4269 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4270 			goto release;
   4271 
   4272 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4273 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4274 		    (uint16_t)reg);
   4275 		if (rv != 0)
   4276 			goto release;
   4277 	}
   4278 
   4279 	/* Configure LCD from extended configuration region. */
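         	/*
         	 * Each entry in the region is a pair of 16-bit NVM words:
         	 * the register data followed by the register address.
         	 */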
   4280 	for (i = 0; i < cnf_size; i++) {
   4281 		uint16_t reg_data, reg_addr;
   4282 
   4283 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4284 			goto release;
   4285 
   4286 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4287 			goto release;
   4288 
   4289 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4290 			phy_page = reg_data;
   4291 
   4292 		reg_addr &= IGPHY_MAXREGADDR;
   4293 		reg_addr |= phy_page;
   4294 
   4295 		KASSERT(sc->phy.writereg_locked != NULL);
   4296 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4297 		    reg_data);
   4298 	}
   4299 
   4300 release:
   4301 	sc->phy.release(sc);
   4302 	return rv;
   4303 }
   4304 
   4305 /*
   4306  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4307  *  @sc:       pointer to the HW structure
    4308  *  @d0_state: boolean: true if entering D0, false if entering D3
    4309  *
    4310  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
    4311  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4312  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4313  */
   4314 int
   4315 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4316 {
   4317 	uint32_t mac_reg;
   4318 	uint16_t oem_reg;
   4319 	int rv;
   4320 
   4321 	if (sc->sc_type < WM_T_PCH)
   4322 		return 0;
   4323 
   4324 	rv = sc->phy.acquire(sc);
   4325 	if (rv != 0)
   4326 		return rv;
   4327 
   4328 	if (sc->sc_type == WM_T_PCH) {
   4329 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4330 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4331 			goto release;
   4332 	}
   4333 
   4334 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4335 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4336 		goto release;
   4337 
   4338 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4339 
   4340 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4341 	if (rv != 0)
   4342 		goto release;
   4343 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4344 
   4345 	if (d0_state) {
   4346 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4347 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4348 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4349 			oem_reg |= HV_OEM_BITS_LPLU;
   4350 	} else {
   4351 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4352 		    != 0)
   4353 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4354 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4355 		    != 0)
   4356 			oem_reg |= HV_OEM_BITS_LPLU;
   4357 	}
   4358 
   4359 	/* Set Restart auto-neg to activate the bits */
   4360 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4361 	    && (wm_phy_resetisblocked(sc) == false))
   4362 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4363 
   4364 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4365 
   4366 release:
   4367 	sc->phy.release(sc);
   4368 
   4369 	return rv;
   4370 }
   4371 
   4372 /* Init hardware bits */
   4373 void
   4374 wm_initialize_hardware_bits(struct wm_softc *sc)
   4375 {
   4376 	uint32_t tarc0, tarc1, reg;
   4377 
   4378 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4379 		device_xname(sc->sc_dev), __func__));
   4380 
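         	/*
         	 * Many of the bit manipulations below are errata workarounds;
         	 * see the per-chip comments for the specific errata references.
         	 */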
   4381 	/* For 82571 variant, 80003 and ICHs */
   4382 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4383 	    || (sc->sc_type >= WM_T_80003)) {
   4384 
   4385 		/* Transmit Descriptor Control 0 */
   4386 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4387 		reg |= TXDCTL_COUNT_DESC;
   4388 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4389 
   4390 		/* Transmit Descriptor Control 1 */
   4391 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4392 		reg |= TXDCTL_COUNT_DESC;
   4393 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4394 
   4395 		/* TARC0 */
   4396 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4397 		switch (sc->sc_type) {
   4398 		case WM_T_82571:
   4399 		case WM_T_82572:
   4400 		case WM_T_82573:
   4401 		case WM_T_82574:
   4402 		case WM_T_82583:
   4403 		case WM_T_80003:
   4404 			/* Clear bits 30..27 */
   4405 			tarc0 &= ~__BITS(30, 27);
   4406 			break;
   4407 		default:
   4408 			break;
   4409 		}
   4410 
   4411 		switch (sc->sc_type) {
   4412 		case WM_T_82571:
   4413 		case WM_T_82572:
   4414 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4415 
   4416 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4417 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4418 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4419 			/* 8257[12] Errata No.7 */
    4420 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4421 
   4422 			/* TARC1 bit 28 */
   4423 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4424 				tarc1 &= ~__BIT(28);
   4425 			else
   4426 				tarc1 |= __BIT(28);
   4427 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4428 
   4429 			/*
   4430 			 * 8257[12] Errata No.13
    4431 			 * Disable Dynamic Clock Gating.
   4432 			 */
   4433 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4434 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4435 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4436 			break;
   4437 		case WM_T_82573:
   4438 		case WM_T_82574:
   4439 		case WM_T_82583:
   4440 			if ((sc->sc_type == WM_T_82574)
   4441 			    || (sc->sc_type == WM_T_82583))
   4442 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4443 
   4444 			/* Extended Device Control */
   4445 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4446 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4447 			reg |= __BIT(22);	/* Set bit 22 */
   4448 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4449 
   4450 			/* Device Control */
   4451 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4452 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4453 
   4454 			/* PCIe Control Register */
   4455 			/*
   4456 			 * 82573 Errata (unknown).
   4457 			 *
   4458 			 * 82574 Errata 25 and 82583 Errata 12
   4459 			 * "Dropped Rx Packets":
   4460 			 *   This bug was fixed in NVM Image Version 2.1.4.
   4461 			 */
   4462 			reg = CSR_READ(sc, WMREG_GCR);
   4463 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4464 			CSR_WRITE(sc, WMREG_GCR, reg);
   4465 
   4466 			if ((sc->sc_type == WM_T_82574)
   4467 			    || (sc->sc_type == WM_T_82583)) {
   4468 				/*
   4469 				 * Document says this bit must be set for
   4470 				 * proper operation.
   4471 				 */
   4472 				reg = CSR_READ(sc, WMREG_GCR);
   4473 				reg |= __BIT(22);
   4474 				CSR_WRITE(sc, WMREG_GCR, reg);
   4475 
   4476 				/*
   4477 				 * Apply a workaround for the hardware errata
   4478 				 * documented in the errata docs. It fixes an
   4479 				 * issue where some error-prone or unreliable
   4480 				 * PCIe completions occur, particularly with
   4481 				 * ASPM enabled. Without the fix, the issue
   4482 				 * can cause Tx timeouts.
   4483 				 */
   4484 				reg = CSR_READ(sc, WMREG_GCR2);
   4485 				reg |= __BIT(0);
   4486 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4487 			}
   4488 			break;
   4489 		case WM_T_80003:
   4490 			/* TARC0 */
   4491 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4492 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4493 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4494 
   4495 			/* TARC1 bit 28 */
   4496 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4497 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4498 				tarc1 &= ~__BIT(28);
   4499 			else
   4500 				tarc1 |= __BIT(28);
   4501 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4502 			break;
   4503 		case WM_T_ICH8:
   4504 		case WM_T_ICH9:
   4505 		case WM_T_ICH10:
   4506 		case WM_T_PCH:
   4507 		case WM_T_PCH2:
   4508 		case WM_T_PCH_LPT:
   4509 		case WM_T_PCH_SPT:
   4510 		case WM_T_PCH_CNP:
   4511 			/* TARC0 */
   4512 			if (sc->sc_type == WM_T_ICH8) {
   4513 				/* Set TARC0 bits 29 and 28 */
   4514 				tarc0 |= __BITS(29, 28);
   4515 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4516 				tarc0 |= __BIT(29);
   4517 				/*
   4518 				 * Drop bit 28 (from Linux).
   4519 				 * See I218/I219 spec update
   4520 				 * "5. Buffer Overrun While the I219 is
   4521 				 * Processing DMA Transactions"
   4522 				 */
   4523 				tarc0 &= ~__BIT(28);
   4524 			}
   4525 			/* Set TARC0 bits 23,24,26,27 */
   4526 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4527 
   4528 			/* CTRL_EXT */
   4529 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4530 			reg |= __BIT(22);	/* Set bit 22 */
   4531 			/*
   4532 			 * Enable PHY low-power state when MAC is at D3
   4533 			 * w/o WoL
   4534 			 */
   4535 			if (sc->sc_type >= WM_T_PCH)
   4536 				reg |= CTRL_EXT_PHYPDEN;
   4537 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4538 
   4539 			/* TARC1 */
   4540 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4541 			/* bit 28 */
   4542 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4543 				tarc1 &= ~__BIT(28);
   4544 			else
   4545 				tarc1 |= __BIT(28);
   4546 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4547 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4548 
   4549 			/* Device Status */
   4550 			if (sc->sc_type == WM_T_ICH8) {
   4551 				reg = CSR_READ(sc, WMREG_STATUS);
   4552 				reg &= ~__BIT(31);
   4553 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4554 
   4555 			}
   4556 
   4557 			/* IOSFPC */
   4558 			if (sc->sc_type == WM_T_PCH_SPT) {
   4559 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4560 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4561 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4562 			}
   4563 			/*
   4564 			 * Work-around descriptor data corruption issue during
   4565 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4566 			 * capability.
   4567 			 */
   4568 			reg = CSR_READ(sc, WMREG_RFCTL);
   4569 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4570 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4571 			break;
   4572 		default:
   4573 			break;
   4574 		}
   4575 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4576 
   4577 		switch (sc->sc_type) {
   4578 		/*
   4579 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4580 		 * Avoid RSS Hash Value bug.
   4581 		 */
   4582 		case WM_T_82571:
   4583 		case WM_T_82572:
   4584 		case WM_T_82573:
   4585 		case WM_T_80003:
   4586 		case WM_T_ICH8:
   4587 			reg = CSR_READ(sc, WMREG_RFCTL);
   4588 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4589 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4590 			break;
   4591 		case WM_T_82574:
   4592 			/* Use extended Rx descriptors. */
   4593 			reg = CSR_READ(sc, WMREG_RFCTL);
   4594 			reg |= WMREG_RFCTL_EXSTEN;
   4595 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4596 			break;
   4597 		default:
   4598 			break;
   4599 		}
   4600 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4601 		/*
   4602 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4603 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4604 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4605 		 * Correctly by the Device"
   4606 		 *
   4607 		 * I354(C2000) Errata AVR53:
   4608 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4609 		 * Hang"
   4610 		 */
   4611 		reg = CSR_READ(sc, WMREG_RFCTL);
   4612 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4613 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4614 	}
   4615 }
   4616 
   4617 static uint32_t
   4618 wm_rxpbs_adjust_82580(uint32_t val)
   4619 {
   4620 	uint32_t rv = 0;
   4621 
   4622 	if (val < __arraycount(wm_82580_rxpbs_table))
   4623 		rv = wm_82580_rxpbs_table[val];
   4624 
   4625 	return rv;
   4626 }
   4627 
   4628 /*
   4629  * wm_reset_phy:
   4630  *
   4631  *	generic PHY reset function.
   4632  *	Same as e1000_phy_hw_reset_generic()
   4633  */
   4634 static int
   4635 wm_reset_phy(struct wm_softc *sc)
   4636 {
   4637 	uint32_t reg;
   4638 
   4639 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4640 		device_xname(sc->sc_dev), __func__));
   4641 	if (wm_phy_resetisblocked(sc))
   4642 		return -1;
   4643 
   4644 	sc->phy.acquire(sc);
   4645 
   4646 	reg = CSR_READ(sc, WMREG_CTRL);
   4647 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4648 	CSR_WRITE_FLUSH(sc);
   4649 
   4650 	delay(sc->phy.reset_delay_us);
   4651 
   4652 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4653 	CSR_WRITE_FLUSH(sc);
   4654 
   4655 	delay(150);
   4656 
   4657 	sc->phy.release(sc);
   4658 
   4659 	wm_get_cfg_done(sc);
   4660 	wm_phy_post_reset(sc);
   4661 
   4662 	return 0;
   4663 }
   4664 
   4665 /*
   4666  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4667  * so it is enough to check only sc->sc_queue[0].
   4668  */
   4669 static void
   4670 wm_flush_desc_rings(struct wm_softc *sc)
   4671 {
   4672 	pcireg_t preg;
   4673 	uint32_t reg;
   4674 	struct wm_txqueue *txq;
   4675 	wiseman_txdesc_t *txd;
   4676 	int nexttx;
   4677 	uint32_t rctl;
   4678 
   4679 	/* First, disable MULR fix in FEXTNVM11 */
   4680 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4681 	reg |= FEXTNVM11_DIS_MULRFIX;
   4682 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4683 
   4684 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4685 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4686 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4687 		return;
   4688 
   4689 	/* TX */
   4690 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4691 	    preg, reg);
   4692 	reg = CSR_READ(sc, WMREG_TCTL);
   4693 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4694 
   4695 	txq = &sc->sc_queue[0].wmq_txq;
   4696 	nexttx = txq->txq_next;
   4697 	txd = &txq->txq_descs[nexttx];
   4698 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4699 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4700 	txd->wtx_fields.wtxu_status = 0;
   4701 	txd->wtx_fields.wtxu_options = 0;
   4702 	txd->wtx_fields.wtxu_vlan = 0;
   4703 
   4704 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4705 	    BUS_SPACE_BARRIER_WRITE);
   4706 
   4707 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4708 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4709 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4710 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4711 	delay(250);
   4712 
   4713 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4714 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4715 		return;
   4716 
   4717 	/* RX */
   4718 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4719 	rctl = CSR_READ(sc, WMREG_RCTL);
   4720 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4721 	CSR_WRITE_FLUSH(sc);
   4722 	delay(150);
   4723 
   4724 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4725 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4726 	reg &= 0xffffc000;
   4727 	/*
   4728 	 * Update thresholds: prefetch threshold to 31, host threshold
   4729 	 * to 1 and make sure the granularity is "descriptors" and not
   4730 	 * "cache lines"
   4731 	 */
   4732 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4733 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4734 
   4735 	/* Momentarily enable the RX ring for the changes to take effect */
   4736 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4737 	CSR_WRITE_FLUSH(sc);
   4738 	delay(150);
   4739 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4740 }
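
        /*
         * For clarity, a sketch (not compiled) of the RXDCTL threshold
         * packing above, rewritten with hypothetical field macros.  The
         * bit positions (PTHRESH in bits 5:0, HTHRESH in bits 13:8) are
         * assumptions inferred from the arithmetic above, not taken from
         * this driver's headers.
         */
        #if 0
        #define	EX_RXDCTL_PTHRESH(x)	((uint32_t)(x) & 0x3f)	/* bits 5:0 */
        #define	EX_RXDCTL_HTHRESH(x)	(((uint32_t)(x) & 0x3f) << 8) /* 13:8 */

        	reg = (reg & 0xffffc000)
        	    | EX_RXDCTL_PTHRESH(31)	/* prefetch threshold = 31 */
        	    | EX_RXDCTL_HTHRESH(1)	/* host threshold = 1 */
        	    | RXDCTL_GRAN;		/* granularity: descriptors */
        #endif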
   4741 
   4742 /*
   4743  * wm_reset:
   4744  *
   4745  *	Reset the i82542 chip.
   4746  */
   4747 static void
   4748 wm_reset(struct wm_softc *sc)
   4749 {
   4750 	int phy_reset = 0;
   4751 	int i, error = 0;
   4752 	uint32_t reg;
   4753 	uint16_t kmreg;
   4754 	int rv;
   4755 
   4756 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4757 		device_xname(sc->sc_dev), __func__));
   4758 	KASSERT(sc->sc_type != 0);
   4759 
   4760 	/*
   4761 	 * Allocate on-chip memory according to the MTU size.
   4762 	 * The Packet Buffer Allocation register must be written
   4763 	 * before the chip is reset.
   4764 	 */
   4765 	switch (sc->sc_type) {
   4766 	case WM_T_82547:
   4767 	case WM_T_82547_2:
   4768 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4769 		    PBA_22K : PBA_30K;
   4770 		for (i = 0; i < sc->sc_nqueues; i++) {
   4771 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4772 			txq->txq_fifo_head = 0;
   4773 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4774 			txq->txq_fifo_size =
   4775 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4776 			txq->txq_fifo_stall = 0;
   4777 		}
   4778 		break;
   4779 	case WM_T_82571:
   4780 	case WM_T_82572:
   4781 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4782 	case WM_T_80003:
   4783 		sc->sc_pba = PBA_32K;
   4784 		break;
   4785 	case WM_T_82573:
   4786 		sc->sc_pba = PBA_12K;
   4787 		break;
   4788 	case WM_T_82574:
   4789 	case WM_T_82583:
   4790 		sc->sc_pba = PBA_20K;
   4791 		break;
   4792 	case WM_T_82576:
   4793 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4794 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4795 		break;
   4796 	case WM_T_82580:
   4797 	case WM_T_I350:
   4798 	case WM_T_I354:
   4799 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4800 		break;
   4801 	case WM_T_I210:
   4802 	case WM_T_I211:
   4803 		sc->sc_pba = PBA_34K;
   4804 		break;
   4805 	case WM_T_ICH8:
   4806 		/* Workaround for a bit corruption issue in FIFO memory */
   4807 		sc->sc_pba = PBA_8K;
   4808 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4809 		break;
   4810 	case WM_T_ICH9:
   4811 	case WM_T_ICH10:
   4812 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4813 		    PBA_14K : PBA_10K;
   4814 		break;
   4815 	case WM_T_PCH:
   4816 	case WM_T_PCH2:	/* XXX 14K? */
   4817 	case WM_T_PCH_LPT:
   4818 	case WM_T_PCH_SPT:
   4819 	case WM_T_PCH_CNP:
   4820 		sc->sc_pba = PBA_26K;
   4821 		break;
   4822 	default:
   4823 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4824 		    PBA_40K : PBA_48K;
   4825 		break;
   4826 	}
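        	/*
        	 * Worked example for the 82547 case above: with a standard
        	 * MTU, sc_pba = PBA_30K, so the first 30KB of packet buffer
        	 * memory is left for RX and the remaining PBA_40K - PBA_30K
        	 * = 10KB becomes the TX FIFO (assuming the PBA_* constants
        	 * and shifts encode KB-granularity units, as the names
        	 * suggest).
        	 */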
   4827 	/*
   4828 	 * Only old or non-multiqueue devices have the PBA register.
   4829 	 * XXX Need special handling for 82575.
   4830 	 */
   4831 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4832 	    || (sc->sc_type == WM_T_82575))
   4833 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4834 
   4835 	/* Prevent the PCI-E bus from sticking */
   4836 	if (sc->sc_flags & WM_F_PCIE) {
   4837 		int timeout = 800;
   4838 
   4839 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4840 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4841 
   4842 		while (timeout--) {
   4843 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4844 			    == 0)
   4845 				break;
   4846 			delay(100);
   4847 		}
   4848 		if (timeout == 0)
   4849 			device_printf(sc->sc_dev,
   4850 			    "failed to disable busmastering\n");
   4851 	}
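
        	/*
        	 * Note: the loop above polls every 100us with an
        	 * 800-iteration budget, i.e. it allows up to 800 * 100us
        	 * = 80ms for in-flight PCIe master requests to drain
        	 * before giving up.
        	 */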
   4852 
   4853 	/* Set the completion timeout for interface */
   4854 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4855 	    || (sc->sc_type == WM_T_82580)
   4856 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4857 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4858 		wm_set_pcie_completion_timeout(sc);
   4859 
   4860 	/* Clear interrupt */
   4861 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4862 	if (wm_is_using_msix(sc)) {
   4863 		if (sc->sc_type != WM_T_82574) {
   4864 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4865 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4866 		} else
   4867 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4868 	}
   4869 
   4870 	/* Stop the transmit and receive processes. */
   4871 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4872 	sc->sc_rctl &= ~RCTL_EN;
   4873 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4874 	CSR_WRITE_FLUSH(sc);
   4875 
   4876 	/* XXX set_tbi_sbp_82543() */
   4877 
   4878 	delay(10*1000);
   4879 
   4880 	/* Must acquire the MDIO ownership before MAC reset */
   4881 	switch (sc->sc_type) {
   4882 	case WM_T_82573:
   4883 	case WM_T_82574:
   4884 	case WM_T_82583:
   4885 		error = wm_get_hw_semaphore_82573(sc);
   4886 		break;
   4887 	default:
   4888 		break;
   4889 	}
   4890 
   4891 	/*
   4892 	 * 82541 Errata 29? & 82547 Errata 28?
   4893 	 * See also the description of the PHY_RST bit in the CTRL register
   4894 	 * in 8254x_GBe_SDM.pdf.
   4895 	 */
   4896 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4897 		CSR_WRITE(sc, WMREG_CTRL,
   4898 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4899 		CSR_WRITE_FLUSH(sc);
   4900 		delay(5000);
   4901 	}
   4902 
   4903 	switch (sc->sc_type) {
   4904 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4905 	case WM_T_82541:
   4906 	case WM_T_82541_2:
   4907 	case WM_T_82547:
   4908 	case WM_T_82547_2:
   4909 		/*
   4910 		 * On some chipsets, a reset through a memory-mapped write
   4911 		 * cycle can cause the chip to reset before completing the
   4912 		 * write cycle. This causes major headaches that can be avoided
   4913 		 * by issuing the reset via indirect register writes through
   4914 		 * I/O space.
   4915 		 *
   4916 		 * So, if we successfully mapped the I/O BAR at attach time,
   4917 		 * use that. Otherwise, try our luck with a memory-mapped
   4918 		 * reset.
   4919 		 */
   4920 		if (sc->sc_flags & WM_F_IOH_VALID)
   4921 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4922 		else
   4923 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4924 		break;
   4925 	case WM_T_82545_3:
   4926 	case WM_T_82546_3:
   4927 		/* Use the shadow control register on these chips. */
   4928 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4929 		break;
   4930 	case WM_T_80003:
   4931 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4932 		sc->phy.acquire(sc);
   4933 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4934 		sc->phy.release(sc);
   4935 		break;
   4936 	case WM_T_ICH8:
   4937 	case WM_T_ICH9:
   4938 	case WM_T_ICH10:
   4939 	case WM_T_PCH:
   4940 	case WM_T_PCH2:
   4941 	case WM_T_PCH_LPT:
   4942 	case WM_T_PCH_SPT:
   4943 	case WM_T_PCH_CNP:
   4944 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4945 		if (wm_phy_resetisblocked(sc) == false) {
   4946 			/*
   4947 			 * Gate automatic PHY configuration by hardware on
   4948 			 * non-managed 82579
   4949 			 */
   4950 			if ((sc->sc_type == WM_T_PCH2)
   4951 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4952 				== 0))
   4953 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4954 
   4955 			reg |= CTRL_PHY_RESET;
   4956 			phy_reset = 1;
   4957 		} else
   4958 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4959 		sc->phy.acquire(sc);
   4960 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4961 		/* Don't insert a completion barrier when reset */
   4962 		delay(20*1000);
   4963 		mutex_exit(sc->sc_ich_phymtx);
   4964 		break;
   4965 	case WM_T_82580:
   4966 	case WM_T_I350:
   4967 	case WM_T_I354:
   4968 	case WM_T_I210:
   4969 	case WM_T_I211:
   4970 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4971 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4972 			CSR_WRITE_FLUSH(sc);
   4973 		delay(5000);
   4974 		break;
   4975 	case WM_T_82542_2_0:
   4976 	case WM_T_82542_2_1:
   4977 	case WM_T_82543:
   4978 	case WM_T_82540:
   4979 	case WM_T_82545:
   4980 	case WM_T_82546:
   4981 	case WM_T_82571:
   4982 	case WM_T_82572:
   4983 	case WM_T_82573:
   4984 	case WM_T_82574:
   4985 	case WM_T_82575:
   4986 	case WM_T_82576:
   4987 	case WM_T_82583:
   4988 	default:
   4989 		/* Everything else can safely use the documented method. */
   4990 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4991 		break;
   4992 	}
   4993 
   4994 	/* Must release the MDIO ownership after MAC reset */
   4995 	switch (sc->sc_type) {
   4996 	case WM_T_82573:
   4997 	case WM_T_82574:
   4998 	case WM_T_82583:
   4999 		if (error == 0)
   5000 			wm_put_hw_semaphore_82573(sc);
   5001 		break;
   5002 	default:
   5003 		break;
   5004 	}
   5005 
   5006 	/* Set Phy Config Counter to 50msec */
   5007 	if (sc->sc_type == WM_T_PCH2) {
   5008 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5009 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5010 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5011 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5012 	}
   5013 
   5014 	if (phy_reset != 0)
   5015 		wm_get_cfg_done(sc);
   5016 
   5017 	/* Reload EEPROM */
   5018 	switch (sc->sc_type) {
   5019 	case WM_T_82542_2_0:
   5020 	case WM_T_82542_2_1:
   5021 	case WM_T_82543:
   5022 	case WM_T_82544:
   5023 		delay(10);
   5024 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5025 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5026 		CSR_WRITE_FLUSH(sc);
   5027 		delay(2000);
   5028 		break;
   5029 	case WM_T_82540:
   5030 	case WM_T_82545:
   5031 	case WM_T_82545_3:
   5032 	case WM_T_82546:
   5033 	case WM_T_82546_3:
   5034 		delay(5*1000);
   5035 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5036 		break;
   5037 	case WM_T_82541:
   5038 	case WM_T_82541_2:
   5039 	case WM_T_82547:
   5040 	case WM_T_82547_2:
   5041 		delay(20000);
   5042 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5043 		break;
   5044 	case WM_T_82571:
   5045 	case WM_T_82572:
   5046 	case WM_T_82573:
   5047 	case WM_T_82574:
   5048 	case WM_T_82583:
   5049 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5050 			delay(10);
   5051 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5052 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5053 			CSR_WRITE_FLUSH(sc);
   5054 		}
   5055 		/* check EECD_EE_AUTORD */
   5056 		wm_get_auto_rd_done(sc);
   5057 		/*
   5058 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5059 		 * is set.
   5060 		 */
   5061 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5062 		    || (sc->sc_type == WM_T_82583))
   5063 			delay(25*1000);
   5064 		break;
   5065 	case WM_T_82575:
   5066 	case WM_T_82576:
   5067 	case WM_T_82580:
   5068 	case WM_T_I350:
   5069 	case WM_T_I354:
   5070 	case WM_T_I210:
   5071 	case WM_T_I211:
   5072 	case WM_T_80003:
   5073 		/* check EECD_EE_AUTORD */
   5074 		wm_get_auto_rd_done(sc);
   5075 		break;
   5076 	case WM_T_ICH8:
   5077 	case WM_T_ICH9:
   5078 	case WM_T_ICH10:
   5079 	case WM_T_PCH:
   5080 	case WM_T_PCH2:
   5081 	case WM_T_PCH_LPT:
   5082 	case WM_T_PCH_SPT:
   5083 	case WM_T_PCH_CNP:
   5084 		break;
   5085 	default:
   5086 		panic("%s: unknown type\n", __func__);
   5087 	}
   5088 
   5089 	/* Check whether EEPROM is present or not */
   5090 	switch (sc->sc_type) {
   5091 	case WM_T_82575:
   5092 	case WM_T_82576:
   5093 	case WM_T_82580:
   5094 	case WM_T_I350:
   5095 	case WM_T_I354:
   5096 	case WM_T_ICH8:
   5097 	case WM_T_ICH9:
   5098 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5099 			/* Not found */
   5100 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5101 			if (sc->sc_type == WM_T_82575)
   5102 				wm_reset_init_script_82575(sc);
   5103 		}
   5104 		break;
   5105 	default:
   5106 		break;
   5107 	}
   5108 
   5109 	if (phy_reset != 0)
   5110 		wm_phy_post_reset(sc);
   5111 
   5112 	if ((sc->sc_type == WM_T_82580)
   5113 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5114 		/* Clear global device reset status bit */
   5115 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5116 	}
   5117 
   5118 	/* Clear any pending interrupt events. */
   5119 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5120 	reg = CSR_READ(sc, WMREG_ICR);
   5121 	if (wm_is_using_msix(sc)) {
   5122 		if (sc->sc_type != WM_T_82574) {
   5123 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5124 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5125 		} else
   5126 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5127 	}
   5128 
   5129 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5130 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5131 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5132 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5133 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5134 		reg |= KABGTXD_BGSQLBIAS;
   5135 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5136 	}
   5137 
   5138 	/* Reload sc_ctrl */
   5139 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5140 
   5141 	wm_set_eee(sc);
   5142 
   5143 	/*
   5144 	 * For PCH, this write will make sure that any noise will be detected
   5145 	 * as a CRC error and be dropped rather than show up as a bad packet
   5146 	 * to the DMA engine.
   5147 	 */
   5148 	if (sc->sc_type == WM_T_PCH)
   5149 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5150 
   5151 	if (sc->sc_type >= WM_T_82544)
   5152 		CSR_WRITE(sc, WMREG_WUC, 0);
   5153 
   5154 	if (sc->sc_type < WM_T_82575)
   5155 		wm_disable_aspm(sc); /* Workaround for some chips */
   5156 
   5157 	wm_reset_mdicnfg_82580(sc);
   5158 
   5159 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5160 		wm_pll_workaround_i210(sc);
   5161 
   5162 	if (sc->sc_type == WM_T_80003) {
   5163 		/* Default to TRUE to enable the MDIC W/A */
   5164 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5165 
   5166 		rv = wm_kmrn_readreg(sc,
   5167 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5168 		if (rv == 0) {
   5169 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5170 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5171 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5172 			else
   5173 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5174 		}
   5175 	}
   5176 }
   5177 
   5178 /*
   5179  * wm_add_rxbuf:
   5180  *
   5181  *	Add a receive buffer to the indicated descriptor.
   5182  */
   5183 static int
   5184 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5185 {
   5186 	struct wm_softc *sc = rxq->rxq_sc;
   5187 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5188 	struct mbuf *m;
   5189 	int error;
   5190 
   5191 	KASSERT(mutex_owned(rxq->rxq_lock));
   5192 
   5193 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5194 	if (m == NULL)
   5195 		return ENOBUFS;
   5196 
   5197 	MCLGET(m, M_DONTWAIT);
   5198 	if ((m->m_flags & M_EXT) == 0) {
   5199 		m_freem(m);
   5200 		return ENOBUFS;
   5201 	}
   5202 
   5203 	if (rxs->rxs_mbuf != NULL)
   5204 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5205 
   5206 	rxs->rxs_mbuf = m;
   5207 
   5208 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5209 	/*
   5210 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5211 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5212 	 */
   5213 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5214 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5215 	if (error) {
   5216 		/* XXX XXX XXX */
   5217 		aprint_error_dev(sc->sc_dev,
   5218 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5219 		panic("wm_add_rxbuf");
   5220 	}
   5221 
   5222 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5223 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5224 
   5225 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5226 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5227 			wm_init_rxdesc(rxq, idx);
   5228 	} else
   5229 		wm_init_rxdesc(rxq, idx);
   5230 
   5231 	return 0;
   5232 }
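
        /*
         * A hypothetical caller sketch (not taken from this driver):
         * refilling a whole RX ring with wm_add_rxbuf() while holding
         * rxq_lock, stopping early when clusters run out.
         */
        #if 0
        	int idx, error;

        	KASSERT(mutex_owned(rxq->rxq_lock));
        	for (idx = 0; idx < WM_NRXDESC; idx++) {
        		error = wm_add_rxbuf(rxq, idx);
        		if (error != 0)
        			break;	/* ENOBUFS: ring only partially filled */
        	}
        #endif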
   5233 
   5234 /*
   5235  * wm_rxdrain:
   5236  *
   5237  *	Drain the receive queue.
   5238  */
   5239 static void
   5240 wm_rxdrain(struct wm_rxqueue *rxq)
   5241 {
   5242 	struct wm_softc *sc = rxq->rxq_sc;
   5243 	struct wm_rxsoft *rxs;
   5244 	int i;
   5245 
   5246 	KASSERT(mutex_owned(rxq->rxq_lock));
   5247 
   5248 	for (i = 0; i < WM_NRXDESC; i++) {
   5249 		rxs = &rxq->rxq_soft[i];
   5250 		if (rxs->rxs_mbuf != NULL) {
   5251 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5252 			m_freem(rxs->rxs_mbuf);
   5253 			rxs->rxs_mbuf = NULL;
   5254 		}
   5255 	}
   5256 }
   5257 
   5258 /*
   5259  * Set up registers for RSS.
   5260  *
   5261  * XXX VMDq is not yet supported.
   5262  */
   5263 static void
   5264 wm_init_rss(struct wm_softc *sc)
   5265 {
   5266 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5267 	int i;
   5268 
   5269 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5270 
   5271 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5272 		unsigned int qid, reta_ent;
   5273 
   5274 		qid  = i % sc->sc_nqueues;
   5275 		switch (sc->sc_type) {
   5276 		case WM_T_82574:
   5277 			reta_ent = __SHIFTIN(qid,
   5278 			    RETA_ENT_QINDEX_MASK_82574);
   5279 			break;
   5280 		case WM_T_82575:
   5281 			reta_ent = __SHIFTIN(qid,
   5282 			    RETA_ENT_QINDEX1_MASK_82575);
   5283 			break;
   5284 		default:
   5285 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5286 			break;
   5287 		}
   5288 
   5289 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5290 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5291 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5292 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5293 	}
   5294 
   5295 	rss_getkey((uint8_t *)rss_key);
   5296 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5297 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5298 
   5299 	if (sc->sc_type == WM_T_82574)
   5300 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5301 	else
   5302 		mrqc = MRQC_ENABLE_RSS_MQ;
   5303 
   5304 	/*
   5305 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5306 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5307 	 */
   5308 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5309 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5310 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5311 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5312 
   5313 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5314 }
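
        /*
         * A minimal illustration (not compiled) of the distribution the
         * RETA loop above produces: entry i maps to queue (i % sc_nqueues),
         * so with e.g. 4 queues the table cycles 0,1,2,3,0,1,...
         */
        #if 0
        	for (i = 0; i < RETA_NUM_ENTRIES; i++)
        		printf("RETA[%d] -> queue %d\n", i, i % sc->sc_nqueues);
        #endif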
   5315 
   5316 /*
   5317  * Adjust the TX and RX queue numbers which the system actually uses.
   5318  *
   5319  * The numbers are affected by the parameters below:
   5320  *     - The number of hardware queues
   5321  *     - The number of MSI-X vectors (= "nvectors" argument)
   5322  *     - ncpu
   5323  */
   5324 static void
   5325 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5326 {
   5327 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5328 
   5329 	if (nvectors < 2) {
   5330 		sc->sc_nqueues = 1;
   5331 		return;
   5332 	}
   5333 
   5334 	switch (sc->sc_type) {
   5335 	case WM_T_82572:
   5336 		hw_ntxqueues = 2;
   5337 		hw_nrxqueues = 2;
   5338 		break;
   5339 	case WM_T_82574:
   5340 		hw_ntxqueues = 2;
   5341 		hw_nrxqueues = 2;
   5342 		break;
   5343 	case WM_T_82575:
   5344 		hw_ntxqueues = 4;
   5345 		hw_nrxqueues = 4;
   5346 		break;
   5347 	case WM_T_82576:
   5348 		hw_ntxqueues = 16;
   5349 		hw_nrxqueues = 16;
   5350 		break;
   5351 	case WM_T_82580:
   5352 	case WM_T_I350:
   5353 	case WM_T_I354:
   5354 		hw_ntxqueues = 8;
   5355 		hw_nrxqueues = 8;
   5356 		break;
   5357 	case WM_T_I210:
   5358 		hw_ntxqueues = 4;
   5359 		hw_nrxqueues = 4;
   5360 		break;
   5361 	case WM_T_I211:
   5362 		hw_ntxqueues = 2;
   5363 		hw_nrxqueues = 2;
   5364 		break;
   5365 		/*
   5366 		 * Since the Ethernet controllers below do not support
   5367 		 * MSI-X, this driver does not use multiqueue on them:
   5368 		 *     - WM_T_80003
   5369 		 *     - WM_T_ICH8
   5370 		 *     - WM_T_ICH9
   5371 		 *     - WM_T_ICH10
   5372 		 *     - WM_T_PCH
   5373 		 *     - WM_T_PCH2
   5374 		 *     - WM_T_PCH_LPT
   5375 		 */
   5376 	default:
   5377 		hw_ntxqueues = 1;
   5378 		hw_nrxqueues = 1;
   5379 		break;
   5380 	}
   5381 
   5382 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5383 
   5384 	/*
   5385 	 * Since using more queues than MSI-X vectors cannot improve
   5386 	 * scaling, we limit the number of queues actually used.
   5387 	 */
   5388 	if (nvectors < hw_nqueues + 1)
   5389 		sc->sc_nqueues = nvectors - 1;
   5390 	else
   5391 		sc->sc_nqueues = hw_nqueues;
   5392 
   5393 	/*
   5394 	 * Since using more queues than CPUs cannot improve scaling,
   5395 	 * we limit the number of queues actually used.
   5396 	 */
   5397 	if (ncpu < sc->sc_nqueues)
   5398 		sc->sc_nqueues = ncpu;
   5399 }
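
        /*
         * Worked example of the limits above (illustrative numbers): an
         * 82576 (16 hardware TX/RX queues) attached with nvectors = 5 on
         * an 8-CPU machine gets hw_nqueues = uimin(16, 16) = 16; since
         * 5 < 16 + 1, sc_nqueues becomes nvectors - 1 = 4 (one vector is
         * left for the link interrupt), and the final ncpu check keeps
         * it at 4 because 8 >= 4.
         */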
   5400 
   5401 static inline bool
   5402 wm_is_using_msix(struct wm_softc *sc)
   5403 {
   5404 
   5405 	return (sc->sc_nintrs > 1);
   5406 }
   5407 
   5408 static inline bool
   5409 wm_is_using_multiqueue(struct wm_softc *sc)
   5410 {
   5411 
   5412 	return (sc->sc_nqueues > 1);
   5413 }
   5414 
   5415 static int
   5416 wm_softhandler_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5417 {
   5418 	char wqname[MAXCOMLEN];
   5419 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5420 	int error;
   5421 
   5422 	wmq->wmq_id = qidx;
   5423 	wmq->wmq_intr_idx = intr_idx;
   5424 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5425 #ifdef WM_MPSAFE
   5426 	    | SOFTINT_MPSAFE
   5427 #endif
   5428 	    , wm_handle_queue, wmq);
   5429 	if (wmq->wmq_si == NULL) {
   5430 		aprint_error_dev(sc->sc_dev,
   5431 		    "unable to establish queue[%d] softint handler\n",
   5432 		    wmq->wmq_id);
   5433 		goto err;
   5434 	}
   5435 
   5436 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   5437 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   5438 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   5439 	    WM_WORKQUEUE_FLAGS);
   5440 	if (error) {
   5441 		softint_disestablish(wmq->wmq_si);
   5442 		aprint_error_dev(sc->sc_dev,
   5443 		    "unable to create queue[%d] workqueue\n",
   5444 		    wmq->wmq_id);
   5445 		goto err;
   5446 	}
   5447 
   5448 	return 0;
   5449 
   5450 err:
   5451 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5452 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5453 	return ENOMEM;
   5454 }
   5455 
   5456 /*
   5457  * Both single interrupt MSI and INTx can use this function.
   5458  */
   5459 static int
   5460 wm_setup_legacy(struct wm_softc *sc)
   5461 {
   5462 	pci_chipset_tag_t pc = sc->sc_pc;
   5463 	const char *intrstr = NULL;
   5464 	char intrbuf[PCI_INTRSTR_LEN];
   5465 	int error;
   5466 
   5467 	error = wm_alloc_txrx_queues(sc);
   5468 	if (error) {
   5469 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5470 		    error);
   5471 		return ENOMEM;
   5472 	}
   5473 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5474 	    sizeof(intrbuf));
   5475 #ifdef WM_MPSAFE
   5476 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5477 #endif
   5478 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5479 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5480 	if (sc->sc_ihs[0] == NULL) {
   5481 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   5482 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5483 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5484 		return ENOMEM;
   5485 	}
   5486 
   5487 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5488 	sc->sc_nintrs = 1;
   5489 
   5490 	return wm_softhandler_establish(sc, 0, 0);
   5491 }
   5492 
   5493 static int
   5494 wm_setup_msix(struct wm_softc *sc)
   5495 {
   5496 	void *vih;
   5497 	kcpuset_t *affinity;
   5498 	int qidx, error, intr_idx, txrx_established;
   5499 	pci_chipset_tag_t pc = sc->sc_pc;
   5500 	const char *intrstr = NULL;
   5501 	char intrbuf[PCI_INTRSTR_LEN];
   5502 	char intr_xname[INTRDEVNAMEBUF];
   5503 
   5504 	if (sc->sc_nqueues < ncpu) {
   5505 		/*
   5506 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   5507 		 * interrupts starts from CPU#1.
   5508 		 */
   5509 		sc->sc_affinity_offset = 1;
   5510 	} else {
   5511 		/*
   5512 		 * This device uses all CPUs, so for readability we align the
   5513 		 * affinitized cpu_index with the MSI-X vector number.
   5514 		 */
   5515 		sc->sc_affinity_offset = 0;
   5516 	}
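
        	/*
        	 * For example (illustrative numbers): with 4 queues on an
        	 * 8-CPU machine, sc_affinity_offset is 1, so the loop below
        	 * computes affinity_to = (1 + intr_idx) % 8 and pins the
        	 * Tx/Rx vectors to CPU#1..CPU#4, leaving CPU#0 to other
        	 * devices' interrupts.
        	 */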
   5517 
   5518 	error = wm_alloc_txrx_queues(sc);
   5519 	if (error) {
   5520 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5521 		    error);
   5522 		return ENOMEM;
   5523 	}
   5524 
   5525 	kcpuset_create(&affinity, false);
   5526 	intr_idx = 0;
   5527 
   5528 	/*
   5529 	 * TX and RX
   5530 	 */
   5531 	txrx_established = 0;
   5532 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5533 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5534 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5535 
   5536 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5537 		    sizeof(intrbuf));
   5538 #ifdef WM_MPSAFE
   5539 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5540 		    PCI_INTR_MPSAFE, true);
   5541 #endif
   5542 		memset(intr_xname, 0, sizeof(intr_xname));
   5543 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5544 		    device_xname(sc->sc_dev), qidx);
   5545 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5546 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5547 		if (vih == NULL) {
   5548 			aprint_error_dev(sc->sc_dev,
   5549 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5550 			    intrstr ? " at " : "",
   5551 			    intrstr ? intrstr : "");
   5552 
   5553 			goto fail;
   5554 		}
   5555 		kcpuset_zero(affinity);
   5556 		/* Round-robin affinity */
   5557 		kcpuset_set(affinity, affinity_to);
   5558 		error = interrupt_distribute(vih, affinity, NULL);
   5559 		if (error == 0) {
   5560 			aprint_normal_dev(sc->sc_dev,
   5561 			    "for TX and RX interrupting at %s affinity to %u\n",
   5562 			    intrstr, affinity_to);
   5563 		} else {
   5564 			aprint_normal_dev(sc->sc_dev,
   5565 			    "for TX and RX interrupting at %s\n", intrstr);
   5566 		}
   5567 		sc->sc_ihs[intr_idx] = vih;
   5568 		if (wm_softhandler_establish(sc, qidx, intr_idx) != 0)
   5569 			goto fail;
   5570 		txrx_established++;
   5571 		intr_idx++;
   5572 	}
   5573 
   5574 	/* LINK */
   5575 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5576 	    sizeof(intrbuf));
   5577 #ifdef WM_MPSAFE
   5578 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5579 #endif
   5580 	memset(intr_xname, 0, sizeof(intr_xname));
   5581 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5582 	    device_xname(sc->sc_dev));
   5583 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5584 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5585 	if (vih == NULL) {
   5586 		aprint_error_dev(sc->sc_dev,
   5587 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5588 		    intrstr ? " at " : "",
   5589 		    intrstr ? intrstr : "");
   5590 
   5591 		goto fail;
   5592 	}
   5593 	/* Keep default affinity to LINK interrupt */
   5594 	aprint_normal_dev(sc->sc_dev,
   5595 	    "for LINK interrupting at %s\n", intrstr);
   5596 	sc->sc_ihs[intr_idx] = vih;
   5597 	sc->sc_link_intr_idx = intr_idx;
   5598 
   5599 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5600 	kcpuset_destroy(affinity);
   5601 	return 0;
   5602 
   5603  fail:
   5604 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5605 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5606 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5607 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5608 	}
   5609 
   5610 	kcpuset_destroy(affinity);
   5611 	return ENOMEM;
   5612 }
   5613 
   5614 static void
   5615 wm_unset_stopping_flags(struct wm_softc *sc)
   5616 {
   5617 	int i;
   5618 
   5619 	KASSERT(WM_CORE_LOCKED(sc));
   5620 
   5621 	/* Must unset stopping flags in ascending order. */
   5622 	for (i = 0; i < sc->sc_nqueues; i++) {
   5623 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5624 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5625 
   5626 		mutex_enter(txq->txq_lock);
   5627 		txq->txq_stopping = false;
   5628 		mutex_exit(txq->txq_lock);
   5629 
   5630 		mutex_enter(rxq->rxq_lock);
   5631 		rxq->rxq_stopping = false;
   5632 		mutex_exit(rxq->rxq_lock);
   5633 	}
   5634 
   5635 	sc->sc_core_stopping = false;
   5636 }
   5637 
   5638 static void
   5639 wm_set_stopping_flags(struct wm_softc *sc)
   5640 {
   5641 	int i;
   5642 
   5643 	KASSERT(WM_CORE_LOCKED(sc));
   5644 
   5645 	sc->sc_core_stopping = true;
   5646 
   5647 	/* Must set stopping flags in ascending order. */
   5648 	for (i = 0; i < sc->sc_nqueues; i++) {
   5649 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5650 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5651 
   5652 		mutex_enter(rxq->rxq_lock);
   5653 		rxq->rxq_stopping = true;
   5654 		mutex_exit(rxq->rxq_lock);
   5655 
   5656 		mutex_enter(txq->txq_lock);
   5657 		txq->txq_stopping = true;
   5658 		mutex_exit(txq->txq_lock);
   5659 	}
   5660 }
   5661 
   5662 /*
   5663  * Write interrupt interval value to ITR or EITR
   5664  */
   5665 static void
   5666 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5667 {
   5668 
   5669 	if (!wmq->wmq_set_itr)
   5670 		return;
   5671 
   5672 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5673 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5674 
   5675 		/*
   5676 		 * The 82575 doesn't have the CNT_INGR field,
   5677 		 * so overwrite the counter field in software.
   5678 		 */
   5679 		if (sc->sc_type == WM_T_82575)
   5680 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5681 		else
   5682 			eitr |= EITR_CNT_INGR;
   5683 
   5684 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5685 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5686 		/*
   5687 		 * The 82574 has both ITR and EITR. Set EITR when we use
   5688 		 * the multiqueue function with MSI-X.
   5689 		 */
   5690 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5691 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5692 	} else {
   5693 		KASSERT(wmq->wmq_id == 0);
   5694 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5695 	}
   5696 
   5697 	wmq->wmq_set_itr = false;
   5698 }
   5699 
   5700 /*
   5701  * TODO
   5702  * The dynamic itr calculation below is almost the same as Linux igb's;
   5703  * however, it does not fit wm(4) well, so AIM is disabled until we
   5704  * find an appropriate itr calculation.
   5705  */
   5706 /*
   5707  * Calculate the interrupt interval value that wm_itrs_writereg() will
   5708  * write to the register; this function itself does not write ITR/EITR.
   5709  */
   5710 static void
   5711 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5712 {
   5713 #ifdef NOTYET
   5714 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5715 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5716 	uint32_t avg_size = 0;
   5717 	uint32_t new_itr;
   5718 
   5719 	if (rxq->rxq_packets)
   5720 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5721 	if (txq->txq_packets)
   5722 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5723 
   5724 	if (avg_size == 0) {
   5725 		new_itr = 450; /* restore default value */
   5726 		goto out;
   5727 	}
   5728 
   5729 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5730 	avg_size += 24;
   5731 
   5732 	/* Don't starve jumbo frames */
   5733 	avg_size = uimin(avg_size, 3000);
   5734 
   5735 	/* Give a little boost to mid-size frames */
   5736 	if ((avg_size > 300) && (avg_size < 1200))
   5737 		new_itr = avg_size / 3;
   5738 	else
   5739 		new_itr = avg_size / 2;
   5740 
   5741 out:
   5742 	/*
   5743 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5744 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5745 	 */
   5746 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5747 		new_itr *= 4;
   5748 
   5749 	if (new_itr != wmq->wmq_itr) {
   5750 		wmq->wmq_itr = new_itr;
   5751 		wmq->wmq_set_itr = true;
   5752 	} else
   5753 		wmq->wmq_set_itr = false;
   5754 
   5755 	rxq->rxq_packets = 0;
   5756 	rxq->rxq_bytes = 0;
   5757 	txq->txq_packets = 0;
   5758 	txq->txq_bytes = 0;
   5759 #endif
   5760 }
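
        /*
         * Worked example of the disabled calculation above (illustrative
         * numbers): an average frame of 600 bytes becomes 624 after the
         * 24 byte CRC/preamble/gap allowance; since 300 < 624 < 1200 it
         * takes the mid-size boost, new_itr = 624 / 3 = 208, which is
         * then quadrupled to 832 on everything except the 82575.
         */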
   5761 
   5762 static void
   5763 wm_init_sysctls(struct wm_softc *sc)
   5764 {
   5765 	struct sysctllog **log;
   5766 	const struct sysctlnode *rnode, *cnode;
   5767 	int rv;
   5768 	const char *dvname;
   5769 
   5770 	log = &sc->sc_sysctllog;
   5771 	dvname = device_xname(sc->sc_dev);
   5772 
   5773 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5774 	    0, CTLTYPE_NODE, dvname,
   5775 	    SYSCTL_DESCR("wm information and settings"),
   5776 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5777 	if (rv != 0)
   5778 		goto err;
   5779 
   5780 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5781 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5782 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5783 	if (rv != 0)
   5784 		goto teardown;
   5785 
   5786 	return;
   5787 
   5788 teardown:
   5789 	sysctl_teardown(log);
   5790 err:
   5791 	sc->sc_sysctllog = NULL;
   5792 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5793 	    __func__, rv);
   5794 }
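
        /*
         * The knob created above lands under hw.<device>; e.g., on a
         * hypothetical wm0:
         *
         *	# sysctl -w hw.wm0.txrx_workqueue=1
         *
         * selects workqueue-based (rather than softint-based) Tx/Rx
         * processing via sc_txrx_use_workqueue.
         */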
   5795 
   5796 /*
   5797  * wm_init:		[ifnet interface function]
   5798  *
   5799  *	Initialize the interface.
   5800  */
   5801 static int
   5802 wm_init(struct ifnet *ifp)
   5803 {
   5804 	struct wm_softc *sc = ifp->if_softc;
   5805 	int ret;
   5806 
   5807 	WM_CORE_LOCK(sc);
   5808 	ret = wm_init_locked(ifp);
   5809 	WM_CORE_UNLOCK(sc);
   5810 
   5811 	return ret;
   5812 }
   5813 
   5814 static int
   5815 wm_init_locked(struct ifnet *ifp)
   5816 {
   5817 	struct wm_softc *sc = ifp->if_softc;
   5818 	struct ethercom *ec = &sc->sc_ethercom;
   5819 	int i, j, trynum, error = 0;
   5820 	uint32_t reg, sfp_mask = 0;
   5821 
   5822 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5823 		device_xname(sc->sc_dev), __func__));
   5824 	KASSERT(WM_CORE_LOCKED(sc));
   5825 
   5826 	/*
   5827 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
   5828 	 * There is a small but measurable benefit to avoiding the adjustment
   5829 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5830 	 * on such platforms.  One possibility is that the DMA itself is
   5831 	 * slightly more efficient if the front of the entire packet (instead
   5832 	 * of the front of the headers) is aligned.
   5833 	 *
   5834 	 * Note we must always set align_tweak to 0 if we are using
   5835 	 * jumbo frames.
   5836 	 */
   5837 #ifdef __NO_STRICT_ALIGNMENT
   5838 	sc->sc_align_tweak = 0;
   5839 #else
   5840 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5841 		sc->sc_align_tweak = 0;
   5842 	else
   5843 		sc->sc_align_tweak = 2;
   5844 #endif /* __NO_STRICT_ALIGNMENT */
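
        	/*
        	 * Worked example: with a standard 1500-byte MTU, 1500 +
        	 * ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518, which fits
        	 * in MCLBYTES - 2 (2046 with the usual 2048-byte clusters),
        	 * so sc_align_tweak is 2 and the IP header after the 14-byte
        	 * Ethernet header lands on a 4-byte boundary (2 + 14 = 16).
        	 */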
   5845 
   5846 	/* Cancel any pending I/O. */
   5847 	wm_stop_locked(ifp, 0);
   5848 
   5849 	/* Update statistics before reset */
   5850 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5851 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5852 
   5853 	/* PCH_SPT hardware workaround */
   5854 	if (sc->sc_type == WM_T_PCH_SPT)
   5855 		wm_flush_desc_rings(sc);
   5856 
   5857 	/* Reset the chip to a known state. */
   5858 	wm_reset(sc);
   5859 
   5860 	/*
   5861 	 * AMT based hardware can now take control from firmware
   5862 	 * Do this after reset.
   5863 	 */
   5864 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5865 		wm_get_hw_control(sc);
   5866 
   5867 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5868 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5869 		wm_legacy_irq_quirk_spt(sc);
   5870 
   5871 	/* Init hardware bits */
   5872 	wm_initialize_hardware_bits(sc);
   5873 
   5874 	/* Reset the PHY. */
   5875 	if (sc->sc_flags & WM_F_HAS_MII)
   5876 		wm_gmii_reset(sc);
   5877 
   5878 	if (sc->sc_type >= WM_T_ICH8) {
   5879 		reg = CSR_READ(sc, WMREG_GCR);
   5880 		/*
   5881 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5882 		 * default after reset.
   5883 		 */
   5884 		if (sc->sc_type == WM_T_ICH8)
   5885 			reg |= GCR_NO_SNOOP_ALL;
   5886 		else
   5887 			reg &= ~GCR_NO_SNOOP_ALL;
   5888 		CSR_WRITE(sc, WMREG_GCR, reg);
   5889 	}
   5890 	if ((sc->sc_type >= WM_T_ICH8)
   5891 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5892 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5893 
   5894 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5895 		reg |= CTRL_EXT_RO_DIS;
   5896 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5897 	}
   5898 
   5899 	/* Calculate (E)ITR value */
   5900 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5901 		/*
   5902 		 * For NEWQUEUE's EITR (except for 82575).
   5903 		 * 82575's EITR should be set same throttling value as other
   5904 		 * old controllers' ITR because the interrupt/sec calculation
   5905 		 * is the same, that is, 1,000,000,000 / (N * 256).
   5906 		 *
   5907 		 * 82574's EITR should be set same throttling value as ITR.
   5908 		 *
   5909 		 * For N interrupts/sec, set this value to:
   5910 		 * 1,000,000 / N in contrast to ITR throttoling value.
   5911 		 */
   5912 		sc->sc_itr_init = 450;
   5913 	} else if (sc->sc_type >= WM_T_82543) {
   5914 		/*
   5915 		 * Set up the interrupt throttling register (units of 256ns)
   5916 		 * Note that a footnote in Intel's documentation says this
   5917 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5918 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5919 		 * that that is also true for the 1024ns units of the other
   5920 		 * interrupt-related timer registers -- so, really, we ought
   5921 		 * to divide this value by 4 when the link speed is low.
   5922 		 *
   5923 		 * XXX implement this division at link speed change!
   5924 		 */
   5925 
   5926 		/*
   5927 		 * For N interrupts/sec, set this value to:
   5928 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5929 		 * absolute and packet timer values to this value
   5930 		 * divided by 4 to get "simple timer" behavior.
   5931 		 */
   5932 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5933 	}
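
        	/*
        	 * Worked examples of the two formulas above: the legacy ITR
        	 * value of 1500 gives 1,000,000,000 / (1500 * 256) ~= 2604
        	 * interrupts/sec, while the NEWQUEUE EITR value of 450 gives
        	 * 1,000,000 / 450 ~= 2222 interrupts/sec.
        	 */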
   5934 
   5935 	error = wm_init_txrx_queues(sc);
   5936 	if (error)
   5937 		goto out;
   5938 
   5939 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   5940 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   5941 	    (sc->sc_type >= WM_T_82575))
   5942 		wm_serdes_power_up_link_82575(sc);
   5943 
   5944 	/* Clear out the VLAN table -- we don't use it (yet). */
   5945 	CSR_WRITE(sc, WMREG_VET, 0);
   5946 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5947 		trynum = 10; /* Due to hw errata */
   5948 	else
   5949 		trynum = 1;
   5950 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5951 		for (j = 0; j < trynum; j++)
   5952 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5953 
   5954 	/*
   5955 	 * Set up flow-control parameters.
   5956 	 *
   5957 	 * XXX Values could probably stand some tuning.
   5958 	 */
   5959 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5960 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5961 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5962 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5963 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5964 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5965 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5966 	}
   5967 
   5968 	sc->sc_fcrtl = FCRTL_DFLT;
   5969 	if (sc->sc_type < WM_T_82543) {
   5970 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5971 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5972 	} else {
   5973 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5974 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5975 	}
   5976 
   5977 	if (sc->sc_type == WM_T_80003)
   5978 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5979 	else
   5980 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5981 
   5982 	/* Writes the control register. */
   5983 	wm_set_vlan(sc);
   5984 
   5985 	if (sc->sc_flags & WM_F_HAS_MII) {
   5986 		uint16_t kmreg;
   5987 
   5988 		switch (sc->sc_type) {
   5989 		case WM_T_80003:
   5990 		case WM_T_ICH8:
   5991 		case WM_T_ICH9:
   5992 		case WM_T_ICH10:
   5993 		case WM_T_PCH:
   5994 		case WM_T_PCH2:
   5995 		case WM_T_PCH_LPT:
   5996 		case WM_T_PCH_SPT:
   5997 		case WM_T_PCH_CNP:
   5998 			/*
   5999 			 * Set the mac to wait the maximum time between each
   6000 			 * iteration and increase the max iterations when
   6001 			 * polling the phy; this fixes erroneous timeouts at
   6002 			 * 10Mbps.
   6003 			 */
   6004 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6005 			    0xFFFF);
   6006 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6007 			    &kmreg);
   6008 			kmreg |= 0x3F;
   6009 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6010 			    kmreg);
   6011 			break;
   6012 		default:
   6013 			break;
   6014 		}
   6015 
   6016 		if (sc->sc_type == WM_T_80003) {
   6017 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6018 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6019 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6020 
   6021 			/* Bypass RX and TX FIFO's */
   6022 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6023 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6024 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6025 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6026 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6027 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6028 		}
   6029 	}
   6030 #if 0
   6031 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6032 #endif
   6033 
   6034 	/* Set up checksum offload parameters. */
   6035 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6036 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6037 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6038 		reg |= RXCSUM_IPOFL;
   6039 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6040 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6041 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6042 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6043 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6044 
   6045 	/* Set registers about MSI-X */
   6046 	if (wm_is_using_msix(sc)) {
   6047 		uint32_t ivar, qintr_idx;
   6048 		struct wm_queue *wmq;
   6049 		unsigned int qid;
   6050 
   6051 		if (sc->sc_type == WM_T_82575) {
   6052 			/* Interrupt control */
   6053 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6054 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6055 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6056 
   6057 			/* TX and RX */
   6058 			for (i = 0; i < sc->sc_nqueues; i++) {
   6059 				wmq = &sc->sc_queue[i];
   6060 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6061 				    EITR_TX_QUEUE(wmq->wmq_id)
   6062 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6063 			}
   6064 			/* Link status */
   6065 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6066 			    EITR_OTHER);
   6067 		} else if (sc->sc_type == WM_T_82574) {
   6068 			/* Interrupt control */
   6069 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6070 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6071 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6072 
   6073 			/*
   6074 			 * Work around an issue with spurious interrupts
   6075 			 * in MSI-X mode.
   6076 			 * At wm_initialize_hardware_bits(), sc_nintrs is not
   6077 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   6078 			 */
   6079 			reg = CSR_READ(sc, WMREG_RFCTL);
   6080 			reg |= WMREG_RFCTL_ACKDIS;
   6081 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6082 
   6083 			ivar = 0;
   6084 			/* TX and RX */
   6085 			for (i = 0; i < sc->sc_nqueues; i++) {
   6086 				wmq = &sc->sc_queue[i];
   6087 				qid = wmq->wmq_id;
   6088 				qintr_idx = wmq->wmq_intr_idx;
   6089 
   6090 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6091 				    IVAR_TX_MASK_Q_82574(qid));
   6092 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6093 				    IVAR_RX_MASK_Q_82574(qid));
   6094 			}
   6095 			/* Link status */
   6096 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6097 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6098 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6099 		} else {
   6100 			/* Interrupt control */
   6101 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6102 			    | GPIE_EIAME | GPIE_PBA);
   6103 
   6104 			switch (sc->sc_type) {
   6105 			case WM_T_82580:
   6106 			case WM_T_I350:
   6107 			case WM_T_I354:
   6108 			case WM_T_I210:
   6109 			case WM_T_I211:
   6110 				/* TX and RX */
   6111 				for (i = 0; i < sc->sc_nqueues; i++) {
   6112 					wmq = &sc->sc_queue[i];
   6113 					qid = wmq->wmq_id;
   6114 					qintr_idx = wmq->wmq_intr_idx;
   6115 
   6116 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6117 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6118 					ivar |= __SHIFTIN((qintr_idx
   6119 						| IVAR_VALID),
   6120 					    IVAR_TX_MASK_Q(qid));
   6121 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6122 					ivar |= __SHIFTIN((qintr_idx
   6123 						| IVAR_VALID),
   6124 					    IVAR_RX_MASK_Q(qid));
   6125 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6126 				}
   6127 				break;
   6128 			case WM_T_82576:
   6129 				/* TX and RX */
   6130 				for (i = 0; i < sc->sc_nqueues; i++) {
   6131 					wmq = &sc->sc_queue[i];
   6132 					qid = wmq->wmq_id;
   6133 					qintr_idx = wmq->wmq_intr_idx;
   6134 
   6135 					ivar = CSR_READ(sc,
   6136 					    WMREG_IVAR_Q_82576(qid));
   6137 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6138 					ivar |= __SHIFTIN((qintr_idx
   6139 						| IVAR_VALID),
   6140 					    IVAR_TX_MASK_Q_82576(qid));
   6141 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6142 					ivar |= __SHIFTIN((qintr_idx
   6143 						| IVAR_VALID),
   6144 					    IVAR_RX_MASK_Q_82576(qid));
   6145 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6146 					    ivar);
   6147 				}
   6148 				break;
   6149 			default:
   6150 				break;
   6151 			}
   6152 
   6153 			/* Link status */
   6154 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6155 			    IVAR_MISC_OTHER);
   6156 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6157 		}
   6158 
   6159 		if (wm_is_using_multiqueue(sc)) {
   6160 			wm_init_rss(sc);
   6161 
   6162 			/*
   6163 			 * NOTE: Receive Full-Packet Checksum Offload
   6164 			 * is mutually exclusive with Multiqueue. However,
   6165 			 * this is not the same as TCP/IP checksums, which
   6166 			 * still work.
   6167 			 */
   6168 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6169 			reg |= RXCSUM_PCSD;
   6170 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6171 		}
   6172 	}
   6173 
   6174 	/* Set up the interrupt registers. */
   6175 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6176 
   6177 	/* Enable SFP module insertion interrupt if it's required */
   6178 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6179 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6180 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6181 		sfp_mask = ICR_GPI(0);
   6182 	}
   6183 
   6184 	if (wm_is_using_msix(sc)) {
   6185 		uint32_t mask;
   6186 		struct wm_queue *wmq;
   6187 
   6188 		switch (sc->sc_type) {
   6189 		case WM_T_82574:
   6190 			mask = 0;
   6191 			for (i = 0; i < sc->sc_nqueues; i++) {
   6192 				wmq = &sc->sc_queue[i];
   6193 				mask |= ICR_TXQ(wmq->wmq_id);
   6194 				mask |= ICR_RXQ(wmq->wmq_id);
   6195 			}
   6196 			mask |= ICR_OTHER;
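         			/*
         			 * Program these interrupt causes to auto-clear
         			 * when their MSI-X vector fires (EIAC), then
         			 * unmask them, together with link status change,
         			 * in IMS.
         			 */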
   6197 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6198 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6199 			break;
   6200 		default:
   6201 			if (sc->sc_type == WM_T_82575) {
   6202 				mask = 0;
   6203 				for (i = 0; i < sc->sc_nqueues; i++) {
   6204 					wmq = &sc->sc_queue[i];
   6205 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6206 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6207 				}
   6208 				mask |= EITR_OTHER;
   6209 			} else {
   6210 				mask = 0;
   6211 				for (i = 0; i < sc->sc_nqueues; i++) {
   6212 					wmq = &sc->sc_queue[i];
   6213 					mask |= 1 << wmq->wmq_intr_idx;
   6214 				}
   6215 				mask |= 1 << sc->sc_link_intr_idx;
   6216 			}
   6217 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6218 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6219 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6220 
   6221 			/* For other interrupts */
   6222 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6223 			break;
   6224 		}
   6225 	} else {
   6226 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6227 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6228 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6229 	}
   6230 
   6231 	/* Set up the inter-packet gap. */
   6232 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6233 
   6234 	if (sc->sc_type >= WM_T_82543) {
   6235 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6236 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6237 			wm_itrs_writereg(sc, wmq);
   6238 		}
    6239 		/*
    6240 		 * Link interrupts occur much less frequently than TX
    6241 		 * and RX interrupts, so we don't tune the
    6242 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6243 		 * if_igb does.
    6244 		 */
   6245 	}
   6246 
   6247 	/* Set the VLAN ethernetype. */
   6248 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6249 
    6250 	/*
    6251 	 * Set up the transmit control register; we start out with
    6252 	 * a collision distance suitable for FDX, but update it when
    6253 	 * we resolve the media type.
    6254 	 */
   6255 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6256 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6257 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6258 	if (sc->sc_type >= WM_T_82571)
   6259 		sc->sc_tctl |= TCTL_MULR;
   6260 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6261 
   6262 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6263 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6264 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6265 	}
   6266 
   6267 	if (sc->sc_type == WM_T_80003) {
   6268 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6269 		reg &= ~TCTL_EXT_GCEX_MASK;
   6270 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6271 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6272 	}
   6273 
   6274 	/* Set the media. */
   6275 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6276 		goto out;
   6277 
   6278 	/* Configure for OS presence */
   6279 	wm_init_manageability(sc);
   6280 
   6281 	/*
   6282 	 * Set up the receive control register; we actually program the
   6283 	 * register when we set the receive filter. Use multicast address
   6284 	 * offset type 0.
   6285 	 *
   6286 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6287 	 * don't enable that feature.
   6288 	 */
   6289 	sc->sc_mchash_type = 0;
   6290 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6291 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6292 
    6293 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   6294 	if (sc->sc_type == WM_T_82574)
   6295 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6296 
    6297 	/*
    6298 	 * The I350 has a bug where it always strips the CRC whether asked
    6299 	 * to or not, so ask for stripped CRC here and cope in rxeof.
    6300 	 */
   6301 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6302 	    || (sc->sc_type == WM_T_I210))
   6303 		sc->sc_rctl |= RCTL_SECRC;
   6304 
   6305 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6306 	    && (ifp->if_mtu > ETHERMTU)) {
   6307 		sc->sc_rctl |= RCTL_LPE;
   6308 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6309 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6310 	}
   6311 
   6312 	if (MCLBYTES == 2048)
   6313 		sc->sc_rctl |= RCTL_2k;
   6314 	else {
   6315 		if (sc->sc_type >= WM_T_82543) {
   6316 			switch (MCLBYTES) {
   6317 			case 4096:
   6318 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6319 				break;
   6320 			case 8192:
   6321 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6322 				break;
   6323 			case 16384:
   6324 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6325 				break;
   6326 			default:
   6327 				panic("wm_init: MCLBYTES %d unsupported",
   6328 				    MCLBYTES);
   6329 				break;
   6330 			}
   6331 		} else
   6332 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6333 	}
   6334 
   6335 	/* Enable ECC */
   6336 	switch (sc->sc_type) {
   6337 	case WM_T_82571:
   6338 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6339 		reg |= PBA_ECC_CORR_EN;
   6340 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6341 		break;
   6342 	case WM_T_PCH_LPT:
   6343 	case WM_T_PCH_SPT:
   6344 	case WM_T_PCH_CNP:
   6345 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6346 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6347 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6348 
   6349 		sc->sc_ctrl |= CTRL_MEHE;
   6350 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6351 		break;
   6352 	default:
   6353 		break;
   6354 	}
   6355 
   6356 	/*
   6357 	 * Set the receive filter.
   6358 	 *
   6359 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6360 	 * the setting of RCTL.EN in wm_set_filter()
   6361 	 */
   6362 	wm_set_filter(sc);
   6363 
    6364 	/* On 82575 and later, set RDT only after RX is enabled */
   6365 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6366 		int qidx;
   6367 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6368 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6369 			for (i = 0; i < WM_NRXDESC; i++) {
   6370 				mutex_enter(rxq->rxq_lock);
   6371 				wm_init_rxdesc(rxq, i);
   6372 				mutex_exit(rxq->rxq_lock);
    6374 			}
   6375 		}
   6376 	}
   6377 
   6378 	wm_unset_stopping_flags(sc);
   6379 
   6380 	/* Start the one second link check clock. */
   6381 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6382 
   6383 	/* ...all done! */
   6384 	ifp->if_flags |= IFF_RUNNING;
   6385 	ifp->if_flags &= ~IFF_OACTIVE;
   6386 
   6387  out:
   6388 	/* Save last flags for the callback */
   6389 	sc->sc_if_flags = ifp->if_flags;
   6390 	sc->sc_ec_capenable = ec->ec_capenable;
   6391 	if (error)
   6392 		log(LOG_ERR, "%s: interface not running\n",
   6393 		    device_xname(sc->sc_dev));
   6394 	return error;
   6395 }
   6396 
   6397 /*
   6398  * wm_stop:		[ifnet interface function]
   6399  *
   6400  *	Stop transmission on the interface.
   6401  */
   6402 static void
   6403 wm_stop(struct ifnet *ifp, int disable)
   6404 {
   6405 	struct wm_softc *sc = ifp->if_softc;
   6406 
   6407 	WM_CORE_LOCK(sc);
   6408 	wm_stop_locked(ifp, disable);
   6409 	WM_CORE_UNLOCK(sc);
   6410 }
   6411 
   6412 static void
   6413 wm_stop_locked(struct ifnet *ifp, int disable)
   6414 {
   6415 	struct wm_softc *sc = ifp->if_softc;
   6416 	struct wm_txsoft *txs;
   6417 	int i, qidx;
   6418 
   6419 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6420 		device_xname(sc->sc_dev), __func__));
   6421 	KASSERT(WM_CORE_LOCKED(sc));
   6422 
   6423 	wm_set_stopping_flags(sc);
   6424 
   6425 	/* Stop the one second clock. */
   6426 	callout_stop(&sc->sc_tick_ch);
   6427 
   6428 	/* Stop the 82547 Tx FIFO stall check timer. */
   6429 	if (sc->sc_type == WM_T_82547)
   6430 		callout_stop(&sc->sc_txfifo_ch);
   6431 
   6432 	if (sc->sc_flags & WM_F_HAS_MII) {
   6433 		/* Down the MII. */
   6434 		mii_down(&sc->sc_mii);
   6435 	} else {
   6436 #if 0
   6437 		/* Should we clear PHY's status properly? */
   6438 		wm_reset(sc);
   6439 #endif
   6440 	}
   6441 
   6442 	/* Stop the transmit and receive processes. */
   6443 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6444 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6445 	sc->sc_rctl &= ~RCTL_EN;
   6446 
   6447 	/*
   6448 	 * Clear the interrupt mask to ensure the device cannot assert its
   6449 	 * interrupt line.
   6450 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6451 	 * service any currently pending or shared interrupt.
   6452 	 */
   6453 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6454 	sc->sc_icr = 0;
   6455 	if (wm_is_using_msix(sc)) {
   6456 		if (sc->sc_type != WM_T_82574) {
   6457 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6458 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6459 		} else
   6460 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6461 	}
   6462 
   6463 	/* Release any queued transmit buffers. */
   6464 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6465 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6466 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6467 		mutex_enter(txq->txq_lock);
   6468 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6469 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6470 			txs = &txq->txq_soft[i];
   6471 			if (txs->txs_mbuf != NULL) {
    6472 				bus_dmamap_unload(sc->sc_dmat,
         				    txs->txs_dmamap);
   6473 				m_freem(txs->txs_mbuf);
   6474 				txs->txs_mbuf = NULL;
   6475 			}
   6476 		}
   6477 		mutex_exit(txq->txq_lock);
   6478 	}
   6479 
   6480 	/* Mark the interface as down and cancel the watchdog timer. */
   6481 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6482 
   6483 	if (disable) {
   6484 		for (i = 0; i < sc->sc_nqueues; i++) {
   6485 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6486 			mutex_enter(rxq->rxq_lock);
   6487 			wm_rxdrain(rxq);
   6488 			mutex_exit(rxq->rxq_lock);
   6489 		}
   6490 	}
   6491 
   6492 #if 0 /* notyet */
   6493 	if (sc->sc_type >= WM_T_82544)
   6494 		CSR_WRITE(sc, WMREG_WUC, 0);
   6495 #endif
   6496 }
   6497 
   6498 static void
   6499 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6500 {
   6501 	struct mbuf *m;
   6502 	int i;
   6503 
   6504 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6505 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6506 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6507 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6508 		    m->m_data, m->m_len, m->m_flags);
   6509 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6510 	    i, i == 1 ? "" : "s");
   6511 }
   6512 
   6513 /*
   6514  * wm_82547_txfifo_stall:
   6515  *
   6516  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6517  *	reset the FIFO pointers, and restart packet transmission.
   6518  */
   6519 static void
   6520 wm_82547_txfifo_stall(void *arg)
   6521 {
   6522 	struct wm_softc *sc = arg;
   6523 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6524 
   6525 	mutex_enter(txq->txq_lock);
   6526 
   6527 	if (txq->txq_stopping)
   6528 		goto out;
   6529 
   6530 	if (txq->txq_fifo_stall) {
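         		/*
         		 * The FIFO is considered drained when the descriptor
         		 * ring is empty (TDT == TDH) and the internal Tx FIFO
         		 * head/tail pointers and their saved copies all match.
         		 */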
   6531 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6532 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6533 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6534 			/*
   6535 			 * Packets have drained.  Stop transmitter, reset
   6536 			 * FIFO pointers, restart transmitter, and kick
   6537 			 * the packet queue.
   6538 			 */
   6539 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6540 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6541 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6542 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6543 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6544 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6545 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6546 			CSR_WRITE_FLUSH(sc);
   6547 
   6548 			txq->txq_fifo_head = 0;
   6549 			txq->txq_fifo_stall = 0;
   6550 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6551 		} else {
   6552 			/*
   6553 			 * Still waiting for packets to drain; try again in
   6554 			 * another tick.
   6555 			 */
   6556 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6557 		}
   6558 	}
   6559 
   6560 out:
   6561 	mutex_exit(txq->txq_lock);
   6562 }
   6563 
   6564 /*
   6565  * wm_82547_txfifo_bugchk:
   6566  *
   6567  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6568  *	prevent enqueueing a packet that would wrap around the end
    6569  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6570  *
   6571  *	We do this by checking the amount of space before the end
   6572  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6573  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6574  *	the internal FIFO pointers to the beginning, and restart
   6575  *	transmission on the interface.
   6576  */
   6577 #define	WM_FIFO_HDR		0x10
   6578 #define	WM_82547_PAD_LEN	0x3e0
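         /*
          * Worked example: a 1514-byte frame occupies
          * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600 bytes of FIFO
          * space, so it is deferred once the space remaining before the end
          * of the FIFO is 0x600 - WM_82547_PAD_LEN = 0x220 bytes or less.
          */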
   6579 static int
   6580 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6581 {
   6582 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6583 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6584 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6585 
   6586 	/* Just return if already stalled. */
   6587 	if (txq->txq_fifo_stall)
   6588 		return 1;
   6589 
   6590 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6591 		/* Stall only occurs in half-duplex mode. */
   6592 		goto send_packet;
   6593 	}
   6594 
   6595 	if (len >= WM_82547_PAD_LEN + space) {
   6596 		txq->txq_fifo_stall = 1;
   6597 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6598 		return 1;
   6599 	}
   6600 
   6601  send_packet:
   6602 	txq->txq_fifo_head += len;
   6603 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6604 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6605 
   6606 	return 0;
   6607 }
   6608 
   6609 static int
   6610 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6611 {
   6612 	int error;
   6613 
   6614 	/*
   6615 	 * Allocate the control data structures, and create and load the
   6616 	 * DMA map for it.
   6617 	 *
   6618 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6619 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6620 	 * both sets within the same 4G segment.
   6621 	 */
   6622 	if (sc->sc_type < WM_T_82544)
   6623 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6624 	else
   6625 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6626 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6627 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6628 	else
   6629 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6630 
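         	/*
         	 * The bus_dmamem_alloc() boundary argument of 0x100000000ULL
         	 * below keeps each allocation from crossing a 4GB boundary,
         	 * which is what enforces the same-4G-segment rule above.
         	 */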
   6631 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6632 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6633 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6634 		aprint_error_dev(sc->sc_dev,
   6635 		    "unable to allocate TX control data, error = %d\n",
   6636 		    error);
   6637 		goto fail_0;
   6638 	}
   6639 
   6640 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6641 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6642 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6643 		aprint_error_dev(sc->sc_dev,
   6644 		    "unable to map TX control data, error = %d\n", error);
   6645 		goto fail_1;
   6646 	}
   6647 
   6648 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6649 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6650 		aprint_error_dev(sc->sc_dev,
   6651 		    "unable to create TX control data DMA map, error = %d\n",
   6652 		    error);
   6653 		goto fail_2;
   6654 	}
   6655 
   6656 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6657 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6658 		aprint_error_dev(sc->sc_dev,
   6659 		    "unable to load TX control data DMA map, error = %d\n",
   6660 		    error);
   6661 		goto fail_3;
   6662 	}
   6663 
   6664 	return 0;
   6665 
   6666  fail_3:
   6667 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6668  fail_2:
   6669 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6670 	    WM_TXDESCS_SIZE(txq));
   6671  fail_1:
   6672 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6673  fail_0:
   6674 	return error;
   6675 }
   6676 
   6677 static void
   6678 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6679 {
   6680 
   6681 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6682 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6683 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6684 	    WM_TXDESCS_SIZE(txq));
   6685 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6686 }
   6687 
   6688 static int
   6689 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6690 {
   6691 	int error;
   6692 	size_t rxq_descs_size;
   6693 
   6694 	/*
   6695 	 * Allocate the control data structures, and create and load the
   6696 	 * DMA map for it.
   6697 	 *
   6698 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6699 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6700 	 * both sets within the same 4G segment.
   6701 	 */
   6702 	rxq->rxq_ndesc = WM_NRXDESC;
   6703 	if (sc->sc_type == WM_T_82574)
   6704 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6705 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6706 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6707 	else
   6708 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6709 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6710 
   6711 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6712 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6713 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6714 		aprint_error_dev(sc->sc_dev,
   6715 		    "unable to allocate RX control data, error = %d\n",
   6716 		    error);
   6717 		goto fail_0;
   6718 	}
   6719 
   6720 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6721 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6722 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6723 		aprint_error_dev(sc->sc_dev,
   6724 		    "unable to map RX control data, error = %d\n", error);
   6725 		goto fail_1;
   6726 	}
   6727 
   6728 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6729 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6730 		aprint_error_dev(sc->sc_dev,
   6731 		    "unable to create RX control data DMA map, error = %d\n",
   6732 		    error);
   6733 		goto fail_2;
   6734 	}
   6735 
   6736 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6737 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6738 		aprint_error_dev(sc->sc_dev,
   6739 		    "unable to load RX control data DMA map, error = %d\n",
   6740 		    error);
   6741 		goto fail_3;
   6742 	}
   6743 
   6744 	return 0;
   6745 
   6746  fail_3:
   6747 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6748  fail_2:
   6749 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6750 	    rxq_descs_size);
   6751  fail_1:
   6752 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6753  fail_0:
   6754 	return error;
   6755 }
   6756 
   6757 static void
   6758 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6759 {
   6760 
   6761 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6762 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6763 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6764 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6765 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6766 }
    6767 
   6769 static int
   6770 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6771 {
   6772 	int i, error;
   6773 
   6774 	/* Create the transmit buffer DMA maps. */
   6775 	WM_TXQUEUELEN(txq) =
   6776 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6777 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6778 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6779 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6780 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6781 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6782 			aprint_error_dev(sc->sc_dev,
   6783 			    "unable to create Tx DMA map %d, error = %d\n",
   6784 			    i, error);
   6785 			goto fail;
   6786 		}
   6787 	}
   6788 
   6789 	return 0;
   6790 
   6791  fail:
   6792 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6793 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6794 			bus_dmamap_destroy(sc->sc_dmat,
   6795 			    txq->txq_soft[i].txs_dmamap);
   6796 	}
   6797 	return error;
   6798 }
   6799 
   6800 static void
   6801 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6802 {
   6803 	int i;
   6804 
   6805 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6806 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6807 			bus_dmamap_destroy(sc->sc_dmat,
   6808 			    txq->txq_soft[i].txs_dmamap);
   6809 	}
   6810 }
   6811 
   6812 static int
   6813 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6814 {
   6815 	int i, error;
   6816 
   6817 	/* Create the receive buffer DMA maps. */
   6818 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6819 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6820 			    MCLBYTES, 0, 0,
   6821 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6822 			aprint_error_dev(sc->sc_dev,
   6823 			    "unable to create Rx DMA map %d error = %d\n",
   6824 			    i, error);
   6825 			goto fail;
   6826 		}
   6827 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6828 	}
   6829 
   6830 	return 0;
   6831 
   6832  fail:
   6833 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6834 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6835 			bus_dmamap_destroy(sc->sc_dmat,
   6836 			    rxq->rxq_soft[i].rxs_dmamap);
   6837 	}
   6838 	return error;
   6839 }
   6840 
   6841 static void
   6842 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6843 {
   6844 	int i;
   6845 
   6846 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6847 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6848 			bus_dmamap_destroy(sc->sc_dmat,
   6849 			    rxq->rxq_soft[i].rxs_dmamap);
   6850 	}
   6851 }
   6852 
   6853 /*
    6854  * wm_alloc_txrx_queues:
    6855  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6856  */
   6857 static int
   6858 wm_alloc_txrx_queues(struct wm_softc *sc)
   6859 {
   6860 	int i, error, tx_done, rx_done;
   6861 
   6862 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6863 	    KM_SLEEP);
   6864 	if (sc->sc_queue == NULL) {
    6865 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6866 		error = ENOMEM;
   6867 		goto fail_0;
   6868 	}
   6869 
   6870 	/* For transmission */
   6871 	error = 0;
   6872 	tx_done = 0;
   6873 	for (i = 0; i < sc->sc_nqueues; i++) {
   6874 #ifdef WM_EVENT_COUNTERS
   6875 		int j;
   6876 		const char *xname;
   6877 #endif
   6878 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6879 		txq->txq_sc = sc;
   6880 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6881 
   6882 		error = wm_alloc_tx_descs(sc, txq);
   6883 		if (error)
   6884 			break;
   6885 		error = wm_alloc_tx_buffer(sc, txq);
   6886 		if (error) {
   6887 			wm_free_tx_descs(sc, txq);
   6888 			break;
   6889 		}
   6890 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6891 		if (txq->txq_interq == NULL) {
   6892 			wm_free_tx_descs(sc, txq);
   6893 			wm_free_tx_buffer(sc, txq);
   6894 			error = ENOMEM;
   6895 			break;
   6896 		}
   6897 
   6898 #ifdef WM_EVENT_COUNTERS
   6899 		xname = device_xname(sc->sc_dev);
   6900 
   6901 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6902 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6903 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6904 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6905 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6906 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6907 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6908 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6909 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6910 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6911 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6912 
   6913 		for (j = 0; j < WM_NTXSEGS; j++) {
    6914 			snprintf(txq->txq_txseg_evcnt_names[j],
    6915 			    sizeof(txq->txq_txseg_evcnt_names[j]),
         			    "txq%02dtxseg%d", i, j);
    6916 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
    6917 			    EVCNT_TYPE_MISC, NULL, xname,
         			    txq->txq_txseg_evcnt_names[j]);
   6918 		}
   6919 
   6920 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6921 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6922 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6923 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6924 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6925 #endif /* WM_EVENT_COUNTERS */
   6926 
   6927 		tx_done++;
   6928 	}
   6929 	if (error)
   6930 		goto fail_1;
   6931 
   6932 	/* For receive */
   6933 	error = 0;
   6934 	rx_done = 0;
   6935 	for (i = 0; i < sc->sc_nqueues; i++) {
   6936 #ifdef WM_EVENT_COUNTERS
   6937 		const char *xname;
   6938 #endif
   6939 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6940 		rxq->rxq_sc = sc;
   6941 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6942 
   6943 		error = wm_alloc_rx_descs(sc, rxq);
   6944 		if (error)
   6945 			break;
   6946 
   6947 		error = wm_alloc_rx_buffer(sc, rxq);
   6948 		if (error) {
   6949 			wm_free_rx_descs(sc, rxq);
   6950 			break;
   6951 		}
   6952 
   6953 #ifdef WM_EVENT_COUNTERS
   6954 		xname = device_xname(sc->sc_dev);
   6955 
   6956 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6957 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6958 
   6959 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6960 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6961 #endif /* WM_EVENT_COUNTERS */
   6962 
   6963 		rx_done++;
   6964 	}
   6965 	if (error)
   6966 		goto fail_2;
   6967 
   6968 	for (i = 0; i < sc->sc_nqueues; i++) {
   6969 		char rndname[16];
   6970 
   6971 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
   6972 		    device_xname(sc->sc_dev), i);
   6973 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
   6974 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
   6975 	}
   6976 
   6977 	return 0;
   6978 
   6979  fail_2:
   6980 	for (i = 0; i < rx_done; i++) {
   6981 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6982 		wm_free_rx_buffer(sc, rxq);
   6983 		wm_free_rx_descs(sc, rxq);
   6984 		if (rxq->rxq_lock)
   6985 			mutex_obj_free(rxq->rxq_lock);
   6986 	}
   6987  fail_1:
   6988 	for (i = 0; i < tx_done; i++) {
   6989 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6990 		pcq_destroy(txq->txq_interq);
   6991 		wm_free_tx_buffer(sc, txq);
   6992 		wm_free_tx_descs(sc, txq);
   6993 		if (txq->txq_lock)
   6994 			mutex_obj_free(txq->txq_lock);
   6995 	}
   6996 
   6997 	kmem_free(sc->sc_queue,
   6998 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6999  fail_0:
   7000 	return error;
   7001 }
   7002 
   7003 /*
    7004  * wm_free_txrx_queues:
    7005  *	Free {tx,rx} descriptors and {tx,rx} buffers
   7006  */
   7007 static void
   7008 wm_free_txrx_queues(struct wm_softc *sc)
   7009 {
   7010 	int i;
   7011 
   7012 	for (i = 0; i < sc->sc_nqueues; i++)
   7013 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
   7014 
   7015 	for (i = 0; i < sc->sc_nqueues; i++) {
   7016 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7017 
   7018 #ifdef WM_EVENT_COUNTERS
   7019 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7020 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7021 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7022 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7023 #endif /* WM_EVENT_COUNTERS */
   7024 
   7025 		wm_free_rx_buffer(sc, rxq);
   7026 		wm_free_rx_descs(sc, rxq);
   7027 		if (rxq->rxq_lock)
   7028 			mutex_obj_free(rxq->rxq_lock);
   7029 	}
   7030 
   7031 	for (i = 0; i < sc->sc_nqueues; i++) {
   7032 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7033 		struct mbuf *m;
   7034 #ifdef WM_EVENT_COUNTERS
   7035 		int j;
   7036 
   7037 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7038 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7039 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7040 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7041 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7042 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7043 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7044 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7045 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7046 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7047 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7048 
   7049 		for (j = 0; j < WM_NTXSEGS; j++)
   7050 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7051 
   7052 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7053 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7054 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7055 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7056 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7057 #endif /* WM_EVENT_COUNTERS */
   7058 
   7059 		/* Drain txq_interq */
   7060 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7061 			m_freem(m);
   7062 		pcq_destroy(txq->txq_interq);
   7063 
   7064 		wm_free_tx_buffer(sc, txq);
   7065 		wm_free_tx_descs(sc, txq);
   7066 		if (txq->txq_lock)
   7067 			mutex_obj_free(txq->txq_lock);
   7068 	}
   7069 
   7070 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7071 }
   7072 
   7073 static void
   7074 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7075 {
   7076 
   7077 	KASSERT(mutex_owned(txq->txq_lock));
   7078 
   7079 	/* Initialize the transmit descriptor ring. */
   7080 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7081 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7082 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7083 	txq->txq_free = WM_NTXDESC(txq);
   7084 	txq->txq_next = 0;
   7085 }
   7086 
   7087 static void
   7088 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7089     struct wm_txqueue *txq)
   7090 {
   7091 
   7092 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7093 		device_xname(sc->sc_dev), __func__));
   7094 	KASSERT(mutex_owned(txq->txq_lock));
   7095 
   7096 	if (sc->sc_type < WM_T_82543) {
   7097 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7098 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7099 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7100 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7101 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7102 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7103 	} else {
   7104 		int qid = wmq->wmq_id;
   7105 
   7106 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7107 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7108 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7109 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7110 
   7111 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7112 			/*
   7113 			 * Don't write TDT before TCTL.EN is set.
    7114 			 * See the documentation.
   7115 			 */
   7116 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7117 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7118 			    | TXDCTL_WTHRESH(0));
   7119 		else {
   7120 			/* XXX should update with AIM? */
   7121 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7122 			if (sc->sc_type >= WM_T_82540) {
   7123 				/* Should be the same */
   7124 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7125 			}
   7126 
   7127 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7128 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7129 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7130 		}
   7131 	}
   7132 }
   7133 
   7134 static void
   7135 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7136 {
   7137 	int i;
   7138 
   7139 	KASSERT(mutex_owned(txq->txq_lock));
   7140 
   7141 	/* Initialize the transmit job descriptors. */
   7142 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7143 		txq->txq_soft[i].txs_mbuf = NULL;
   7144 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7145 	txq->txq_snext = 0;
   7146 	txq->txq_sdirty = 0;
   7147 }
   7148 
   7149 static void
   7150 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7151     struct wm_txqueue *txq)
   7152 {
   7153 
   7154 	KASSERT(mutex_owned(txq->txq_lock));
   7155 
   7156 	/*
   7157 	 * Set up some register offsets that are different between
   7158 	 * the i82542 and the i82543 and later chips.
   7159 	 */
   7160 	if (sc->sc_type < WM_T_82543)
   7161 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7162 	else
   7163 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7164 
   7165 	wm_init_tx_descs(sc, txq);
   7166 	wm_init_tx_regs(sc, wmq, txq);
   7167 	wm_init_tx_buffer(sc, txq);
   7168 
   7169 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7170 	txq->txq_sending = false;
   7171 }
   7172 
   7173 static void
   7174 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7175     struct wm_rxqueue *rxq)
   7176 {
   7177 
   7178 	KASSERT(mutex_owned(rxq->rxq_lock));
   7179 
   7180 	/*
   7181 	 * Initialize the receive descriptor and receive job
   7182 	 * descriptor rings.
   7183 	 */
   7184 	if (sc->sc_type < WM_T_82543) {
   7185 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7186 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7187 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7188 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7189 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7190 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7191 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7192 
   7193 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7194 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7195 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7196 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7197 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7198 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7199 	} else {
   7200 		int qid = wmq->wmq_id;
   7201 
   7202 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7203 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7204 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7205 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7206 
   7207 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7208 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7209 				panic("%s: MCLBYTES %d unsupported for 82575 "
         				    "or higher\n", __func__, MCLBYTES);
   7210 
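         			/*
         			 * SRRCTL.BSIZEPKT is expressed in units of
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
         			 * divisibility check above and the shift below.
         			 */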
   7211 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
    7212 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
    7213 			    SRRCTL_DESCTYPE_ADV_ONEBUF
         			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7214 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7215 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7216 			    | RXDCTL_WTHRESH(1));
   7217 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7218 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7219 		} else {
   7220 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7221 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7222 			/* XXX should update with AIM? */
   7223 			CSR_WRITE(sc, WMREG_RDTR,
   7224 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7225 			/* MUST be same */
   7226 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7227 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7228 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7229 		}
   7230 	}
   7231 }
   7232 
   7233 static int
   7234 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7235 {
   7236 	struct wm_rxsoft *rxs;
   7237 	int error, i;
   7238 
   7239 	KASSERT(mutex_owned(rxq->rxq_lock));
   7240 
   7241 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7242 		rxs = &rxq->rxq_soft[i];
   7243 		if (rxs->rxs_mbuf == NULL) {
   7244 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7245 				log(LOG_ERR, "%s: unable to allocate or map "
   7246 				    "rx buffer %d, error = %d\n",
   7247 				    device_xname(sc->sc_dev), i, error);
   7248 				/*
   7249 				 * XXX Should attempt to run with fewer receive
   7250 				 * XXX buffers instead of just failing.
   7251 				 */
   7252 				wm_rxdrain(rxq);
   7253 				return ENOMEM;
   7254 			}
   7255 		} else {
   7256 			/*
   7257 			 * For 82575 and 82576, the RX descriptors must be
   7258 			 * initialized after the setting of RCTL.EN in
   7259 			 * wm_set_filter()
   7260 			 */
   7261 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7262 				wm_init_rxdesc(rxq, i);
   7263 		}
   7264 	}
   7265 	rxq->rxq_ptr = 0;
   7266 	rxq->rxq_discard = 0;
   7267 	WM_RXCHAIN_RESET(rxq);
   7268 
   7269 	return 0;
   7270 }
   7271 
   7272 static int
   7273 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7274     struct wm_rxqueue *rxq)
   7275 {
   7276 
   7277 	KASSERT(mutex_owned(rxq->rxq_lock));
   7278 
   7279 	/*
   7280 	 * Set up some register offsets that are different between
   7281 	 * the i82542 and the i82543 and later chips.
   7282 	 */
   7283 	if (sc->sc_type < WM_T_82543)
   7284 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7285 	else
   7286 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7287 
   7288 	wm_init_rx_regs(sc, wmq, rxq);
   7289 	return wm_init_rx_buffer(sc, rxq);
   7290 }
   7291 
   7292 /*
    7293  * wm_init_txrx_queues:
    7294  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   7295  */
   7296 static int
   7297 wm_init_txrx_queues(struct wm_softc *sc)
   7298 {
   7299 	int i, error = 0;
   7300 
   7301 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7302 		device_xname(sc->sc_dev), __func__));
   7303 
   7304 	for (i = 0; i < sc->sc_nqueues; i++) {
   7305 		struct wm_queue *wmq = &sc->sc_queue[i];
   7306 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7307 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7308 
    7309 		/*
    7310 		 * TODO
    7311 		 * Currently, we use a constant value instead of AIM
    7312 		 * (adaptive interrupt moderation). Also, the interrupt
    7313 		 * interval used for multiqueue (polling mode) is lower than
    7314 		 * the default value. More tuning and AIM are required.
    7315 		 */
   7316 		if (wm_is_using_multiqueue(sc))
   7317 			wmq->wmq_itr = 50;
   7318 		else
   7319 			wmq->wmq_itr = sc->sc_itr_init;
   7320 		wmq->wmq_set_itr = true;
   7321 
   7322 		mutex_enter(txq->txq_lock);
   7323 		wm_init_tx_queue(sc, wmq, txq);
   7324 		mutex_exit(txq->txq_lock);
   7325 
   7326 		mutex_enter(rxq->rxq_lock);
   7327 		error = wm_init_rx_queue(sc, wmq, rxq);
   7328 		mutex_exit(rxq->rxq_lock);
   7329 		if (error)
   7330 			break;
   7331 	}
   7332 
   7333 	return error;
   7334 }
   7335 
   7336 /*
   7337  * wm_tx_offload:
   7338  *
   7339  *	Set up TCP/IP checksumming parameters for the
   7340  *	specified packet.
   7341  */
   7342 static int
   7343 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7344     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7345 {
   7346 	struct mbuf *m0 = txs->txs_mbuf;
   7347 	struct livengood_tcpip_ctxdesc *t;
   7348 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7349 	uint32_t ipcse;
   7350 	struct ether_header *eh;
   7351 	int offset, iphl;
   7352 	uint8_t fields;
   7353 
   7354 	/*
   7355 	 * XXX It would be nice if the mbuf pkthdr had offset
   7356 	 * fields for the protocol headers.
   7357 	 */
   7358 
   7359 	eh = mtod(m0, struct ether_header *);
   7360 	switch (htons(eh->ether_type)) {
   7361 	case ETHERTYPE_IP:
   7362 	case ETHERTYPE_IPV6:
   7363 		offset = ETHER_HDR_LEN;
   7364 		break;
   7365 
   7366 	case ETHERTYPE_VLAN:
   7367 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7368 		break;
   7369 
   7370 	default:
   7371 		/* Don't support this protocol or encapsulation. */
   7372 		*fieldsp = 0;
   7373 		*cmdp = 0;
   7374 		return 0;
   7375 	}
   7376 
   7377 	if ((m0->m_pkthdr.csum_flags &
   7378 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7379 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7380 	} else
   7381 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7382 
   7383 	ipcse = offset + iphl - 1;
   7384 
   7385 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7386 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7387 	seg = 0;
   7388 	fields = 0;
   7389 
   7390 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7391 		int hlen = offset + iphl;
   7392 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7393 
   7394 		if (__predict_false(m0->m_len <
   7395 				    (hlen + sizeof(struct tcphdr)))) {
   7396 			/*
   7397 			 * TCP/IP headers are not in the first mbuf; we need
   7398 			 * to do this the slow and painful way. Let's just
   7399 			 * hope this doesn't happen very often.
   7400 			 */
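         			/*
         			 * For TSO, the pseudo-header checksum must not
         			 * include the length: the hardware inserts the
         			 * per-segment length, so ip_len/ip6_plen are
         			 * zeroed before the checksum is computed below.
         			 */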
   7401 			struct tcphdr th;
   7402 
   7403 			WM_Q_EVCNT_INCR(txq, tsopain);
   7404 
   7405 			m_copydata(m0, hlen, sizeof(th), &th);
   7406 			if (v4) {
   7407 				struct ip ip;
   7408 
   7409 				m_copydata(m0, offset, sizeof(ip), &ip);
   7410 				ip.ip_len = 0;
   7411 				m_copyback(m0,
   7412 				    offset + offsetof(struct ip, ip_len),
   7413 				    sizeof(ip.ip_len), &ip.ip_len);
   7414 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7415 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7416 			} else {
   7417 				struct ip6_hdr ip6;
   7418 
   7419 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7420 				ip6.ip6_plen = 0;
   7421 				m_copyback(m0,
   7422 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7423 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7424 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7425 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7426 			}
   7427 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7428 			    sizeof(th.th_sum), &th.th_sum);
   7429 
   7430 			hlen += th.th_off << 2;
   7431 		} else {
   7432 			/*
   7433 			 * TCP/IP headers are in the first mbuf; we can do
   7434 			 * this the easy way.
   7435 			 */
   7436 			struct tcphdr *th;
   7437 
   7438 			if (v4) {
   7439 				struct ip *ip =
   7440 				    (void *)(mtod(m0, char *) + offset);
   7441 				th = (void *)(mtod(m0, char *) + hlen);
   7442 
   7443 				ip->ip_len = 0;
   7444 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7445 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7446 			} else {
   7447 				struct ip6_hdr *ip6 =
   7448 				    (void *)(mtod(m0, char *) + offset);
   7449 				th = (void *)(mtod(m0, char *) + hlen);
   7450 
   7451 				ip6->ip6_plen = 0;
   7452 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7453 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7454 			}
   7455 			hlen += th->th_off << 2;
   7456 		}
   7457 
   7458 		if (v4) {
   7459 			WM_Q_EVCNT_INCR(txq, tso);
   7460 			cmdlen |= WTX_TCPIP_CMD_IP;
   7461 		} else {
   7462 			WM_Q_EVCNT_INCR(txq, tso6);
   7463 			ipcse = 0;
   7464 		}
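         		/*
         		 * The TSO context tells the chip how many header bytes
         		 * to replicate in front of each segment (HDRLEN) and
         		 * the MSS with which to slice up the payload.
         		 */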
   7465 		cmd |= WTX_TCPIP_CMD_TSE;
   7466 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7467 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7468 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7469 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7470 	}
   7471 
   7472 	/*
   7473 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7474 	 * offload feature, if we load the context descriptor, we
   7475 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7476 	 */
   7477 
   7478 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7479 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7480 	    WTX_TCPIP_IPCSE(ipcse);
   7481 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7482 		WM_Q_EVCNT_INCR(txq, ipsum);
   7483 		fields |= WTX_IXSM;
   7484 	}
   7485 
   7486 	offset += iphl;
   7487 
   7488 	if (m0->m_pkthdr.csum_flags &
   7489 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7490 		WM_Q_EVCNT_INCR(txq, tusum);
   7491 		fields |= WTX_TXSM;
   7492 		tucs = WTX_TCPIP_TUCSS(offset) |
   7493 		    WTX_TCPIP_TUCSO(offset +
   7494 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7495 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7496 	} else if ((m0->m_pkthdr.csum_flags &
   7497 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7498 		WM_Q_EVCNT_INCR(txq, tusum6);
   7499 		fields |= WTX_TXSM;
   7500 		tucs = WTX_TCPIP_TUCSS(offset) |
   7501 		    WTX_TCPIP_TUCSO(offset +
   7502 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7503 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7504 	} else {
   7505 		/* Just initialize it to a valid TCP context. */
   7506 		tucs = WTX_TCPIP_TUCSS(offset) |
   7507 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7508 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7509 	}
   7510 
    7511 	/*
    7512 	 * We don't have to write a context descriptor for every packet,
    7513 	 * except on the 82574. On the 82574, we must write a context
    7514 	 * descriptor for every packet when we use two descriptor queues.
    7515 	 * Writing one for every packet adds overhead, but it does not
    7516 	 * cause problems.
    7517 	 */
   7518 	/* Fill in the context descriptor. */
   7519 	t = (struct livengood_tcpip_ctxdesc *)
   7520 	    &txq->txq_descs[txq->txq_next];
   7521 	t->tcpip_ipcs = htole32(ipcs);
   7522 	t->tcpip_tucs = htole32(tucs);
   7523 	t->tcpip_cmdlen = htole32(cmdlen);
   7524 	t->tcpip_seg = htole32(seg);
   7525 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7526 
   7527 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7528 	txs->txs_ndesc++;
   7529 
   7530 	*cmdp = cmd;
   7531 	*fieldsp = fields;
   7532 
   7533 	return 0;
   7534 }
   7535 
   7536 static inline int
   7537 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7538 {
   7539 	struct wm_softc *sc = ifp->if_softc;
   7540 	u_int cpuid = cpu_index(curcpu());
   7541 
    7542 	/*
    7543 	 * Currently, a simple distribution strategy.
    7544 	 * TODO:
    7545 	 * Distribute by the flowid (RSS hash value).
    7546 	 */
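         	/*
         	 * Example (illustrative values): with ncpu = 8,
         	 * sc_affinity_offset = 2 and sc_nqueues = 4, a packet sent
         	 * from CPU index 5 maps to ((5 + 8 - 2) % 8) % 4 = queue 3.
         	 */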
   7547 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7548 }
   7549 
   7550 /*
   7551  * wm_start:		[ifnet interface function]
   7552  *
   7553  *	Start packet transmission on the interface.
   7554  */
   7555 static void
   7556 wm_start(struct ifnet *ifp)
   7557 {
   7558 	struct wm_softc *sc = ifp->if_softc;
   7559 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7560 
   7561 #ifdef WM_MPSAFE
   7562 	KASSERT(if_is_mpsafe(ifp));
   7563 #endif
   7564 	/*
   7565 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7566 	 */
   7567 
   7568 	mutex_enter(txq->txq_lock);
   7569 	if (!txq->txq_stopping)
   7570 		wm_start_locked(ifp);
   7571 	mutex_exit(txq->txq_lock);
   7572 }
   7573 
   7574 static void
   7575 wm_start_locked(struct ifnet *ifp)
   7576 {
   7577 	struct wm_softc *sc = ifp->if_softc;
   7578 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7579 
   7580 	wm_send_common_locked(ifp, txq, false);
   7581 }
   7582 
   7583 static int
   7584 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7585 {
   7586 	int qid;
   7587 	struct wm_softc *sc = ifp->if_softc;
   7588 	struct wm_txqueue *txq;
   7589 
   7590 	qid = wm_select_txqueue(ifp, m);
   7591 	txq = &sc->sc_queue[qid].wmq_txq;
   7592 
   7593 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7594 		m_freem(m);
   7595 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7596 		return ENOBUFS;
   7597 	}
   7598 
   7599 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   7600 	ifp->if_obytes += m->m_pkthdr.len;
   7601 	if (m->m_flags & M_MCAST)
   7602 		ifp->if_omcasts++;
   7603 
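         	/*
         	 * If the lock is busy, another context currently owns this
         	 * queue and is expected to drain txq_interq, so the packet
         	 * can safely be left queued.
         	 */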
   7604 	if (mutex_tryenter(txq->txq_lock)) {
   7605 		if (!txq->txq_stopping)
   7606 			wm_transmit_locked(ifp, txq);
   7607 		mutex_exit(txq->txq_lock);
   7608 	}
   7609 
   7610 	return 0;
   7611 }
   7612 
   7613 static void
   7614 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7615 {
   7616 
   7617 	wm_send_common_locked(ifp, txq, true);
   7618 }
   7619 
   7620 static void
   7621 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7622     bool is_transmit)
   7623 {
   7624 	struct wm_softc *sc = ifp->if_softc;
   7625 	struct mbuf *m0;
   7626 	struct wm_txsoft *txs;
   7627 	bus_dmamap_t dmamap;
   7628 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7629 	bus_addr_t curaddr;
   7630 	bus_size_t seglen, curlen;
   7631 	uint32_t cksumcmd;
   7632 	uint8_t cksumfields;
   7633 	bool remap = true;
   7634 
   7635 	KASSERT(mutex_owned(txq->txq_lock));
   7636 
   7637 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7638 		return;
   7639 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7640 		return;
   7641 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7642 		return;
   7643 
   7644 	/* Remember the previous number of free descriptors. */
   7645 	ofree = txq->txq_free;
   7646 
   7647 	/*
   7648 	 * Loop through the send queue, setting up transmit descriptors
   7649 	 * until we drain the queue, or use up all available transmit
   7650 	 * descriptors.
   7651 	 */
   7652 	for (;;) {
   7653 		m0 = NULL;
   7654 
   7655 		/* Get a work queue entry. */
   7656 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7657 			wm_txeof(txq, UINT_MAX);
   7658 			if (txq->txq_sfree == 0) {
   7659 				DPRINTF(WM_DEBUG_TX,
   7660 				    ("%s: TX: no free job descriptors\n",
   7661 					device_xname(sc->sc_dev)));
   7662 				WM_Q_EVCNT_INCR(txq, txsstall);
   7663 				break;
   7664 			}
   7665 		}
   7666 
   7667 		/* Grab a packet off the queue. */
   7668 		if (is_transmit)
   7669 			m0 = pcq_get(txq->txq_interq);
   7670 		else
   7671 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7672 		if (m0 == NULL)
   7673 			break;
   7674 
   7675 		DPRINTF(WM_DEBUG_TX,
   7676 		    ("%s: TX: have packet to transmit: %p\n",
   7677 			device_xname(sc->sc_dev), m0));
   7678 
   7679 		txs = &txq->txq_soft[txq->txq_snext];
   7680 		dmamap = txs->txs_dmamap;
   7681 
   7682 		use_tso = (m0->m_pkthdr.csum_flags &
   7683 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7684 
   7685 		/*
   7686 		 * So says the Linux driver:
   7687 		 * The controller does a simple calculation to make sure
   7688 		 * there is enough room in the FIFO before initiating the
   7689 		 * DMA for each buffer. The calc is:
   7690 		 *	4 = ceil(buffer len / MSS)
   7691 		 * To make sure we don't overrun the FIFO, adjust the max
   7692 		 * buffer len if the MSS drops.
   7693 		 */
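         		/*
         		 * e.g. with a 1448-byte MSS, each DMA segment is capped
         		 * at 4 * 1448 = 5792 bytes (assuming that is below
         		 * WTX_MAX_LEN) rather than at WTX_MAX_LEN.
         		 */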
   7694 		dmamap->dm_maxsegsz =
   7695 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7696 		    ? m0->m_pkthdr.segsz << 2
   7697 		    : WTX_MAX_LEN;
   7698 
   7699 		/*
   7700 		 * Load the DMA map.  If this fails, the packet either
   7701 		 * didn't fit in the allotted number of segments, or we
   7702 		 * were short on resources.  For the too-many-segments
   7703 		 * case, we simply report an error and drop the packet,
   7704 		 * since we can't sanely copy a jumbo packet to a single
   7705 		 * buffer.
   7706 		 */
   7707 retry:
   7708 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7709 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7710 		if (__predict_false(error)) {
   7711 			if (error == EFBIG) {
   7712 				if (remap == true) {
   7713 					struct mbuf *m;
   7714 
   7715 					remap = false;
   7716 					m = m_defrag(m0, M_NOWAIT);
   7717 					if (m != NULL) {
   7718 						WM_Q_EVCNT_INCR(txq, defrag);
   7719 						m0 = m;
   7720 						goto retry;
   7721 					}
   7722 				}
   7723 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7724 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7725 				    "DMA segments, dropping...\n",
   7726 				    device_xname(sc->sc_dev));
   7727 				wm_dump_mbuf_chain(sc, m0);
   7728 				m_freem(m0);
   7729 				continue;
   7730 			}
   7731 			/* Short on resources, just stop for now. */
   7732 			DPRINTF(WM_DEBUG_TX,
   7733 			    ("%s: TX: dmamap load failed: %d\n",
   7734 				device_xname(sc->sc_dev), error));
   7735 			break;
   7736 		}
   7737 
   7738 		segs_needed = dmamap->dm_nsegs;
   7739 		if (use_tso) {
   7740 			/* For sentinel descriptor; see below. */
   7741 			segs_needed++;
   7742 		}
   7743 
   7744 		/*
   7745 		 * Ensure we have enough descriptors free to describe
   7746 		 * the packet. Note, we always reserve one descriptor
   7747 		 * at the end of the ring due to the semantics of the
   7748 		 * TDT register, plus one more in the event we need
   7749 		 * to load offload context.
   7750 		 */
   7751 		if (segs_needed > txq->txq_free - 2) {
   7752 			/*
   7753 			 * Not enough free descriptors to transmit this
   7754 			 * packet.  We haven't committed anything yet,
   7755 			 * so just unload the DMA map, put the packet
    7756 			 * back on the queue, and punt. Notify the upper
   7757 			 * layer that there are no more slots left.
   7758 			 */
   7759 			DPRINTF(WM_DEBUG_TX,
   7760 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7761 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7762 				segs_needed, txq->txq_free - 1));
   7763 			if (!is_transmit)
   7764 				ifp->if_flags |= IFF_OACTIVE;
   7765 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7766 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7767 			WM_Q_EVCNT_INCR(txq, txdstall);
   7768 			break;
   7769 		}
   7770 
   7771 		/*
   7772 		 * Check for 82547 Tx FIFO bug. We need to do this
   7773 		 * once we know we can transmit the packet, since we
   7774 		 * do some internal FIFO space accounting here.
   7775 		 */
   7776 		if (sc->sc_type == WM_T_82547 &&
   7777 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7778 			DPRINTF(WM_DEBUG_TX,
   7779 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7780 				device_xname(sc->sc_dev)));
   7781 			if (!is_transmit)
   7782 				ifp->if_flags |= IFF_OACTIVE;
   7783 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7784 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7785 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7786 			break;
   7787 		}
   7788 
   7789 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7790 
   7791 		DPRINTF(WM_DEBUG_TX,
   7792 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7793 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7794 
   7795 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7796 
   7797 		/*
   7798 		 * Store a pointer to the packet so that we can free it
   7799 		 * later.
   7800 		 *
   7801 		 * Initially, we consider the number of descriptors the
   7802 		 * packet uses the number of DMA segments.  This may be
   7803 		 * incremented by 1 if we do checksum offload (a descriptor
   7804 		 * is used to set the checksum context).
   7805 		 */
   7806 		txs->txs_mbuf = m0;
   7807 		txs->txs_firstdesc = txq->txq_next;
   7808 		txs->txs_ndesc = segs_needed;
   7809 
   7810 		/* Set up offload parameters for this packet. */
   7811 		if (m0->m_pkthdr.csum_flags &
   7812 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7813 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7814 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7815 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7816 					  &cksumfields) != 0) {
   7817 				/* Error message already displayed. */
   7818 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7819 				continue;
   7820 			}
   7821 		} else {
   7822 			cksumcmd = 0;
   7823 			cksumfields = 0;
   7824 		}
   7825 
   7826 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7827 
   7828 		/* Sync the DMA map. */
   7829 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7830 		    BUS_DMASYNC_PREWRITE);
   7831 
   7832 		/* Initialize the transmit descriptor. */
   7833 		for (nexttx = txq->txq_next, seg = 0;
   7834 		     seg < dmamap->dm_nsegs; seg++) {
   7835 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7836 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7837 			     seglen != 0;
   7838 			     curaddr += curlen, seglen -= curlen,
   7839 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7840 				curlen = seglen;
   7841 
   7842 				/*
   7843 				 * So says the Linux driver:
    7844 				 * Workaround for premature descriptor
   7845 				 * write-backs in TSO mode.  Append a
   7846 				 * 4-byte sentinel descriptor.
   7847 				 */
   7848 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7849 				    curlen > 8)
   7850 					curlen -= 4;
   7851 
   7852 				wm_set_dma_addr(
   7853 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7854 				txq->txq_descs[nexttx].wtx_cmdlen
   7855 				    = htole32(cksumcmd | curlen);
   7856 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7857 				    = 0;
   7858 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7859 				    = cksumfields;
    7860 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7861 				lasttx = nexttx;
   7862 
   7863 				DPRINTF(WM_DEBUG_TX,
   7864 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7865 					"len %#04zx\n",
   7866 					device_xname(sc->sc_dev), nexttx,
   7867 					(uint64_t)curaddr, curlen));
   7868 			}
   7869 		}
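         		/*
         		 * Worked example of the TSO sentinel above: if the last
         		 * DMA segment of a TSO packet is 1496 bytes, the inner
         		 * loop first emits a 1492-byte descriptor (curlen > 8, so
         		 * 4 bytes are held back) and then a 4-byte sentinel
         		 * descriptor on the next pass, which is why segs_needed
         		 * was incremented by one for TSO earlier.
         		 */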
   7870 
   7871 		KASSERT(lasttx != -1);
   7872 
   7873 		/*
   7874 		 * Set up the command byte on the last descriptor of
   7875 		 * the packet. If we're in the interrupt delay window,
   7876 		 * delay the interrupt.
   7877 		 */
   7878 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7879 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7880 
   7881 		/*
   7882 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7883 		 * up the descriptor to encapsulate the packet for us.
   7884 		 *
   7885 		 * This is only valid on the last descriptor of the packet.
   7886 		 */
   7887 		if (vlan_has_tag(m0)) {
   7888 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7889 			    htole32(WTX_CMD_VLE);
   7890 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7891 			    = htole16(vlan_get_tag(m0));
   7892 		}
   7893 
   7894 		txs->txs_lastdesc = lasttx;
   7895 
   7896 		DPRINTF(WM_DEBUG_TX,
   7897 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7898 			device_xname(sc->sc_dev),
   7899 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7900 
   7901 		/* Sync the descriptors we're using. */
   7902 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7903 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7904 
   7905 		/* Give the packet to the chip. */
   7906 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7907 
   7908 		DPRINTF(WM_DEBUG_TX,
   7909 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7910 
   7911 		DPRINTF(WM_DEBUG_TX,
   7912 		    ("%s: TX: finished transmitting packet, job %d\n",
   7913 			device_xname(sc->sc_dev), txq->txq_snext));
   7914 
   7915 		/* Advance the tx pointer. */
   7916 		txq->txq_free -= txs->txs_ndesc;
   7917 		txq->txq_next = nexttx;
   7918 
   7919 		txq->txq_sfree--;
   7920 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7921 
   7922 		/* Pass the packet to any BPF listeners. */
   7923 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7924 	}
   7925 
   7926 	if (m0 != NULL) {
   7927 		if (!is_transmit)
   7928 			ifp->if_flags |= IFF_OACTIVE;
   7929 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7930 		WM_Q_EVCNT_INCR(txq, descdrop);
   7931 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7932 			__func__));
   7933 		m_freem(m0);
   7934 	}
   7935 
   7936 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7937 		/* No more slots; notify upper layer. */
   7938 		if (!is_transmit)
   7939 			ifp->if_flags |= IFF_OACTIVE;
   7940 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7941 	}
   7942 
   7943 	if (txq->txq_free != ofree) {
   7944 		/* Set a watchdog timer in case the chip flakes out. */
   7945 		txq->txq_lastsent = time_uptime;
   7946 		txq->txq_sending = true;
   7947 	}
   7948 }
   7949 
   7950 /*
   7951  * wm_nq_tx_offload:
   7952  *
   7953  *	Set up TCP/IP checksumming parameters for the
   7954  *	specified packet, for NEWQUEUE devices
   7955  */
   7956 static int
   7957 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7958     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7959 {
   7960 	struct mbuf *m0 = txs->txs_mbuf;
   7961 	uint32_t vl_len, mssidx, cmdc;
   7962 	struct ether_header *eh;
   7963 	int offset, iphl;
   7964 
   7965 	/*
   7966 	 * XXX It would be nice if the mbuf pkthdr had offset
   7967 	 * fields for the protocol headers.
   7968 	 */
   7969 	*cmdlenp = 0;
   7970 	*fieldsp = 0;
   7971 
   7972 	eh = mtod(m0, struct ether_header *);
   7973 	switch (htons(eh->ether_type)) {
   7974 	case ETHERTYPE_IP:
   7975 	case ETHERTYPE_IPV6:
   7976 		offset = ETHER_HDR_LEN;
   7977 		break;
   7978 
   7979 	case ETHERTYPE_VLAN:
   7980 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7981 		break;
   7982 
   7983 	default:
   7984 		/* Don't support this protocol or encapsulation. */
   7985 		*do_csum = false;
   7986 		return 0;
   7987 	}
   7988 	*do_csum = true;
   7989 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7990 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7991 
   7992 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7993 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7994 
   7995 	if ((m0->m_pkthdr.csum_flags &
   7996 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7997 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7998 	} else {
   7999 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8000 	}
   8001 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8002 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8003 
   8004 	if (vlan_has_tag(m0)) {
   8005 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8006 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8007 		*cmdlenp |= NQTX_CMD_VLE;
   8008 	}
   8009 
   8010 	mssidx = 0;
   8011 
   8012 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8013 		int hlen = offset + iphl;
   8014 		int tcp_hlen;
   8015 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8016 
   8017 		if (__predict_false(m0->m_len <
   8018 				    (hlen + sizeof(struct tcphdr)))) {
   8019 			/*
   8020 			 * TCP/IP headers are not in the first mbuf; we need
   8021 			 * to do this the slow and painful way. Let's just
   8022 			 * hope this doesn't happen very often.
   8023 			 */
   8024 			struct tcphdr th;
   8025 
   8026 			WM_Q_EVCNT_INCR(txq, tsopain);
   8027 
   8028 			m_copydata(m0, hlen, sizeof(th), &th);
   8029 			if (v4) {
   8030 				struct ip ip;
   8031 
   8032 				m_copydata(m0, offset, sizeof(ip), &ip);
   8033 				ip.ip_len = 0;
   8034 				m_copyback(m0,
   8035 				    offset + offsetof(struct ip, ip_len),
   8036 				    sizeof(ip.ip_len), &ip.ip_len);
   8037 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8038 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8039 			} else {
   8040 				struct ip6_hdr ip6;
   8041 
   8042 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8043 				ip6.ip6_plen = 0;
   8044 				m_copyback(m0,
   8045 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8046 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8047 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8048 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8049 			}
   8050 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8051 			    sizeof(th.th_sum), &th.th_sum);
   8052 
   8053 			tcp_hlen = th.th_off << 2;
   8054 		} else {
   8055 			/*
   8056 			 * TCP/IP headers are in the first mbuf; we can do
   8057 			 * this the easy way.
   8058 			 */
   8059 			struct tcphdr *th;
   8060 
   8061 			if (v4) {
   8062 				struct ip *ip =
   8063 				    (void *)(mtod(m0, char *) + offset);
   8064 				th = (void *)(mtod(m0, char *) + hlen);
   8065 
   8066 				ip->ip_len = 0;
   8067 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8068 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8069 			} else {
   8070 				struct ip6_hdr *ip6 =
   8071 				    (void *)(mtod(m0, char *) + offset);
   8072 				th = (void *)(mtod(m0, char *) + hlen);
   8073 
   8074 				ip6->ip6_plen = 0;
   8075 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8076 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8077 			}
   8078 			tcp_hlen = th->th_off << 2;
   8079 		}
   8080 		hlen += tcp_hlen;
   8081 		*cmdlenp |= NQTX_CMD_TSE;
   8082 
   8083 		if (v4) {
   8084 			WM_Q_EVCNT_INCR(txq, tso);
   8085 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8086 		} else {
   8087 			WM_Q_EVCNT_INCR(txq, tso6);
   8088 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8089 		}
   8090 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8091 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8092 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8093 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8094 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8095 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8096 	} else {
   8097 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8098 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8099 	}
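         	/*
         	 * Note on the TSO checksum seeding above (descriptive): the
         	 * hardware rewrites the IP length and TCP checksum for every
         	 * segment it cuts, so ip_len/ip6_plen are zeroed and th_sum is
         	 * preseeded with the pseudo-header sum computed from the
         	 * addresses and protocol only, i.e. with a zero length
         	 * contribution.
         	 */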
   8100 
   8101 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8102 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8103 		cmdc |= NQTXC_CMD_IP4;
   8104 	}
   8105 
   8106 	if (m0->m_pkthdr.csum_flags &
   8107 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8108 		WM_Q_EVCNT_INCR(txq, tusum);
   8109 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8110 			cmdc |= NQTXC_CMD_TCP;
   8111 		else
   8112 			cmdc |= NQTXC_CMD_UDP;
   8113 
   8114 		cmdc |= NQTXC_CMD_IP4;
   8115 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8116 	}
   8117 	if (m0->m_pkthdr.csum_flags &
   8118 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8119 		WM_Q_EVCNT_INCR(txq, tusum6);
   8120 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8121 			cmdc |= NQTXC_CMD_TCP;
   8122 		else
   8123 			cmdc |= NQTXC_CMD_UDP;
   8124 
   8125 		cmdc |= NQTXC_CMD_IP6;
   8126 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8127 	}
   8128 
   8129 	/*
    8130 	 * We don't have to write a context descriptor for every packet
    8131 	 * on NEWQUEUE controllers, that is 82575, 82576, 82580, I350,
    8132 	 * I354, I210 and I211; writing one per Tx queue is enough for
    8133 	 * these controllers.
    8134 	 * Writing a context descriptor for every packet adds overhead,
    8135 	 * but it does not cause problems.
   8136 	 */
   8137 	/* Fill in the context descriptor. */
    8138 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
    8139 	    htole32(vl_len);
    8140 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
    8141 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
    8142 	    htole32(cmdc);
    8143 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   8144 	    htole32(mssidx);
   8145 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8146 	DPRINTF(WM_DEBUG_TX,
   8147 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8148 		txq->txq_next, 0, vl_len));
   8149 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8150 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8151 	txs->txs_ndesc++;
   8152 	return 0;
   8153 }
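
         /*
          * Layout sketch for the context descriptor built by wm_nq_tx_offload()
          * (illustrative only; the actual shift/mask values live in the register
          * header).  For an untagged IPv4/TCP frame with a 14-byte Ethernet
          * header and a 20-byte IP header:
          *
          *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
          *	       | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
          *
          * MACLEN and IPLEN occupy disjoint bit fields, and the KASSERTs in the
          * function check that neither value overflows its field.
          */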
   8154 
   8155 /*
   8156  * wm_nq_start:		[ifnet interface function]
   8157  *
   8158  *	Start packet transmission on the interface for NEWQUEUE devices
   8159  */
   8160 static void
   8161 wm_nq_start(struct ifnet *ifp)
   8162 {
   8163 	struct wm_softc *sc = ifp->if_softc;
   8164 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8165 
   8166 #ifdef WM_MPSAFE
   8167 	KASSERT(if_is_mpsafe(ifp));
   8168 #endif
   8169 	/*
   8170 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8171 	 */
   8172 
   8173 	mutex_enter(txq->txq_lock);
   8174 	if (!txq->txq_stopping)
   8175 		wm_nq_start_locked(ifp);
   8176 	mutex_exit(txq->txq_lock);
   8177 }
   8178 
   8179 static void
   8180 wm_nq_start_locked(struct ifnet *ifp)
   8181 {
   8182 	struct wm_softc *sc = ifp->if_softc;
   8183 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8184 
   8185 	wm_nq_send_common_locked(ifp, txq, false);
   8186 }
   8187 
   8188 static int
   8189 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8190 {
   8191 	int qid;
   8192 	struct wm_softc *sc = ifp->if_softc;
   8193 	struct wm_txqueue *txq;
   8194 
   8195 	qid = wm_select_txqueue(ifp, m);
   8196 	txq = &sc->sc_queue[qid].wmq_txq;
   8197 
   8198 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8199 		m_freem(m);
   8200 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8201 		return ENOBUFS;
   8202 	}
   8203 
   8204 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   8205 	ifp->if_obytes += m->m_pkthdr.len;
   8206 	if (m->m_flags & M_MCAST)
   8207 		ifp->if_omcasts++;
   8208 
   8209 	/*
    8210 	 * There are two situations in which this mutex_tryenter() can
    8211 	 * fail at run time:
    8212 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8213 	 *     (2) contention with the deferred if_start softint
    8214 	 *         (wm_handle_queue())
    8215 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8216 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8217 	 * In case (2), the last packet is likewise dequeued by
    8218 	 * wm_deferred_start_locked(), so it does not get stuck either.
   8219 	 */
   8220 	if (mutex_tryenter(txq->txq_lock)) {
   8221 		if (!txq->txq_stopping)
   8222 			wm_nq_transmit_locked(ifp, txq);
   8223 		mutex_exit(txq->txq_lock);
   8224 	}
   8225 
   8226 	return 0;
   8227 }
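
         /*
          * The enqueue-then-tryenter pattern of wm_nq_transmit() as a minimal
          * stand-alone sketch (assumed names; not driver code).  The failed
          * tryenter is safe because the current lock holder will drain the
          * queue, including our packet, via the deferred start path:
          *
          *	if (!pcq_put(q, m))
          *		return ENOBUFS;
          *	if (mutex_tryenter(lock)) {
          *		drain(q);
          *		mutex_exit(lock);
          *	}
          *	return 0;
          */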
   8228 
   8229 static void
   8230 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8231 {
   8232 
   8233 	wm_nq_send_common_locked(ifp, txq, true);
   8234 }
   8235 
   8236 static void
   8237 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8238     bool is_transmit)
   8239 {
   8240 	struct wm_softc *sc = ifp->if_softc;
   8241 	struct mbuf *m0;
   8242 	struct wm_txsoft *txs;
   8243 	bus_dmamap_t dmamap;
   8244 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8245 	bool do_csum, sent;
   8246 	bool remap = true;
   8247 
   8248 	KASSERT(mutex_owned(txq->txq_lock));
   8249 
   8250 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8251 		return;
   8252 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8253 		return;
   8254 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8255 		return;
   8256 
   8257 	sent = false;
   8258 
   8259 	/*
   8260 	 * Loop through the send queue, setting up transmit descriptors
   8261 	 * until we drain the queue, or use up all available transmit
   8262 	 * descriptors.
   8263 	 */
   8264 	for (;;) {
   8265 		m0 = NULL;
   8266 
   8267 		/* Get a work queue entry. */
   8268 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8269 			wm_txeof(txq, UINT_MAX);
   8270 			if (txq->txq_sfree == 0) {
   8271 				DPRINTF(WM_DEBUG_TX,
   8272 				    ("%s: TX: no free job descriptors\n",
   8273 					device_xname(sc->sc_dev)));
   8274 				WM_Q_EVCNT_INCR(txq, txsstall);
   8275 				break;
   8276 			}
   8277 		}
   8278 
   8279 		/* Grab a packet off the queue. */
   8280 		if (is_transmit)
   8281 			m0 = pcq_get(txq->txq_interq);
   8282 		else
   8283 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8284 		if (m0 == NULL)
   8285 			break;
   8286 
   8287 		DPRINTF(WM_DEBUG_TX,
   8288 		    ("%s: TX: have packet to transmit: %p\n",
   8289 		    device_xname(sc->sc_dev), m0));
   8290 
   8291 		txs = &txq->txq_soft[txq->txq_snext];
   8292 		dmamap = txs->txs_dmamap;
   8293 
   8294 		/*
   8295 		 * Load the DMA map.  If this fails, the packet either
   8296 		 * didn't fit in the allotted number of segments, or we
   8297 		 * were short on resources.  For the too-many-segments
   8298 		 * case, we simply report an error and drop the packet,
   8299 		 * since we can't sanely copy a jumbo packet to a single
   8300 		 * buffer.
   8301 		 */
   8302 retry:
   8303 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8304 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8305 		if (__predict_false(error)) {
   8306 			if (error == EFBIG) {
   8307 				if (remap == true) {
   8308 					struct mbuf *m;
   8309 
   8310 					remap = false;
   8311 					m = m_defrag(m0, M_NOWAIT);
   8312 					if (m != NULL) {
   8313 						WM_Q_EVCNT_INCR(txq, defrag);
   8314 						m0 = m;
   8315 						goto retry;
   8316 					}
   8317 				}
   8318 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8319 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8320 				    "DMA segments, dropping...\n",
   8321 				    device_xname(sc->sc_dev));
   8322 				wm_dump_mbuf_chain(sc, m0);
   8323 				m_freem(m0);
   8324 				continue;
   8325 			}
   8326 			/* Short on resources, just stop for now. */
   8327 			DPRINTF(WM_DEBUG_TX,
   8328 			    ("%s: TX: dmamap load failed: %d\n",
   8329 				device_xname(sc->sc_dev), error));
   8330 			break;
   8331 		}
   8332 
   8333 		segs_needed = dmamap->dm_nsegs;
   8334 
   8335 		/*
   8336 		 * Ensure we have enough descriptors free to describe
   8337 		 * the packet. Note, we always reserve one descriptor
   8338 		 * at the end of the ring due to the semantics of the
   8339 		 * TDT register, plus one more in the event we need
   8340 		 * to load offload context.
   8341 		 */
   8342 		if (segs_needed > txq->txq_free - 2) {
   8343 			/*
   8344 			 * Not enough free descriptors to transmit this
   8345 			 * packet.  We haven't committed anything yet,
   8346 			 * so just unload the DMA map, put the packet
    8347 			 * back on the queue, and punt. Notify the upper
   8348 			 * layer that there are no more slots left.
   8349 			 */
   8350 			DPRINTF(WM_DEBUG_TX,
   8351 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8352 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8353 				segs_needed, txq->txq_free - 1));
   8354 			if (!is_transmit)
   8355 				ifp->if_flags |= IFF_OACTIVE;
   8356 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8357 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8358 			WM_Q_EVCNT_INCR(txq, txdstall);
   8359 			break;
   8360 		}
   8361 
   8362 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8363 
   8364 		DPRINTF(WM_DEBUG_TX,
   8365 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8366 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8367 
   8368 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8369 
   8370 		/*
   8371 		 * Store a pointer to the packet so that we can free it
   8372 		 * later.
   8373 		 *
    8374 		 * Initially, we consider the number of descriptors the
    8375 		 * packet uses to be the number of DMA segments.  This may
    8376 		 * be incremented by 1 if we do checksum offload (a
    8377 		 * descriptor is used to set the checksum context).
   8378 		 */
   8379 		txs->txs_mbuf = m0;
   8380 		txs->txs_firstdesc = txq->txq_next;
   8381 		txs->txs_ndesc = segs_needed;
   8382 
   8383 		/* Set up offload parameters for this packet. */
   8384 		uint32_t cmdlen, fields, dcmdlen;
   8385 		if (m0->m_pkthdr.csum_flags &
   8386 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8387 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8388 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8389 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8390 			    &do_csum) != 0) {
   8391 				/* Error message already displayed. */
   8392 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8393 				continue;
   8394 			}
   8395 		} else {
   8396 			do_csum = false;
   8397 			cmdlen = 0;
   8398 			fields = 0;
   8399 		}
   8400 
   8401 		/* Sync the DMA map. */
   8402 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8403 		    BUS_DMASYNC_PREWRITE);
   8404 
   8405 		/* Initialize the first transmit descriptor. */
   8406 		nexttx = txq->txq_next;
   8407 		if (!do_csum) {
   8408 			/* Setup a legacy descriptor */
   8409 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8410 			    dmamap->dm_segs[0].ds_addr);
   8411 			txq->txq_descs[nexttx].wtx_cmdlen =
   8412 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8413 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8414 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8415 			if (vlan_has_tag(m0)) {
   8416 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8417 				    htole32(WTX_CMD_VLE);
   8418 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8419 				    htole16(vlan_get_tag(m0));
   8420 			} else
    8421 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8422 
   8423 			dcmdlen = 0;
   8424 		} else {
   8425 			/* Setup an advanced data descriptor */
   8426 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8427 			    htole64(dmamap->dm_segs[0].ds_addr);
   8428 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8429 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8430 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8431 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8432 			    htole32(fields);
   8433 			DPRINTF(WM_DEBUG_TX,
   8434 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8435 				device_xname(sc->sc_dev), nexttx,
   8436 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8437 			DPRINTF(WM_DEBUG_TX,
   8438 			    ("\t 0x%08x%08x\n", fields,
   8439 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8440 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8441 		}
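         		/*
         		 * Note on dcmdlen (descriptive): it stays 0 on the
         		 * legacy path, so the per-segment descriptors below
         		 * carry plain lengths, while on the advanced path every
         		 * segment descriptor carries NQTX_DTYP_D | NQTX_CMD_DEXT.
         		 * The KASSERT in the loop below relies on the segment
         		 * length never overlapping those high command bits.
         		 */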
   8442 
   8443 		lasttx = nexttx;
   8444 		nexttx = WM_NEXTTX(txq, nexttx);
   8445 		/*
    8446 		 * Fill in the next descriptors. The legacy and advanced
    8447 		 * formats are identical from here on.
   8448 		 */
   8449 		for (seg = 1; seg < dmamap->dm_nsegs;
   8450 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8451 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8452 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8453 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8454 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8455 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8456 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8457 			lasttx = nexttx;
   8458 
   8459 			DPRINTF(WM_DEBUG_TX,
   8460 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8461 				device_xname(sc->sc_dev), nexttx,
   8462 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8463 				dmamap->dm_segs[seg].ds_len));
   8464 		}
   8465 
   8466 		KASSERT(lasttx != -1);
   8467 
   8468 		/*
   8469 		 * Set up the command byte on the last descriptor of
   8470 		 * the packet. If we're in the interrupt delay window,
   8471 		 * delay the interrupt.
   8472 		 */
   8473 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8474 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8475 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8476 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8477 
   8478 		txs->txs_lastdesc = lasttx;
   8479 
   8480 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8481 		    device_xname(sc->sc_dev),
   8482 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8483 
   8484 		/* Sync the descriptors we're using. */
   8485 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8486 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8487 
   8488 		/* Give the packet to the chip. */
   8489 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8490 		sent = true;
   8491 
   8492 		DPRINTF(WM_DEBUG_TX,
   8493 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8494 
   8495 		DPRINTF(WM_DEBUG_TX,
   8496 		    ("%s: TX: finished transmitting packet, job %d\n",
   8497 			device_xname(sc->sc_dev), txq->txq_snext));
   8498 
   8499 		/* Advance the tx pointer. */
   8500 		txq->txq_free -= txs->txs_ndesc;
   8501 		txq->txq_next = nexttx;
   8502 
   8503 		txq->txq_sfree--;
   8504 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8505 
   8506 		/* Pass the packet to any BPF listeners. */
   8507 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8508 	}
   8509 
   8510 	if (m0 != NULL) {
   8511 		if (!is_transmit)
   8512 			ifp->if_flags |= IFF_OACTIVE;
   8513 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8514 		WM_Q_EVCNT_INCR(txq, descdrop);
   8515 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8516 			__func__));
   8517 		m_freem(m0);
   8518 	}
   8519 
   8520 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8521 		/* No more slots; notify upper layer. */
   8522 		if (!is_transmit)
   8523 			ifp->if_flags |= IFF_OACTIVE;
   8524 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8525 	}
   8526 
   8527 	if (sent) {
   8528 		/* Set a watchdog timer in case the chip flakes out. */
   8529 		txq->txq_lastsent = time_uptime;
   8530 		txq->txq_sending = true;
   8531 	}
   8532 }
   8533 
   8534 static void
   8535 wm_deferred_start_locked(struct wm_txqueue *txq)
   8536 {
   8537 	struct wm_softc *sc = txq->txq_sc;
   8538 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8539 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8540 	int qid = wmq->wmq_id;
   8541 
   8542 	KASSERT(mutex_owned(txq->txq_lock));
   8543 
   8544 	if (txq->txq_stopping) {
   8545 		mutex_exit(txq->txq_lock);
   8546 		return;
   8547 	}
   8548 
   8549 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8550 		/* XXX needed for ALTQ or single-CPU systems */
   8551 		if (qid == 0)
   8552 			wm_nq_start_locked(ifp);
   8553 		wm_nq_transmit_locked(ifp, txq);
   8554 	} else {
    8555 		/* XXX needed for ALTQ or single-CPU systems */
   8556 		if (qid == 0)
   8557 			wm_start_locked(ifp);
   8558 		wm_transmit_locked(ifp, txq);
   8559 	}
   8560 }
   8561 
   8562 /* Interrupt */
   8563 
   8564 /*
   8565  * wm_txeof:
   8566  *
   8567  *	Helper; handle transmit interrupts.
   8568  */
   8569 static bool
   8570 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8571 {
   8572 	struct wm_softc *sc = txq->txq_sc;
   8573 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8574 	struct wm_txsoft *txs;
   8575 	int count = 0;
   8576 	int i;
   8577 	uint8_t status;
   8578 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8579 	bool more = false;
   8580 
   8581 	KASSERT(mutex_owned(txq->txq_lock));
   8582 
   8583 	if (txq->txq_stopping)
   8584 		return false;
   8585 
   8586 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8587 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8588 	if (wmq->wmq_id == 0)
   8589 		ifp->if_flags &= ~IFF_OACTIVE;
   8590 
   8591 	/*
   8592 	 * Go through the Tx list and free mbufs for those
   8593 	 * frames which have been transmitted.
   8594 	 */
   8595 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8596 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8597 		if (limit-- == 0) {
   8598 			more = true;
   8599 			DPRINTF(WM_DEBUG_TX,
   8600 			    ("%s: TX: loop limited, job %d is not processed\n",
   8601 				device_xname(sc->sc_dev), i));
   8602 			break;
   8603 		}
   8604 
   8605 		txs = &txq->txq_soft[i];
   8606 
   8607 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8608 			device_xname(sc->sc_dev), i));
   8609 
   8610 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8611 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8612 
   8613 		status =
   8614 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8615 		if ((status & WTX_ST_DD) == 0) {
   8616 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8617 			    BUS_DMASYNC_PREREAD);
   8618 			break;
   8619 		}
   8620 
   8621 		count++;
   8622 		DPRINTF(WM_DEBUG_TX,
   8623 		    ("%s: TX: job %d done: descs %d..%d\n",
   8624 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8625 		    txs->txs_lastdesc));
   8626 
   8627 		/*
   8628 		 * XXX We should probably be using the statistics
   8629 		 * XXX registers, but I don't know if they exist
   8630 		 * XXX on chips before the i82544.
   8631 		 */
   8632 
   8633 #ifdef WM_EVENT_COUNTERS
   8634 		if (status & WTX_ST_TU)
   8635 			WM_Q_EVCNT_INCR(txq, underrun);
   8636 #endif /* WM_EVENT_COUNTERS */
   8637 
   8638 		/*
    8639 		 * The documents for 82574 and newer say the status field
    8640 		 * has neither an EC (Excessive Collision) bit nor an LC
    8641 		 * (Late Collision) bit (both are reserved). See the "PCIe
    8642 		 * GbE Controller Open Source Software Developer's Manual",
    8643 		 * the 82574 datasheet and newer ones.
    8644 		 *
    8645 		 * XXX The LC bit has been seen set on I218 even on
    8646 		 * full-duplex media, so it might have some other meaning
    8647 		 * there (no documentation available).
   8647 		 */
   8648 
   8649 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8650 		    && ((sc->sc_type < WM_T_82574)
   8651 			|| (sc->sc_type == WM_T_80003))) {
   8652 			ifp->if_oerrors++;
   8653 			if (status & WTX_ST_LC)
   8654 				log(LOG_WARNING, "%s: late collision\n",
   8655 				    device_xname(sc->sc_dev));
   8656 			else if (status & WTX_ST_EC) {
   8657 				ifp->if_collisions +=
   8658 				    TX_COLLISION_THRESHOLD + 1;
   8659 				log(LOG_WARNING, "%s: excessive collisions\n",
   8660 				    device_xname(sc->sc_dev));
   8661 			}
   8662 		} else
   8663 			ifp->if_opackets++;
   8664 
   8665 		txq->txq_packets++;
   8666 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8667 
   8668 		txq->txq_free += txs->txs_ndesc;
   8669 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8670 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8671 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8672 		m_freem(txs->txs_mbuf);
   8673 		txs->txs_mbuf = NULL;
   8674 	}
   8675 
   8676 	/* Update the dirty transmit buffer pointer. */
   8677 	txq->txq_sdirty = i;
   8678 	DPRINTF(WM_DEBUG_TX,
   8679 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8680 
   8681 	/*
   8682 	 * If there are no more pending transmissions, cancel the watchdog
   8683 	 * timer.
   8684 	 */
   8685 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8686 		txq->txq_sending = false;
   8687 
   8688 	return more;
   8689 }
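
         /*
          * Stand-alone model of the reclaim scan in wm_txeof() (a sketch with
          * assumed names; not driver code): jobs complete in ring order, so
          * scanning from the oldest job and stopping at the first descriptor
          * whose DD (descriptor done) bit is clear reclaims exactly the
          * finished jobs.
          */
         #if 0
         static int
         count_done(const uint8_t *dd, int first, int njobs, int ringsize)
         {
         	int i = first, count;
         
         	for (count = 0; count < njobs; count++, i = (i + 1) % ringsize) {
         		if (dd[i] == 0)
         			break;	/* hardware is not done with this job */
         	}
         	return count;	/* number of jobs safe to reclaim */
         }
         #endif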
   8690 
   8691 static inline uint32_t
   8692 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8693 {
   8694 	struct wm_softc *sc = rxq->rxq_sc;
   8695 
   8696 	if (sc->sc_type == WM_T_82574)
   8697 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8698 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8699 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8700 	else
   8701 		return rxq->rxq_descs[idx].wrx_status;
   8702 }
   8703 
   8704 static inline uint32_t
   8705 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8706 {
   8707 	struct wm_softc *sc = rxq->rxq_sc;
   8708 
   8709 	if (sc->sc_type == WM_T_82574)
   8710 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8711 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8712 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8713 	else
   8714 		return rxq->rxq_descs[idx].wrx_errors;
   8715 }
   8716 
   8717 static inline uint16_t
   8718 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8719 {
   8720 	struct wm_softc *sc = rxq->rxq_sc;
   8721 
   8722 	if (sc->sc_type == WM_T_82574)
   8723 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8724 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8725 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8726 	else
   8727 		return rxq->rxq_descs[idx].wrx_special;
   8728 }
   8729 
   8730 static inline int
   8731 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8732 {
   8733 	struct wm_softc *sc = rxq->rxq_sc;
   8734 
   8735 	if (sc->sc_type == WM_T_82574)
   8736 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8737 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8738 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8739 	else
   8740 		return rxq->rxq_descs[idx].wrx_len;
   8741 }
   8742 
   8743 #ifdef WM_DEBUG
   8744 static inline uint32_t
   8745 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8746 {
   8747 	struct wm_softc *sc = rxq->rxq_sc;
   8748 
   8749 	if (sc->sc_type == WM_T_82574)
   8750 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8751 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8752 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8753 	else
   8754 		return 0;
   8755 }
   8756 
   8757 static inline uint8_t
   8758 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8759 {
   8760 	struct wm_softc *sc = rxq->rxq_sc;
   8761 
   8762 	if (sc->sc_type == WM_T_82574)
   8763 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8764 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8765 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8766 	else
   8767 		return 0;
   8768 }
   8769 #endif /* WM_DEBUG */
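
         /*
          * Summary of the three RX descriptor formats dispatched on by the
          * accessors above: 82574 uses the extended format (erx_ctx, EXTRXC_*),
          * WM_F_NEWQUEUE chips use the advanced format (nqrx_ctx, NQRXC_*), and
          * all other chips use the legacy wiseman format (wrx_*).  The
          * accessors hide that choice from wm_rxeof().
          */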
   8770 
   8771 static inline bool
   8772 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8773     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8774 {
   8775 
   8776 	if (sc->sc_type == WM_T_82574)
   8777 		return (status & ext_bit) != 0;
   8778 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8779 		return (status & nq_bit) != 0;
   8780 	else
   8781 		return (status & legacy_bit) != 0;
   8782 }
   8783 
   8784 static inline bool
   8785 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8786     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8787 {
   8788 
   8789 	if (sc->sc_type == WM_T_82574)
   8790 		return (error & ext_bit) != 0;
   8791 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8792 		return (error & nq_bit) != 0;
   8793 	else
   8794 		return (error & legacy_bit) != 0;
   8795 }
   8796 
   8797 static inline bool
   8798 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8799 {
   8800 
   8801 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8802 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8803 		return true;
   8804 	else
   8805 		return false;
   8806 }
   8807 
   8808 static inline bool
   8809 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8810 {
   8811 	struct wm_softc *sc = rxq->rxq_sc;
   8812 
   8813 	/* XXX missing error bit for newqueue? */
   8814 	if (wm_rxdesc_is_set_error(sc, errors,
   8815 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8816 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8817 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8818 		NQRXC_ERROR_RXE)) {
   8819 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8820 		    EXTRXC_ERROR_SE, 0))
   8821 			log(LOG_WARNING, "%s: symbol error\n",
   8822 			    device_xname(sc->sc_dev));
   8823 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8824 		    EXTRXC_ERROR_SEQ, 0))
   8825 			log(LOG_WARNING, "%s: receive sequence error\n",
   8826 			    device_xname(sc->sc_dev));
   8827 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8828 		    EXTRXC_ERROR_CE, 0))
   8829 			log(LOG_WARNING, "%s: CRC error\n",
   8830 			    device_xname(sc->sc_dev));
   8831 		return true;
   8832 	}
   8833 
   8834 	return false;
   8835 }
   8836 
   8837 static inline bool
   8838 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8839 {
   8840 	struct wm_softc *sc = rxq->rxq_sc;
   8841 
   8842 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8843 		NQRXC_STATUS_DD)) {
   8844 		/* We have processed all of the receive descriptors. */
   8845 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8846 		return false;
   8847 	}
   8848 
   8849 	return true;
   8850 }
   8851 
   8852 static inline bool
   8853 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8854     uint16_t vlantag, struct mbuf *m)
   8855 {
   8856 
   8857 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8858 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8859 		vlan_set_tag(m, le16toh(vlantag));
   8860 	}
   8861 
   8862 	return true;
   8863 }
   8864 
   8865 static inline void
   8866 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8867     uint32_t errors, struct mbuf *m)
   8868 {
   8869 	struct wm_softc *sc = rxq->rxq_sc;
   8870 
   8871 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8872 		if (wm_rxdesc_is_set_status(sc, status,
   8873 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8874 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8875 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8876 			if (wm_rxdesc_is_set_error(sc, errors,
   8877 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8878 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8879 		}
   8880 		if (wm_rxdesc_is_set_status(sc, status,
   8881 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8882 			/*
   8883 			 * Note: we don't know if this was TCP or UDP,
   8884 			 * so we just set both bits, and expect the
   8885 			 * upper layers to deal.
   8886 			 */
   8887 			WM_Q_EVCNT_INCR(rxq, tusum);
   8888 			m->m_pkthdr.csum_flags |=
   8889 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8890 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8891 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8892 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8893 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8894 		}
   8895 	}
   8896 }
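
         /*
          * Example of the resulting csum_flags (descriptive): for a verified
          * IPv4/TCP packet the descriptor cannot tell TCP from UDP, so the
          * mbuf leaves here with M_CSUM_IPv4 plus all four of
          * M_CSUM_TCPv4/UDPv4/TCPv6/UDPv6 set and no *_BAD bits, and the
          * upper layers then skip software checksum verification.
          */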
   8897 
   8898 /*
   8899  * wm_rxeof:
   8900  *
   8901  *	Helper; handle receive interrupts.
   8902  */
   8903 static bool
   8904 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8905 {
   8906 	struct wm_softc *sc = rxq->rxq_sc;
   8907 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8908 	struct wm_rxsoft *rxs;
   8909 	struct mbuf *m;
   8910 	int i, len;
   8911 	int count = 0;
   8912 	uint32_t status, errors;
   8913 	uint16_t vlantag;
   8914 	bool more = false;
   8915 
   8916 	KASSERT(mutex_owned(rxq->rxq_lock));
   8917 
   8918 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8919 		if (limit-- == 0) {
   8920 			rxq->rxq_ptr = i;
   8921 			more = true;
   8922 			DPRINTF(WM_DEBUG_RX,
   8923 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8924 				device_xname(sc->sc_dev), i));
   8925 			break;
   8926 		}
   8927 
   8928 		rxs = &rxq->rxq_soft[i];
   8929 
   8930 		DPRINTF(WM_DEBUG_RX,
   8931 		    ("%s: RX: checking descriptor %d\n",
   8932 			device_xname(sc->sc_dev), i));
   8933 		wm_cdrxsync(rxq, i,
   8934 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8935 
   8936 		status = wm_rxdesc_get_status(rxq, i);
   8937 		errors = wm_rxdesc_get_errors(rxq, i);
   8938 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8939 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8940 #ifdef WM_DEBUG
   8941 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8942 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8943 #endif
   8944 
   8945 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8946 			/*
    8947 			 * Update the receive pointer while holding rxq_lock
    8948 			 * so it stays consistent with the counters.
   8949 			 */
   8950 			rxq->rxq_ptr = i;
   8951 			break;
   8952 		}
   8953 
   8954 		count++;
   8955 		if (__predict_false(rxq->rxq_discard)) {
   8956 			DPRINTF(WM_DEBUG_RX,
   8957 			    ("%s: RX: discarding contents of descriptor %d\n",
   8958 				device_xname(sc->sc_dev), i));
   8959 			wm_init_rxdesc(rxq, i);
   8960 			if (wm_rxdesc_is_eop(rxq, status)) {
   8961 				/* Reset our state. */
   8962 				DPRINTF(WM_DEBUG_RX,
   8963 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8964 					device_xname(sc->sc_dev)));
   8965 				rxq->rxq_discard = 0;
   8966 			}
   8967 			continue;
   8968 		}
   8969 
   8970 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8971 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8972 
   8973 		m = rxs->rxs_mbuf;
   8974 
   8975 		/*
   8976 		 * Add a new receive buffer to the ring, unless of
   8977 		 * course the length is zero. Treat the latter as a
   8978 		 * failed mapping.
   8979 		 */
   8980 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8981 			/*
   8982 			 * Failed, throw away what we've done so
   8983 			 * far, and discard the rest of the packet.
   8984 			 */
   8985 			ifp->if_ierrors++;
   8986 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8987 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8988 			wm_init_rxdesc(rxq, i);
   8989 			if (!wm_rxdesc_is_eop(rxq, status))
   8990 				rxq->rxq_discard = 1;
   8991 			if (rxq->rxq_head != NULL)
   8992 				m_freem(rxq->rxq_head);
   8993 			WM_RXCHAIN_RESET(rxq);
   8994 			DPRINTF(WM_DEBUG_RX,
   8995 			    ("%s: RX: Rx buffer allocation failed, "
   8996 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8997 				rxq->rxq_discard ? " (discard)" : ""));
   8998 			continue;
   8999 		}
   9000 
   9001 		m->m_len = len;
   9002 		rxq->rxq_len += len;
   9003 		DPRINTF(WM_DEBUG_RX,
   9004 		    ("%s: RX: buffer at %p len %d\n",
   9005 			device_xname(sc->sc_dev), m->m_data, len));
   9006 
   9007 		/* If this is not the end of the packet, keep looking. */
   9008 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9009 			WM_RXCHAIN_LINK(rxq, m);
   9010 			DPRINTF(WM_DEBUG_RX,
   9011 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9012 				device_xname(sc->sc_dev), rxq->rxq_len));
   9013 			continue;
   9014 		}
   9015 
   9016 		/*
    9017 		 * Okay, we have the entire packet now. The chip is
    9018 		 * configured to include the FCS except on I350, I354 and
    9019 		 * I21[01] (not all chips can be configured to strip it),
    9020 		 * so we need to trim it. We may need to adjust the length
    9021 		 * of the previous mbuf in the chain if the current mbuf
    9022 		 * is too short.
    9023 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    9024 		 * register is always set on I350, so we don't trim there.
   9025 		 */
   9026 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   9027 		    && (sc->sc_type != WM_T_I210)
   9028 		    && (sc->sc_type != WM_T_I211)) {
   9029 			if (m->m_len < ETHER_CRC_LEN) {
   9030 				rxq->rxq_tail->m_len
   9031 				    -= (ETHER_CRC_LEN - m->m_len);
   9032 				m->m_len = 0;
   9033 			} else
   9034 				m->m_len -= ETHER_CRC_LEN;
   9035 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9036 		} else
   9037 			len = rxq->rxq_len;
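         		/*
         		 * Worked example of the FCS trim above: a 1518-byte
         		 * frame split into a 1516-byte mbuf plus a 2-byte mbuf
         		 * has a final mbuf shorter than ETHER_CRC_LEN (4), so
         		 * the previous mbuf gives up the remaining 2 bytes
         		 * (1516 -> 1514), the last mbuf's length becomes 0, and
         		 * the packet is delivered as 1514 bytes without the CRC.
         		 */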
   9038 
   9039 		WM_RXCHAIN_LINK(rxq, m);
   9040 
   9041 		*rxq->rxq_tailp = NULL;
   9042 		m = rxq->rxq_head;
   9043 
   9044 		WM_RXCHAIN_RESET(rxq);
   9045 
   9046 		DPRINTF(WM_DEBUG_RX,
   9047 		    ("%s: RX: have entire packet, len -> %d\n",
   9048 			device_xname(sc->sc_dev), len));
   9049 
   9050 		/* If an error occurred, update stats and drop the packet. */
   9051 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9052 			m_freem(m);
   9053 			continue;
   9054 		}
   9055 
   9056 		/* No errors.  Receive the packet. */
   9057 		m_set_rcvif(m, ifp);
   9058 		m->m_pkthdr.len = len;
   9059 		/*
    9060 		 * TODO: the rsshash and rsstype should be saved in
    9061 		 * this mbuf.
   9062 		 */
   9063 		DPRINTF(WM_DEBUG_RX,
   9064 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9065 			device_xname(sc->sc_dev), rsstype, rsshash));
   9066 
   9067 		/*
   9068 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9069 		 * for us.  Associate the tag with the packet.
   9070 		 */
   9071 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9072 			continue;
   9073 
   9074 		/* Set up checksum info for this packet. */
   9075 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9076 		/*
    9077 		 * Update the receive pointer while holding rxq_lock so it
    9078 		 * stays consistent with the counters.
   9079 		 */
   9080 		rxq->rxq_ptr = i;
   9081 		rxq->rxq_packets++;
   9082 		rxq->rxq_bytes += len;
   9083 		mutex_exit(rxq->rxq_lock);
   9084 
   9085 		/* Pass it on. */
   9086 		if_percpuq_enqueue(sc->sc_ipq, m);
   9087 
   9088 		mutex_enter(rxq->rxq_lock);
   9089 
   9090 		if (rxq->rxq_stopping)
   9091 			break;
   9092 	}
   9093 
   9094 	DPRINTF(WM_DEBUG_RX,
   9095 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9096 
   9097 	return more;
   9098 }
   9099 
   9100 /*
   9101  * wm_linkintr_gmii:
   9102  *
   9103  *	Helper; handle link interrupts for GMII.
   9104  */
   9105 static void
   9106 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9107 {
   9108 	device_t dev = sc->sc_dev;
   9109 	uint32_t status, reg;
   9110 	bool link;
   9111 	int rv;
   9112 
   9113 	KASSERT(WM_CORE_LOCKED(sc));
   9114 
   9115 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9116 		__func__));
   9117 
   9118 	if ((icr & ICR_LSC) == 0) {
   9119 		if (icr & ICR_RXSEQ)
   9120 			DPRINTF(WM_DEBUG_LINK,
   9121 			    ("%s: LINK Receive sequence error\n",
   9122 				device_xname(dev)));
   9123 		return;
   9124 	}
   9125 
   9126 	/* Link status changed */
   9127 	status = CSR_READ(sc, WMREG_STATUS);
   9128 	link = status & STATUS_LU;
   9129 	if (link) {
   9130 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9131 			device_xname(dev),
   9132 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9133 	} else {
   9134 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9135 			device_xname(dev)));
   9136 	}
   9137 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9138 		wm_gig_downshift_workaround_ich8lan(sc);
   9139 
   9140 	if ((sc->sc_type == WM_T_ICH8)
   9141 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9142 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9143 	}
   9144 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9145 		device_xname(dev)));
   9146 	mii_pollstat(&sc->sc_mii);
   9147 	if (sc->sc_type == WM_T_82543) {
   9148 		int miistatus, active;
   9149 
   9150 		/*
    9151 		 * With 82543, we need to force the MAC's
    9152 		 * speed and duplex to match the PHY's
    9153 		 * configuration.
   9154 		 */
   9155 		miistatus = sc->sc_mii.mii_media_status;
   9156 
   9157 		if (miistatus & IFM_ACTIVE) {
   9158 			active = sc->sc_mii.mii_media_active;
   9159 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9160 			switch (IFM_SUBTYPE(active)) {
   9161 			case IFM_10_T:
   9162 				sc->sc_ctrl |= CTRL_SPEED_10;
   9163 				break;
   9164 			case IFM_100_TX:
   9165 				sc->sc_ctrl |= CTRL_SPEED_100;
   9166 				break;
   9167 			case IFM_1000_T:
   9168 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9169 				break;
   9170 			default:
   9171 				/*
   9172 				 * Fiber?
    9173 				 * Should not enter here.
   9174 				 */
   9175 				device_printf(dev, "unknown media (%x)\n",
   9176 				    active);
   9177 				break;
   9178 			}
   9179 			if (active & IFM_FDX)
   9180 				sc->sc_ctrl |= CTRL_FD;
   9181 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9182 		}
   9183 	} else if (sc->sc_type == WM_T_PCH) {
   9184 		wm_k1_gig_workaround_hv(sc,
   9185 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9186 	}
   9187 
   9188 	/*
   9189 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9190 	 * aggressive resulting in many collisions. To avoid this, increase
   9191 	 * the IPG and reduce Rx latency in the PHY.
   9192 	 */
   9193 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9194 	    && link) {
   9195 		uint32_t tipg_reg;
   9196 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9197 		bool fdx;
   9198 		uint16_t emi_addr, emi_val;
   9199 
   9200 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9201 		tipg_reg &= ~TIPG_IPGT_MASK;
   9202 		fdx = status & STATUS_FD;
   9203 
   9204 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9205 			tipg_reg |= 0xff;
   9206 			/* Reduce Rx latency in analog PHY */
   9207 			emi_val = 0;
   9208 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9209 		    fdx && speed != STATUS_SPEED_1000) {
   9210 			tipg_reg |= 0xc;
   9211 			emi_val = 1;
   9212 		} else {
    9213 			/* Restore the default values */
   9214 			tipg_reg |= 0x08;
   9215 			emi_val = 1;
   9216 		}
   9217 
   9218 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9219 
   9220 		rv = sc->phy.acquire(sc);
   9221 		if (rv)
   9222 			return;
   9223 
   9224 		if (sc->sc_type == WM_T_PCH2)
   9225 			emi_addr = I82579_RX_CONFIG;
   9226 		else
   9227 			emi_addr = I217_RX_CONFIG;
   9228 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9229 
   9230 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9231 			uint16_t phy_reg;
   9232 
   9233 			sc->phy.readreg_locked(dev, 2,
   9234 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9235 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9236 			if (speed == STATUS_SPEED_100
   9237 			    || speed == STATUS_SPEED_10)
   9238 				phy_reg |= 0x3e8;
   9239 			else
   9240 				phy_reg |= 0xfa;
   9241 			sc->phy.writereg_locked(dev, 2,
   9242 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9243 
   9244 			if (speed == STATUS_SPEED_1000) {
   9245 				sc->phy.readreg_locked(dev, 2,
   9246 				    HV_PM_CTRL, &phy_reg);
   9247 
   9248 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9249 
   9250 				sc->phy.writereg_locked(dev, 2,
   9251 				    HV_PM_CTRL, phy_reg);
   9252 			}
   9253 		}
   9254 		sc->phy.release(sc);
   9255 
   9256 		if (rv)
   9257 			return;
   9258 
   9259 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9260 			uint16_t data, ptr_gap;
   9261 
   9262 			if (speed == STATUS_SPEED_1000) {
   9263 				rv = sc->phy.acquire(sc);
   9264 				if (rv)
   9265 					return;
   9266 
   9267 				rv = sc->phy.readreg_locked(dev, 2,
   9268 				    I219_UNKNOWN1, &data);
   9269 				if (rv) {
   9270 					sc->phy.release(sc);
   9271 					return;
   9272 				}
   9273 
   9274 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9275 				if (ptr_gap < 0x18) {
   9276 					data &= ~(0x3ff << 2);
   9277 					data |= (0x18 << 2);
   9278 					rv = sc->phy.writereg_locked(dev,
   9279 					    2, I219_UNKNOWN1, data);
   9280 				}
   9281 				sc->phy.release(sc);
   9282 				if (rv)
   9283 					return;
   9284 			} else {
   9285 				rv = sc->phy.acquire(sc);
   9286 				if (rv)
   9287 					return;
   9288 
   9289 				rv = sc->phy.writereg_locked(dev, 2,
   9290 				    I219_UNKNOWN1, 0xc023);
   9291 				sc->phy.release(sc);
   9292 				if (rv)
   9293 					return;
   9294 
   9295 			}
   9296 		}
   9297 	}
   9298 
   9299 	/*
   9300 	 * I217 Packet Loss issue:
    9301 	 * ensure that the FEXTNVM4 Beacon Duration is set correctly
    9302 	 * on power up.
    9303 	 * Set the Beacon Duration for I217 to 8 usec.
   9304 	 */
   9305 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9306 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9307 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9308 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9309 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9310 	}
   9311 
    9312 	/* Work around the I218 hang issue */
   9313 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9314 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9315 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9316 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9317 		wm_k1_workaround_lpt_lp(sc, link);
   9318 
   9319 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9320 		/*
   9321 		 * Set platform power management values for Latency
   9322 		 * Tolerance Reporting (LTR)
   9323 		 */
   9324 		wm_platform_pm_pch_lpt(sc,
   9325 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9326 	}
   9327 
   9328 	/* Clear link partner's EEE ability */
   9329 	sc->eee_lp_ability = 0;
   9330 
   9331 	/* FEXTNVM6 K1-off workaround */
   9332 	if (sc->sc_type == WM_T_PCH_SPT) {
   9333 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9334 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9335 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9336 		else
   9337 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9338 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9339 	}
   9340 
   9341 	if (!link)
   9342 		return;
   9343 
   9344 	switch (sc->sc_type) {
   9345 	case WM_T_PCH2:
   9346 		wm_k1_workaround_lv(sc);
   9347 		/* FALLTHROUGH */
   9348 	case WM_T_PCH:
   9349 		if (sc->sc_phytype == WMPHY_82578)
   9350 			wm_link_stall_workaround_hv(sc);
   9351 		break;
   9352 	default:
   9353 		break;
   9354 	}
   9355 
   9356 	/* Enable/Disable EEE after link up */
   9357 	if (sc->sc_phytype > WMPHY_82579)
   9358 		wm_set_eee_pchlan(sc);
   9359 }
   9360 
   9361 /*
   9362  * wm_linkintr_tbi:
   9363  *
   9364  *	Helper; handle link interrupts for TBI mode.
   9365  */
   9366 static void
   9367 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9368 {
   9369 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9370 	uint32_t status;
   9371 
   9372 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9373 		__func__));
   9374 
   9375 	status = CSR_READ(sc, WMREG_STATUS);
   9376 	if (icr & ICR_LSC) {
   9377 		wm_check_for_link(sc);
   9378 		if (status & STATUS_LU) {
   9379 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9380 				device_xname(sc->sc_dev),
   9381 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9382 			/*
    9383 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9384 			 * automatically, so we refresh our cached sc->sc_ctrl.
   9385 			 */
   9386 
   9387 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9388 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9389 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9390 			if (status & STATUS_FD)
   9391 				sc->sc_tctl |=
   9392 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9393 			else
   9394 				sc->sc_tctl |=
   9395 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9396 			if (sc->sc_ctrl & CTRL_TFCE)
   9397 				sc->sc_fcrtl |= FCRTL_XONE;
   9398 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9399 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9400 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9401 			sc->sc_tbi_linkup = 1;
   9402 			if_link_state_change(ifp, LINK_STATE_UP);
   9403 		} else {
   9404 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9405 				device_xname(sc->sc_dev)));
   9406 			sc->sc_tbi_linkup = 0;
   9407 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9408 		}
   9409 		/* Update LED */
   9410 		wm_tbi_serdes_set_linkled(sc);
   9411 	} else if (icr & ICR_RXSEQ)
   9412 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9413 			device_xname(sc->sc_dev)));
   9414 }
   9415 
   9416 /*
   9417  * wm_linkintr_serdes:
   9418  *
    9419  *	Helper; handle link interrupts for SERDES mode.
   9420  */
   9421 static void
   9422 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9423 {
   9424 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9425 	struct mii_data *mii = &sc->sc_mii;
   9426 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9427 	uint32_t pcs_adv, pcs_lpab, reg;
   9428 
   9429 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9430 		__func__));
   9431 
   9432 	if (icr & ICR_LSC) {
   9433 		/* Check PCS */
   9434 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9435 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9436 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9437 				device_xname(sc->sc_dev)));
   9438 			mii->mii_media_status |= IFM_ACTIVE;
   9439 			sc->sc_tbi_linkup = 1;
   9440 			if_link_state_change(ifp, LINK_STATE_UP);
   9441 		} else {
   9442 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9443 				device_xname(sc->sc_dev)));
   9444 			mii->mii_media_status |= IFM_NONE;
   9445 			sc->sc_tbi_linkup = 0;
   9446 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9447 			wm_tbi_serdes_set_linkled(sc);
   9448 			return;
   9449 		}
   9450 		mii->mii_media_active |= IFM_1000_SX;
   9451 		if ((reg & PCS_LSTS_FDX) != 0)
   9452 			mii->mii_media_active |= IFM_FDX;
   9453 		else
   9454 			mii->mii_media_active |= IFM_HDX;
   9455 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9456 			/* Check flow */
   9457 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9458 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9459 				DPRINTF(WM_DEBUG_LINK,
   9460 				    ("XXX LINKOK but not ACOMP\n"));
   9461 				return;
   9462 			}
   9463 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9464 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9465 			DPRINTF(WM_DEBUG_LINK,
   9466 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9467 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9468 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9469 				mii->mii_media_active |= IFM_FLOW
   9470 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9471 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9472 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9473 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9474 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9475 				mii->mii_media_active |= IFM_FLOW
   9476 				    | IFM_ETH_TXPAUSE;
   9477 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9478 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9479 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9480 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9481 				mii->mii_media_active |= IFM_FLOW
   9482 				    | IFM_ETH_RXPAUSE;
   9483 		}
   9484 		/* Update LED */
   9485 		wm_tbi_serdes_set_linkled(sc);
   9486 	} else
   9487 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9488 		    device_xname(sc->sc_dev)));
   9489 }
   9490 
   9491 /*
   9492  * wm_linkintr:
   9493  *
   9494  *	Helper; handle link interrupts.
   9495  */
   9496 static void
   9497 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9498 {
   9499 
   9500 	KASSERT(WM_CORE_LOCKED(sc));
   9501 
   9502 	if (sc->sc_flags & WM_F_HAS_MII)
   9503 		wm_linkintr_gmii(sc, icr);
   9504 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9505 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9506 		wm_linkintr_serdes(sc, icr);
   9507 	else
   9508 		wm_linkintr_tbi(sc, icr);
   9509 }
   9510 
   9511 
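/*
 * wm_sched_handle_queue:
 *
 *	Defer Tx/Rx processing for a queue to either a workqueue or a
 *	softint, depending on the per-queue wmq_txrx_use_workqueue flag.
 */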
   9512 static inline void
   9513 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9514 {
   9515 
   9516 	if (wmq->wmq_txrx_use_workqueue)
   9517 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9518 	else
   9519 		softint_schedule(wmq->wmq_si);
   9520 }
   9521 
   9522 /*
   9523  * wm_intr_legacy:
   9524  *
   9525  *	Interrupt service routine for INTx and MSI.
   9526  */
   9527 static int
   9528 wm_intr_legacy(void *arg)
   9529 {
   9530 	struct wm_softc *sc = arg;
   9531 	struct wm_queue *wmq = &sc->sc_queue[0];
   9532 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9533 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9534 	uint32_t icr, rndval = 0;
   9535 	int handled = 0;
   9536 
   9537 	while (1 /* CONSTCOND */) {
   9538 		icr = CSR_READ(sc, WMREG_ICR);
   9539 		if ((icr & sc->sc_icr) == 0)
   9540 			break;
   9541 		if (handled == 0)
   9542 			DPRINTF(WM_DEBUG_TX,
   9543 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9544 		if (rndval == 0)
   9545 			rndval = icr;
   9546 
   9547 		mutex_enter(rxq->rxq_lock);
   9548 
   9549 		if (rxq->rxq_stopping) {
   9550 			mutex_exit(rxq->rxq_lock);
   9551 			break;
   9552 		}
   9553 
   9554 		handled = 1;
   9555 
   9556 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9557 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9558 			DPRINTF(WM_DEBUG_RX,
   9559 			    ("%s: RX: got Rx intr 0x%08x\n",
   9560 				device_xname(sc->sc_dev),
   9561 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9562 			WM_Q_EVCNT_INCR(rxq, intr);
   9563 		}
   9564 #endif
   9565 		/*
   9566 		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
   9568 		 * So, we can call wm_rxeof() in interrupt context.
   9569 		 */
   9570 		wm_rxeof(rxq, UINT_MAX);
   9571 		/* Fill lower bits with RX index. See below for the upper. */
   9572 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9573 
   9574 		mutex_exit(rxq->rxq_lock);
   9575 		mutex_enter(txq->txq_lock);
   9576 
   9577 		if (txq->txq_stopping) {
   9578 			mutex_exit(txq->txq_lock);
   9579 			break;
   9580 		}
   9581 
   9582 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9583 		if (icr & ICR_TXDW) {
   9584 			DPRINTF(WM_DEBUG_TX,
   9585 			    ("%s: TX: got TXDW interrupt\n",
   9586 				device_xname(sc->sc_dev)));
   9587 			WM_Q_EVCNT_INCR(txq, txdw);
   9588 		}
   9589 #endif
   9590 		wm_txeof(txq, UINT_MAX);
   9591 		/* Fill upper bits with TX index. See above for the lower. */
		rndval |= txq->txq_next * WM_NRXDESC;
   9593 
   9594 		mutex_exit(txq->txq_lock);
   9595 		WM_CORE_LOCK(sc);
   9596 
   9597 		if (sc->sc_core_stopping) {
   9598 			WM_CORE_UNLOCK(sc);
   9599 			break;
   9600 		}
   9601 
   9602 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9603 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9604 			wm_linkintr(sc, icr);
   9605 		}
   9606 		if ((icr & ICR_GPI(0)) != 0)
   9607 			device_printf(sc->sc_dev, "got module interrupt\n");
   9608 
   9609 		WM_CORE_UNLOCK(sc);
   9610 
   9611 		if (icr & ICR_RXO) {
   9612 #if defined(WM_DEBUG)
   9613 			log(LOG_WARNING, "%s: Receive overrun\n",
   9614 			    device_xname(sc->sc_dev));
   9615 #endif /* defined(WM_DEBUG) */
   9616 		}
   9617 	}
   9618 
   9619 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
   9620 
   9621 	if (handled) {
   9622 		/* Try to get more packets going. */
   9623 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9624 		wm_sched_handle_queue(sc, wmq);
   9625 	}
   9626 
   9627 	return handled;
   9628 }
   9629 
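/*
 * wm_txrxintr_disable:
 *
 *	Mask this queue's Tx/Rx interrupts: via IMC on the 82574, via the
 *	per-queue EIMC bits on the 82575, and via one EIMC bit per MSI-X
 *	vector on newer devices.
 */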
   9630 static inline void
   9631 wm_txrxintr_disable(struct wm_queue *wmq)
   9632 {
   9633 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9634 
   9635 	if (sc->sc_type == WM_T_82574)
   9636 		CSR_WRITE(sc, WMREG_IMC,
   9637 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9638 	else if (sc->sc_type == WM_T_82575)
   9639 		CSR_WRITE(sc, WMREG_EIMC,
   9640 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9641 	else
   9642 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9643 }
   9644 
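/*
 * wm_txrxintr_enable:
 *
 *	Recompute the interrupt throttling rate for this queue, then
 *	unmask its Tx/Rx interrupts (and, on the 82574, ICR_OTHER too).
 */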
   9645 static inline void
   9646 wm_txrxintr_enable(struct wm_queue *wmq)
   9647 {
   9648 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9649 
   9650 	wm_itrs_calculate(sc, wmq);
   9651 
	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
	 * here. It does not matter whether RXQ(0) or RXQ(1) re-enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
	 * its wm_handle_queue(wmq) is running.
	 */
   9658 	if (sc->sc_type == WM_T_82574)
   9659 		CSR_WRITE(sc, WMREG_IMS,
   9660 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9661 	else if (sc->sc_type == WM_T_82575)
   9662 		CSR_WRITE(sc, WMREG_EIMS,
   9663 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9664 	else
   9665 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9666 }
   9667 
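/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for a Tx/Rx queue pair for MSI-X. The
 *	queue's interrupts stay masked until the deferred work completes.
 */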
   9668 static int
   9669 wm_txrxintr_msix(void *arg)
   9670 {
   9671 	struct wm_queue *wmq = arg;
   9672 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9673 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9674 	struct wm_softc *sc = txq->txq_sc;
   9675 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9676 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9677 	uint32_t rndval = 0;
   9678 	bool txmore;
   9679 	bool rxmore;
   9680 
   9681 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9682 
   9683 	DPRINTF(WM_DEBUG_TX,
   9684 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9685 
   9686 	wm_txrxintr_disable(wmq);
   9687 
   9688 	mutex_enter(txq->txq_lock);
   9689 
   9690 	if (txq->txq_stopping) {
   9691 		mutex_exit(txq->txq_lock);
   9692 		return 0;
   9693 	}
   9694 
   9695 	WM_Q_EVCNT_INCR(txq, txdw);
   9696 	txmore = wm_txeof(txq, txlimit);
   9697 	/* Fill upper bits with TX index. See below for the lower. */
   9698 	rndval = txq->txq_next * WM_NRXDESC;
   9699 	/* wm_deferred start() is done in wm_handle_queue(). */
   9700 	mutex_exit(txq->txq_lock);
   9701 
   9702 	DPRINTF(WM_DEBUG_RX,
   9703 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9704 	mutex_enter(rxq->rxq_lock);
   9705 
   9706 	if (rxq->rxq_stopping) {
   9707 		mutex_exit(rxq->rxq_lock);
   9708 		return 0;
   9709 	}
   9710 
   9711 	WM_Q_EVCNT_INCR(rxq, intr);
   9712 	rxmore = wm_rxeof(rxq, rxlimit);
   9713 
   9714 	/* Fill lower bits with RX index. See above for the upper. */
   9715 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9716 	mutex_exit(rxq->rxq_lock);
   9717 
   9718 	wm_itrs_writereg(sc, wmq);
   9719 
	/*
	 * This function is called in hardware interrupt context, and this
	 * queue's interrupt is serviced on a single CPU, so taking a lock
	 * is not required.
	 */
   9724 	if (rndval != 0)
   9725 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
   9726 
   9727 	if (txmore || rxmore) {
   9728 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9729 		wm_sched_handle_queue(sc, wmq);
   9730 	} else
   9731 		wm_txrxintr_enable(wmq);
   9732 
   9733 	return 1;
   9734 }
   9735 
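/*
 * wm_handle_queue:
 *
 *	Softint handler to continue the Tx/Rx processing deferred by
 *	wm_txrxintr_msix() and wm_intr_legacy(); reschedules itself while
 *	work remains, otherwise re-enables the queue's interrupts.
 */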
   9736 static void
   9737 wm_handle_queue(void *arg)
   9738 {
   9739 	struct wm_queue *wmq = arg;
   9740 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9741 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9742 	struct wm_softc *sc = txq->txq_sc;
   9743 	u_int txlimit = sc->sc_tx_process_limit;
   9744 	u_int rxlimit = sc->sc_rx_process_limit;
   9745 	bool txmore;
   9746 	bool rxmore;
   9747 
   9748 	mutex_enter(txq->txq_lock);
   9749 	if (txq->txq_stopping) {
   9750 		mutex_exit(txq->txq_lock);
   9751 		return;
   9752 	}
   9753 	txmore = wm_txeof(txq, txlimit);
   9754 	wm_deferred_start_locked(txq);
   9755 	mutex_exit(txq->txq_lock);
   9756 
   9757 	mutex_enter(rxq->rxq_lock);
   9758 	if (rxq->rxq_stopping) {
   9759 		mutex_exit(rxq->rxq_lock);
   9760 		return;
   9761 	}
   9762 	WM_Q_EVCNT_INCR(rxq, defer);
   9763 	rxmore = wm_rxeof(rxq, rxlimit);
   9764 	mutex_exit(rxq->rxq_lock);
   9765 
   9766 	if (txmore || rxmore) {
   9767 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9768 		wm_sched_handle_queue(sc, wmq);
   9769 	} else
   9770 		wm_txrxintr_enable(wmq);
   9771 }
   9772 
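/*
 * wm_handle_queue_work:
 *
 *	Workqueue wrapper for wm_handle_queue().
 */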
   9773 static void
   9774 wm_handle_queue_work(struct work *wk, void *context)
   9775 {
   9776 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9777 
   9778 	/*
   9779 	 * "enqueued flag" is not required here.
   9780 	 */
   9781 	wm_handle_queue(wmq);
   9782 }
   9783 
   9784 /*
   9785  * wm_linkintr_msix:
   9786  *
   9787  *	Interrupt service routine for link status change for MSI-X.
   9788  */
   9789 static int
   9790 wm_linkintr_msix(void *arg)
   9791 {
   9792 	struct wm_softc *sc = arg;
   9793 	uint32_t reg;
   9794 	bool has_rxo;
   9795 
   9796 	reg = CSR_READ(sc, WMREG_ICR);
   9797 	WM_CORE_LOCK(sc);
   9798 	DPRINTF(WM_DEBUG_LINK,
   9799 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9800 		device_xname(sc->sc_dev), reg));
   9801 
   9802 	if (sc->sc_core_stopping)
   9803 		goto out;
   9804 
   9805 	if ((reg & ICR_LSC) != 0) {
   9806 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9807 		wm_linkintr(sc, ICR_LSC);
   9808 	}
   9809 	if ((reg & ICR_GPI(0)) != 0)
   9810 		device_printf(sc->sc_dev, "got module interrupt\n");
   9811 
	/*
	 * XXX 82574 MSI-X mode workaround
	 *
	 * In 82574 MSI-X mode, a receive overrun (RXO) interrupt is
	 * delivered on the ICR_OTHER MSI-X vector, and it triggers neither
	 * the ICR_RXQ(0) nor the ICR_RXQ(1) vector. So, we generate
	 * ICR_RXQ(0) and ICR_RXQ(1) interrupts by writing WMREG_ICS to
	 * process received packets.
	 */
   9820 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9821 #if defined(WM_DEBUG)
   9822 		log(LOG_WARNING, "%s: Receive overrun\n",
   9823 		    device_xname(sc->sc_dev));
   9824 #endif /* defined(WM_DEBUG) */
   9825 
   9826 		has_rxo = true;
		/*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so we handle ICR_OTHER in polling mode,
		 * as we do for the Tx/Rx interrupts. ICR_OTHER is re-enabled
		 * at the end of wm_txrxintr_msix(), which is kicked by both
		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
		 */
   9834 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9835 
   9836 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9837 	}
   9838 
   9841 out:
   9842 	WM_CORE_UNLOCK(sc);
   9843 
   9844 	if (sc->sc_type == WM_T_82574) {
   9845 		if (!has_rxo)
   9846 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9847 		else
   9848 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9849 	} else if (sc->sc_type == WM_T_82575)
   9850 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9851 	else
   9852 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9853 
   9854 	return 1;
   9855 }
   9856 
   9857 /*
   9858  * Media related.
   9859  * GMII, SGMII, TBI (and SERDES)
   9860  */
   9861 
   9862 /* Common */
   9863 
   9864 /*
   9865  * wm_tbi_serdes_set_linkled:
   9866  *
   9867  *	Update the link LED on TBI and SERDES devices.
   9868  */
   9869 static void
   9870 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9871 {
   9872 
   9873 	if (sc->sc_tbi_linkup)
   9874 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9875 	else
   9876 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9877 
   9878 	/* 82540 or newer devices are active low */
   9879 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9880 
   9881 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9882 }
   9883 
   9884 /* GMII related */
   9885 
   9886 /*
   9887  * wm_gmii_reset:
   9888  *
   9889  *	Reset the PHY.
   9890  */
   9891 static void
   9892 wm_gmii_reset(struct wm_softc *sc)
   9893 {
   9894 	uint32_t reg;
   9895 	int rv;
   9896 
   9897 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9898 		device_xname(sc->sc_dev), __func__));
   9899 
   9900 	rv = sc->phy.acquire(sc);
   9901 	if (rv != 0) {
   9902 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9903 		    __func__);
   9904 		return;
   9905 	}
   9906 
   9907 	switch (sc->sc_type) {
   9908 	case WM_T_82542_2_0:
   9909 	case WM_T_82542_2_1:
   9910 		/* null */
   9911 		break;
   9912 	case WM_T_82543:
   9913 		/*
   9914 		 * With 82543, we need to force speed and duplex on the MAC
   9915 		 * equal to what the PHY speed and duplex configuration is.
   9916 		 * In addition, we need to perform a hardware reset on the PHY
   9917 		 * to take it out of reset.
   9918 		 */
   9919 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9920 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9921 
   9922 		/* The PHY reset pin is active-low. */
   9923 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9924 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9925 		    CTRL_EXT_SWDPIN(4));
   9926 		reg |= CTRL_EXT_SWDPIO(4);
   9927 
   9928 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9929 		CSR_WRITE_FLUSH(sc);
   9930 		delay(10*1000);
   9931 
   9932 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9933 		CSR_WRITE_FLUSH(sc);
   9934 		delay(150);
   9935 #if 0
   9936 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9937 #endif
   9938 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9939 		break;
   9940 	case WM_T_82544:	/* Reset 10000us */
   9941 	case WM_T_82540:
   9942 	case WM_T_82545:
   9943 	case WM_T_82545_3:
   9944 	case WM_T_82546:
   9945 	case WM_T_82546_3:
   9946 	case WM_T_82541:
   9947 	case WM_T_82541_2:
   9948 	case WM_T_82547:
   9949 	case WM_T_82547_2:
   9950 	case WM_T_82571:	/* Reset 100us */
   9951 	case WM_T_82572:
   9952 	case WM_T_82573:
   9953 	case WM_T_82574:
   9954 	case WM_T_82575:
   9955 	case WM_T_82576:
   9956 	case WM_T_82580:
   9957 	case WM_T_I350:
   9958 	case WM_T_I354:
   9959 	case WM_T_I210:
   9960 	case WM_T_I211:
   9961 	case WM_T_82583:
   9962 	case WM_T_80003:
   9963 		/* Generic reset */
   9964 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9965 		CSR_WRITE_FLUSH(sc);
   9966 		delay(20000);
   9967 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9968 		CSR_WRITE_FLUSH(sc);
   9969 		delay(20000);
   9970 
   9971 		if ((sc->sc_type == WM_T_82541)
   9972 		    || (sc->sc_type == WM_T_82541_2)
   9973 		    || (sc->sc_type == WM_T_82547)
   9974 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP PHYs are done in igp_reset() */
   9976 			/* XXX add code to set LED after phy reset */
   9977 		}
   9978 		break;
   9979 	case WM_T_ICH8:
   9980 	case WM_T_ICH9:
   9981 	case WM_T_ICH10:
   9982 	case WM_T_PCH:
   9983 	case WM_T_PCH2:
   9984 	case WM_T_PCH_LPT:
   9985 	case WM_T_PCH_SPT:
   9986 	case WM_T_PCH_CNP:
   9987 		/* Generic reset */
   9988 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9989 		CSR_WRITE_FLUSH(sc);
   9990 		delay(100);
   9991 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9992 		CSR_WRITE_FLUSH(sc);
   9993 		delay(150);
   9994 		break;
   9995 	default:
   9996 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9997 		    __func__);
   9998 		break;
   9999 	}
   10000 
   10001 	sc->phy.release(sc);
   10002 
   10003 	/* get_cfg_done */
   10004 	wm_get_cfg_done(sc);
   10005 
   10006 	/* Extra setup */
   10007 	switch (sc->sc_type) {
   10008 	case WM_T_82542_2_0:
   10009 	case WM_T_82542_2_1:
   10010 	case WM_T_82543:
   10011 	case WM_T_82544:
   10012 	case WM_T_82540:
   10013 	case WM_T_82545:
   10014 	case WM_T_82545_3:
   10015 	case WM_T_82546:
   10016 	case WM_T_82546_3:
   10017 	case WM_T_82541_2:
   10018 	case WM_T_82547_2:
   10019 	case WM_T_82571:
   10020 	case WM_T_82572:
   10021 	case WM_T_82573:
   10022 	case WM_T_82574:
   10023 	case WM_T_82583:
   10024 	case WM_T_82575:
   10025 	case WM_T_82576:
   10026 	case WM_T_82580:
   10027 	case WM_T_I350:
   10028 	case WM_T_I354:
   10029 	case WM_T_I210:
   10030 	case WM_T_I211:
   10031 	case WM_T_80003:
   10032 		/* Null */
   10033 		break;
   10034 	case WM_T_82541:
   10035 	case WM_T_82547:
		/* XXX Configure LED after PHY reset */
   10037 		break;
   10038 	case WM_T_ICH8:
   10039 	case WM_T_ICH9:
   10040 	case WM_T_ICH10:
   10041 	case WM_T_PCH:
   10042 	case WM_T_PCH2:
   10043 	case WM_T_PCH_LPT:
   10044 	case WM_T_PCH_SPT:
   10045 	case WM_T_PCH_CNP:
   10046 		wm_phy_post_reset(sc);
   10047 		break;
   10048 	default:
   10049 		panic("%s: unknown type\n", __func__);
   10050 		break;
   10051 	}
   10052 }
   10053 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and to select them we need the PCI ID or the MAC type
 * without accessing any PHY register.
 *
 *  On the first call of this function, the PHY ID is not yet known, so
 * check the PCI ID or the MAC type. The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. This might still be imperfect because of missing table
 * entries, but it is more reliable than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   10072 static void
   10073 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10074     uint16_t phy_model)
   10075 {
   10076 	device_t dev = sc->sc_dev;
   10077 	struct mii_data *mii = &sc->sc_mii;
   10078 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10079 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10080 	mii_readreg_t new_readreg;
   10081 	mii_writereg_t new_writereg;
   10082 	bool dodiag = true;
   10083 
   10084 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10085 		device_xname(sc->sc_dev), __func__));
   10086 
	/*
	 * A 1000BASE-T SFP uses SGMII, so the PHY type assumed on the
	 * first call is always incorrect. Don't print diagnostic output
	 * on the second call in that case.
	 */
   10091 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10092 		dodiag = false;
   10093 
   10094 	if (mii->mii_readreg == NULL) {
   10095 		/*
   10096 		 *  This is the first call of this function. For ICH and PCH
   10097 		 * variants, it's difficult to determine the PHY access method
   10098 		 * by sc_type, so use the PCI product ID for some devices.
   10099 		 */
   10100 
   10101 		switch (sc->sc_pcidevid) {
   10102 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10103 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10104 			/* 82577 */
   10105 			new_phytype = WMPHY_82577;
   10106 			break;
   10107 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10108 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10109 			/* 82578 */
   10110 			new_phytype = WMPHY_82578;
   10111 			break;
   10112 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10113 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10114 			/* 82579 */
   10115 			new_phytype = WMPHY_82579;
   10116 			break;
   10117 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10118 		case PCI_PRODUCT_INTEL_82801I_BM:
   10119 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10120 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10121 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10122 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10123 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10124 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10125 			/* ICH8, 9, 10 with 82567 */
   10126 			new_phytype = WMPHY_BM;
   10127 			break;
   10128 		default:
   10129 			break;
   10130 		}
   10131 	} else {
   10132 		/* It's not the first call. Use PHY OUI and model */
   10133 		switch (phy_oui) {
   10134 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10135 			switch (phy_model) {
   10136 			case 0x0004: /* XXX */
   10137 				new_phytype = WMPHY_82578;
   10138 				break;
   10139 			default:
   10140 				break;
   10141 			}
   10142 			break;
   10143 		case MII_OUI_xxMARVELL:
   10144 			switch (phy_model) {
   10145 			case MII_MODEL_xxMARVELL_I210:
   10146 				new_phytype = WMPHY_I210;
   10147 				break;
   10148 			case MII_MODEL_xxMARVELL_E1011:
   10149 			case MII_MODEL_xxMARVELL_E1000_3:
   10150 			case MII_MODEL_xxMARVELL_E1000_5:
   10151 			case MII_MODEL_xxMARVELL_E1112:
   10152 				new_phytype = WMPHY_M88;
   10153 				break;
   10154 			case MII_MODEL_xxMARVELL_E1149:
   10155 				new_phytype = WMPHY_BM;
   10156 				break;
   10157 			case MII_MODEL_xxMARVELL_E1111:
   10158 			case MII_MODEL_xxMARVELL_I347:
   10159 			case MII_MODEL_xxMARVELL_E1512:
   10160 			case MII_MODEL_xxMARVELL_E1340M:
   10161 			case MII_MODEL_xxMARVELL_E1543:
   10162 				new_phytype = WMPHY_M88;
   10163 				break;
   10164 			case MII_MODEL_xxMARVELL_I82563:
   10165 				new_phytype = WMPHY_GG82563;
   10166 				break;
   10167 			default:
   10168 				break;
   10169 			}
   10170 			break;
   10171 		case MII_OUI_INTEL:
   10172 			switch (phy_model) {
   10173 			case MII_MODEL_INTEL_I82577:
   10174 				new_phytype = WMPHY_82577;
   10175 				break;
   10176 			case MII_MODEL_INTEL_I82579:
   10177 				new_phytype = WMPHY_82579;
   10178 				break;
   10179 			case MII_MODEL_INTEL_I217:
   10180 				new_phytype = WMPHY_I217;
   10181 				break;
   10182 			case MII_MODEL_INTEL_I82580:
   10183 			case MII_MODEL_INTEL_I350:
   10184 				new_phytype = WMPHY_82580;
   10185 				break;
   10186 			default:
   10187 				break;
   10188 			}
   10189 			break;
   10190 		case MII_OUI_yyINTEL:
   10191 			switch (phy_model) {
   10192 			case MII_MODEL_yyINTEL_I82562G:
   10193 			case MII_MODEL_yyINTEL_I82562EM:
   10194 			case MII_MODEL_yyINTEL_I82562ET:
   10195 				new_phytype = WMPHY_IFE;
   10196 				break;
   10197 			case MII_MODEL_yyINTEL_IGP01E1000:
   10198 				new_phytype = WMPHY_IGP;
   10199 				break;
   10200 			case MII_MODEL_yyINTEL_I82566:
   10201 				new_phytype = WMPHY_IGP_3;
   10202 				break;
   10203 			default:
   10204 				break;
   10205 			}
   10206 			break;
   10207 		default:
   10208 			break;
   10209 		}
   10210 
   10211 		if (dodiag) {
   10212 			if (new_phytype == WMPHY_UNKNOWN)
   10213 				aprint_verbose_dev(dev,
   10214 				    "%s: Unknown PHY model. OUI=%06x, "
   10215 				    "model=%04x\n", __func__, phy_oui,
   10216 				    phy_model);
   10217 
   10218 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10219 			    && (sc->sc_phytype != new_phytype)) {
				aprint_error_dev(dev, "Previously assumed PHY "
				    "type (%u) was incorrect. PHY type from "
				    "PHY ID = %u\n", sc->sc_phytype,
				    new_phytype);
   10223 			}
   10224 		}
   10225 	}
   10226 
   10227 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10228 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10229 		/* SGMII */
   10230 		new_readreg = wm_sgmii_readreg;
   10231 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   10233 		/* BM2 (phyaddr == 1) */
   10234 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10235 		    && (new_phytype != WMPHY_BM)
   10236 		    && (new_phytype != WMPHY_UNKNOWN))
   10237 			doubt_phytype = new_phytype;
   10238 		new_phytype = WMPHY_BM;
   10239 		new_readreg = wm_gmii_bm_readreg;
   10240 		new_writereg = wm_gmii_bm_writereg;
   10241 	} else if (sc->sc_type >= WM_T_PCH) {
   10242 		/* All PCH* use _hv_ */
   10243 		new_readreg = wm_gmii_hv_readreg;
   10244 		new_writereg = wm_gmii_hv_writereg;
   10245 	} else if (sc->sc_type >= WM_T_ICH8) {
   10246 		/* non-82567 ICH8, 9 and 10 */
   10247 		new_readreg = wm_gmii_i82544_readreg;
   10248 		new_writereg = wm_gmii_i82544_writereg;
   10249 	} else if (sc->sc_type >= WM_T_80003) {
   10250 		/* 80003 */
   10251 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10252 		    && (new_phytype != WMPHY_GG82563)
   10253 		    && (new_phytype != WMPHY_UNKNOWN))
   10254 			doubt_phytype = new_phytype;
   10255 		new_phytype = WMPHY_GG82563;
   10256 		new_readreg = wm_gmii_i80003_readreg;
   10257 		new_writereg = wm_gmii_i80003_writereg;
   10258 	} else if (sc->sc_type >= WM_T_I210) {
   10259 		/* I210 and I211 */
   10260 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10261 		    && (new_phytype != WMPHY_I210)
   10262 		    && (new_phytype != WMPHY_UNKNOWN))
   10263 			doubt_phytype = new_phytype;
   10264 		new_phytype = WMPHY_I210;
   10265 		new_readreg = wm_gmii_gs40g_readreg;
   10266 		new_writereg = wm_gmii_gs40g_writereg;
   10267 	} else if (sc->sc_type >= WM_T_82580) {
   10268 		/* 82580, I350 and I354 */
   10269 		new_readreg = wm_gmii_82580_readreg;
   10270 		new_writereg = wm_gmii_82580_writereg;
   10271 	} else if (sc->sc_type >= WM_T_82544) {
   10272 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   10273 		new_readreg = wm_gmii_i82544_readreg;
   10274 		new_writereg = wm_gmii_i82544_writereg;
   10275 	} else {
   10276 		new_readreg = wm_gmii_i82543_readreg;
   10277 		new_writereg = wm_gmii_i82543_writereg;
   10278 	}
   10279 
   10280 	if (new_phytype == WMPHY_BM) {
   10281 		/* All BM use _bm_ */
   10282 		new_readreg = wm_gmii_bm_readreg;
   10283 		new_writereg = wm_gmii_bm_writereg;
   10284 	}
   10285 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10286 		/* All PCH* use _hv_ */
   10287 		new_readreg = wm_gmii_hv_readreg;
   10288 		new_writereg = wm_gmii_hv_writereg;
   10289 	}
   10290 
   10291 	/* Diag output */
   10292 	if (dodiag) {
   10293 		if (doubt_phytype != WMPHY_UNKNOWN)
   10294 			aprint_error_dev(dev, "Assumed new PHY type was "
   10295 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10296 			    new_phytype);
   10297 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10298 		    && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. New PHY type = %u\n",
   10301 			    sc->sc_phytype, new_phytype);
   10302 
   10303 		if ((mii->mii_readreg != NULL) &&
   10304 		    (new_phytype == WMPHY_UNKNOWN))
   10305 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10306 
   10307 		if ((mii->mii_readreg != NULL) &&
   10308 		    (mii->mii_readreg != new_readreg))
   10309 			aprint_error_dev(dev, "Previously assumed PHY "
   10310 			    "read/write function was incorrect.\n");
   10311 	}
   10312 
   10313 	/* Update now */
   10314 	sc->sc_phytype = new_phytype;
   10315 	mii->mii_readreg = new_readreg;
   10316 	mii->mii_writereg = new_writereg;
   10317 	if (new_readreg == wm_gmii_hv_readreg) {
   10318 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10319 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10320 	} else if (new_readreg == wm_sgmii_readreg) {
   10321 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10322 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10323 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10324 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10325 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10326 	}
   10327 }
   10328 
   10329 /*
   10330  * wm_get_phy_id_82575:
   10331  *
   10332  * Return PHY ID. Return -1 if it failed.
   10333  */
   10334 static int
   10335 wm_get_phy_id_82575(struct wm_softc *sc)
   10336 {
   10337 	uint32_t reg;
   10338 	int phyid = -1;
   10339 
   10340 	/* XXX */
   10341 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10342 		return -1;
   10343 
   10344 	if (wm_sgmii_uses_mdio(sc)) {
   10345 		switch (sc->sc_type) {
   10346 		case WM_T_82575:
   10347 		case WM_T_82576:
   10348 			reg = CSR_READ(sc, WMREG_MDIC);
   10349 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10350 			break;
   10351 		case WM_T_82580:
   10352 		case WM_T_I350:
   10353 		case WM_T_I354:
   10354 		case WM_T_I210:
   10355 		case WM_T_I211:
   10356 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10357 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10358 			break;
   10359 		default:
   10360 			return -1;
   10361 		}
   10362 	}
   10363 
   10364 	return phyid;
   10365 }
   10366 
   10367 
   10368 /*
   10369  * wm_gmii_mediainit:
   10370  *
   10371  *	Initialize media for use on 1000BASE-T devices.
   10372  */
   10373 static void
   10374 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10375 {
   10376 	device_t dev = sc->sc_dev;
   10377 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10378 	struct mii_data *mii = &sc->sc_mii;
   10379 
   10380 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10381 		device_xname(sc->sc_dev), __func__));
   10382 
   10383 	/* We have GMII. */
   10384 	sc->sc_flags |= WM_F_HAS_MII;
   10385 
   10386 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10388 	else
   10389 		sc->sc_tipg = TIPG_1000T_DFLT;
   10390 
   10391 	/*
   10392 	 * Let the chip set speed/duplex on its own based on
   10393 	 * signals from the PHY.
   10394 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10395 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10396 	 */
   10397 	sc->sc_ctrl |= CTRL_SLU;
   10398 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10399 
   10400 	/* Initialize our media structures and probe the GMII. */
   10401 	mii->mii_ifp = ifp;
   10402 
   10403 	mii->mii_statchg = wm_gmii_statchg;
   10404 
   10405 	/* get PHY control from SMBus to PCIe */
   10406 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10407 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10408 	    || (sc->sc_type == WM_T_PCH_CNP))
   10409 		wm_init_phy_workarounds_pchlan(sc);
   10410 
   10411 	wm_gmii_reset(sc);
   10412 
   10413 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10414 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10415 	    wm_gmii_mediastatus);
   10416 
   10417 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10418 	    || (sc->sc_type == WM_T_82580)
   10419 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10420 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10421 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10422 			/* Attach only one port */
   10423 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10424 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10425 		} else {
   10426 			int i, id;
   10427 			uint32_t ctrl_ext;
   10428 
   10429 			id = wm_get_phy_id_82575(sc);
   10430 			if (id != -1) {
   10431 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10432 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10433 			}
   10434 			if ((id == -1)
   10435 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10436 				/* Power on sgmii phy if it is disabled */
   10437 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10438 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10439 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10440 				CSR_WRITE_FLUSH(sc);
   10441 				delay(300*1000); /* XXX too long */
   10442 
				/*
				 * Scan PHY addresses 1 through 7.
				 *
				 * I2C access can fail with the I2C
				 * register's ERROR bit set, so suppress
				 * error messages while scanning.
				 */
   10450 				sc->phy.no_errprint = true;
   10451 				for (i = 1; i < 8; i++)
   10452 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10453 					    0xffffffff, i, MII_OFFSET_ANY,
   10454 					    MIIF_DOPAUSE);
   10455 				sc->phy.no_errprint = false;
   10456 
   10457 				/* Restore previous sfp cage power state */
   10458 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10459 			}
   10460 		}
   10461 	} else
   10462 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10463 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10464 
   10465 	/*
   10466 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10467 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10468 	 */
   10469 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10470 		|| (sc->sc_type == WM_T_PCH_SPT)
   10471 		|| (sc->sc_type == WM_T_PCH_CNP))
   10472 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10473 		wm_set_mdio_slow_mode_hv(sc);
   10474 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10475 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10476 	}
   10477 
   10478 	/*
   10479 	 * (For ICH8 variants)
   10480 	 * If PHY detection failed, use BM's r/w function and retry.
   10481 	 */
   10482 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10483 		/* if failed, retry with *_bm_* */
   10484 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10485 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10486 		    sc->sc_phytype);
   10487 		sc->sc_phytype = WMPHY_BM;
   10488 		mii->mii_readreg = wm_gmii_bm_readreg;
   10489 		mii->mii_writereg = wm_gmii_bm_writereg;
   10490 
   10491 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10492 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10493 	}
   10494 
   10495 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   10497 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10498 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10499 		sc->sc_phytype = WMPHY_NONE;
   10500 	} else {
   10501 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10502 
   10503 		/*
   10504 		 * PHY Found! Check PHY type again by the second call of
   10505 		 * wm_gmii_setup_phytype.
   10506 		 */
   10507 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10508 		    child->mii_mpd_model);
   10509 
   10510 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10511 	}
   10512 }
   10513 
   10514 /*
   10515  * wm_gmii_mediachange:	[ifmedia interface function]
   10516  *
   10517  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10518  */
   10519 static int
   10520 wm_gmii_mediachange(struct ifnet *ifp)
   10521 {
   10522 	struct wm_softc *sc = ifp->if_softc;
   10523 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10524 	uint32_t reg;
   10525 	int rc;
   10526 
   10527 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10528 		device_xname(sc->sc_dev), __func__));
   10529 	if ((ifp->if_flags & IFF_UP) == 0)
   10530 		return 0;
   10531 
   10532 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10533 	if ((sc->sc_type == WM_T_82580)
   10534 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10535 	    || (sc->sc_type == WM_T_I211)) {
   10536 		reg = CSR_READ(sc, WMREG_PHPM);
   10537 		reg &= ~PHPM_GO_LINK_D;
   10538 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10539 	}
   10540 
   10541 	/* Disable D0 LPLU. */
   10542 	wm_lplu_d0_disable(sc);
   10543 
   10544 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10545 	sc->sc_ctrl |= CTRL_SLU;
   10546 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10547 	    || (sc->sc_type > WM_T_82543)) {
   10548 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10549 	} else {
   10550 		sc->sc_ctrl &= ~CTRL_ASDE;
   10551 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10552 		if (ife->ifm_media & IFM_FDX)
   10553 			sc->sc_ctrl |= CTRL_FD;
   10554 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10555 		case IFM_10_T:
   10556 			sc->sc_ctrl |= CTRL_SPEED_10;
   10557 			break;
   10558 		case IFM_100_TX:
   10559 			sc->sc_ctrl |= CTRL_SPEED_100;
   10560 			break;
   10561 		case IFM_1000_T:
   10562 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10563 			break;
   10564 		case IFM_NONE:
   10565 			/* There is no specific setting for IFM_NONE */
   10566 			break;
   10567 		default:
   10568 			panic("wm_gmii_mediachange: bad media 0x%x",
   10569 			    ife->ifm_media);
   10570 		}
   10571 	}
   10572 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10573 	CSR_WRITE_FLUSH(sc);
   10574 
   10575 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10576 		wm_serdes_mediachange(ifp);
   10577 
   10578 	if (sc->sc_type <= WM_T_82543)
   10579 		wm_gmii_reset(sc);
   10580 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10581 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
   10583 		delay(300 * 1000);
   10584 		wm_gmii_reset(sc);
   10585 	}
   10586 
   10587 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10588 		return 0;
   10589 	return rc;
   10590 }
   10591 
   10592 /*
   10593  * wm_gmii_mediastatus:	[ifmedia interface function]
   10594  *
   10595  *	Get the current interface media status on a 1000BASE-T device.
   10596  */
   10597 static void
   10598 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10599 {
   10600 	struct wm_softc *sc = ifp->if_softc;
   10601 
   10602 	ether_mediastatus(ifp, ifmr);
   10603 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10604 	    | sc->sc_flowflags;
   10605 }
   10606 
   10607 #define	MDI_IO		CTRL_SWDPIN(2)
   10608 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10609 #define	MDI_CLK		CTRL_SWDPIN(3)
   10610 
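/*
 * Bit-bang MDIO (IEEE 802.3 clause 22 framing) on the CTRL register's
 * software-definable pins. A read sends <preamble:32x1> <start:01>
 * <op:10> <phy:5> <reg:5> and then samples 16 data bits driven by the
 * PHY; a write sends <preamble:32x1> <start:01> <op:01> <phy:5> <reg:5>
 * <turnaround:10> <data:16>.
 */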
   10611 static void
   10612 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10613 {
   10614 	uint32_t i, v;
   10615 
   10616 	v = CSR_READ(sc, WMREG_CTRL);
   10617 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10618 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10619 
   10620 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10621 		if (data & i)
   10622 			v |= MDI_IO;
   10623 		else
   10624 			v &= ~MDI_IO;
   10625 		CSR_WRITE(sc, WMREG_CTRL, v);
   10626 		CSR_WRITE_FLUSH(sc);
   10627 		delay(10);
   10628 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10629 		CSR_WRITE_FLUSH(sc);
   10630 		delay(10);
   10631 		CSR_WRITE(sc, WMREG_CTRL, v);
   10632 		CSR_WRITE_FLUSH(sc);
   10633 		delay(10);
   10634 	}
   10635 }
   10636 
   10637 static uint16_t
   10638 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10639 {
   10640 	uint32_t v, i;
   10641 	uint16_t data = 0;
   10642 
   10643 	v = CSR_READ(sc, WMREG_CTRL);
   10644 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10645 	v |= CTRL_SWDPIO(3);
   10646 
   10647 	CSR_WRITE(sc, WMREG_CTRL, v);
   10648 	CSR_WRITE_FLUSH(sc);
   10649 	delay(10);
   10650 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10651 	CSR_WRITE_FLUSH(sc);
   10652 	delay(10);
   10653 	CSR_WRITE(sc, WMREG_CTRL, v);
   10654 	CSR_WRITE_FLUSH(sc);
   10655 	delay(10);
   10656 
   10657 	for (i = 0; i < 16; i++) {
   10658 		data <<= 1;
   10659 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10660 		CSR_WRITE_FLUSH(sc);
   10661 		delay(10);
   10662 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10663 			data |= 1;
   10664 		CSR_WRITE(sc, WMREG_CTRL, v);
   10665 		CSR_WRITE_FLUSH(sc);
   10666 		delay(10);
   10667 	}
   10668 
   10669 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10670 	CSR_WRITE_FLUSH(sc);
   10671 	delay(10);
   10672 	CSR_WRITE(sc, WMREG_CTRL, v);
   10673 	CSR_WRITE_FLUSH(sc);
   10674 	delay(10);
   10675 
   10676 	return data;
   10677 }
   10678 
   10679 #undef MDI_IO
   10680 #undef MDI_DIR
   10681 #undef MDI_CLK
   10682 
   10683 /*
   10684  * wm_gmii_i82543_readreg:	[mii interface function]
   10685  *
   10686  *	Read a PHY register on the GMII (i82543 version).
   10687  */
   10688 static int
   10689 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10690 {
   10691 	struct wm_softc *sc = device_private(dev);
   10692 
   10693 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10694 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10695 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10696 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10697 
   10698 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10699 		device_xname(dev), phy, reg, *val));
   10700 
   10701 	return 0;
   10702 }
   10703 
   10704 /*
   10705  * wm_gmii_i82543_writereg:	[mii interface function]
   10706  *
   10707  *	Write a PHY register on the GMII (i82543 version).
   10708  */
   10709 static int
   10710 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10711 {
   10712 	struct wm_softc *sc = device_private(dev);
   10713 
   10714 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10715 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10716 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10717 	    (MII_COMMAND_START << 30), 32);
   10718 
   10719 	return 0;
   10720 }
   10721 
   10722 /*
   10723  * wm_gmii_mdic_readreg:	[mii interface function]
   10724  *
   10725  *	Read a PHY register on the GMII.
   10726  */
   10727 static int
   10728 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10729 {
   10730 	struct wm_softc *sc = device_private(dev);
   10731 	uint32_t mdic = 0;
   10732 	int i;
   10733 
   10734 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10735 	    && (reg > MII_ADDRMASK)) {
   10736 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10737 		    __func__, sc->sc_phytype, reg);
   10738 		reg &= MII_ADDRMASK;
   10739 	}
   10740 
   10741 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10742 	    MDIC_REGADD(reg));
   10743 
   10744 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10745 		delay(50);
   10746 		mdic = CSR_READ(sc, WMREG_MDIC);
   10747 		if (mdic & MDIC_READY)
   10748 			break;
   10749 	}
   10750 
   10751 	if ((mdic & MDIC_READY) == 0) {
   10752 		DPRINTF(WM_DEBUG_GMII,
   10753 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10754 			device_xname(dev), phy, reg));
   10755 		return ETIMEDOUT;
   10756 	} else if (mdic & MDIC_E) {
   10757 		/* This is normal if no PHY is present. */
   10758 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10759 			device_xname(sc->sc_dev), phy, reg));
   10760 		return -1;
   10761 	} else
   10762 		*val = MDIC_DATA(mdic);
   10763 
   10764 	/*
   10765 	 * Allow some time after each MDIC transaction to avoid
   10766 	 * reading duplicate data in the next MDIC transaction.
   10767 	 */
   10768 	if (sc->sc_type == WM_T_PCH2)
   10769 		delay(100);
   10770 
   10771 	return 0;
   10772 }
   10773 
   10774 /*
   10775  * wm_gmii_mdic_writereg:	[mii interface function]
   10776  *
   10777  *	Write a PHY register on the GMII.
   10778  */
   10779 static int
   10780 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10781 {
   10782 	struct wm_softc *sc = device_private(dev);
   10783 	uint32_t mdic = 0;
   10784 	int i;
   10785 
   10786 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10787 	    && (reg > MII_ADDRMASK)) {
   10788 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10789 		    __func__, sc->sc_phytype, reg);
   10790 		reg &= MII_ADDRMASK;
   10791 	}
   10792 
   10793 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10794 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10795 
   10796 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10797 		delay(50);
   10798 		mdic = CSR_READ(sc, WMREG_MDIC);
   10799 		if (mdic & MDIC_READY)
   10800 			break;
   10801 	}
   10802 
   10803 	if ((mdic & MDIC_READY) == 0) {
   10804 		DPRINTF(WM_DEBUG_GMII,
   10805 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10806 			device_xname(dev), phy, reg));
   10807 		return ETIMEDOUT;
   10808 	} else if (mdic & MDIC_E) {
   10809 		DPRINTF(WM_DEBUG_GMII,
   10810 		    ("%s: MDIC write error: phy %d reg %d\n",
   10811 			device_xname(dev), phy, reg));
   10812 		return -1;
   10813 	}
   10814 
   10815 	/*
   10816 	 * Allow some time after each MDIC transaction to avoid
   10817 	 * reading duplicate data in the next MDIC transaction.
   10818 	 */
   10819 	if (sc->sc_type == WM_T_PCH2)
   10820 		delay(100);
   10821 
   10822 	return 0;
   10823 }
   10824 
   10825 /*
   10826  * wm_gmii_i82544_readreg:	[mii interface function]
   10827  *
   10828  *	Read a PHY register on the GMII.
   10829  */
   10830 static int
   10831 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10832 {
   10833 	struct wm_softc *sc = device_private(dev);
   10834 	int rv;
   10835 
   10836 	if (sc->phy.acquire(sc)) {
   10837 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10838 		return -1;
   10839 	}
   10840 
   10841 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10842 
   10843 	sc->phy.release(sc);
   10844 
   10845 	return rv;
   10846 }
   10847 
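/*
 * wm_gmii_i82544_readreg_locked:
 *
 *	As wm_gmii_i82544_readreg(), but with the PHY semaphore already
 *	held. For IGP PHYs, registers above the multi-page boundary need
 *	the page select register to be written first.
 */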
   10848 static int
   10849 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10850 {
   10851 	struct wm_softc *sc = device_private(dev);
   10852 	int rv;
   10853 
   10854 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10855 		switch (sc->sc_phytype) {
   10856 		case WMPHY_IGP:
   10857 		case WMPHY_IGP_2:
   10858 		case WMPHY_IGP_3:
   10859 			rv = wm_gmii_mdic_writereg(dev, phy,
   10860 			    MII_IGPHY_PAGE_SELECT, reg);
   10861 			if (rv != 0)
   10862 				return rv;
   10863 			break;
   10864 		default:
   10865 #ifdef WM_DEBUG
   10866 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10867 			    __func__, sc->sc_phytype, reg);
   10868 #endif
   10869 			break;
   10870 		}
   10871 	}
   10872 
   10873 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10874 }
   10875 
   10876 /*
   10877  * wm_gmii_i82544_writereg:	[mii interface function]
   10878  *
   10879  *	Write a PHY register on the GMII.
   10880  */
   10881 static int
   10882 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10883 {
   10884 	struct wm_softc *sc = device_private(dev);
   10885 	int rv;
   10886 
   10887 	if (sc->phy.acquire(sc)) {
   10888 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10889 		return -1;
   10890 	}
   10891 
   10892 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10893 	sc->phy.release(sc);
   10894 
   10895 	return rv;
   10896 }
   10897 
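/*
 * wm_gmii_i82544_writereg_locked:
 *
 *	As wm_gmii_i82544_writereg(), but with the PHY semaphore already
 *	held; selects the IGP page register first when needed.
 */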
   10898 static int
   10899 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10900 {
   10901 	struct wm_softc *sc = device_private(dev);
   10902 	int rv;
   10903 
   10904 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10905 		switch (sc->sc_phytype) {
   10906 		case WMPHY_IGP:
   10907 		case WMPHY_IGP_2:
   10908 		case WMPHY_IGP_3:
   10909 			rv = wm_gmii_mdic_writereg(dev, phy,
   10910 			    MII_IGPHY_PAGE_SELECT, reg);
   10911 			if (rv != 0)
   10912 				return rv;
   10913 			break;
   10914 		default:
   10915 #ifdef WM_DEBUG
   10916 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10917 			    __func__, sc->sc_phytype, reg);
   10918 #endif
   10919 			break;
   10920 		}
   10921 	}
   10922 
   10923 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10924 }
   10925 
   10926 /*
   10927  * wm_gmii_i80003_readreg:	[mii interface function]
   10928  *
 *	Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10932  */
   10933 static int
   10934 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10935 {
   10936 	struct wm_softc *sc = device_private(dev);
   10937 	int page_select;
   10938 	uint16_t temp, temp2;
   10939 	int rv = 0;
   10940 
   10941 	if (phy != 1) /* Only one PHY on kumeran bus */
   10942 		return -1;
   10943 
   10944 	if (sc->phy.acquire(sc)) {
   10945 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10946 		return -1;
   10947 	}
   10948 
   10949 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10950 		page_select = GG82563_PHY_PAGE_SELECT;
   10951 	else {
   10952 		/*
   10953 		 * Use Alternative Page Select register to access registers
   10954 		 * 30 and 31.
   10955 		 */
   10956 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10957 	}
   10958 	temp = reg >> GG82563_PAGE_SHIFT;
   10959 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10960 		goto out;
   10961 
   10962 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait another 200us to work around a bug with the ready
		 * bit in the MDIC register.
		 */
   10967 		delay(200);
   10968 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10969 		if ((rv != 0) || (temp2 != temp)) {
   10970 			device_printf(dev, "%s failed\n", __func__);
   10971 			rv = -1;
   10972 			goto out;
   10973 		}
   10974 		delay(200);
   10975 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10976 		delay(200);
   10977 	} else
   10978 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10979 
   10980 out:
   10981 	sc->phy.release(sc);
   10982 	return rv;
   10983 }
   10984 
   10985 /*
   10986  * wm_gmii_i80003_writereg:	[mii interface function]
   10987  *
 *	Write a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10991  */
   10992 static int
   10993 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10994 {
   10995 	struct wm_softc *sc = device_private(dev);
   10996 	int page_select, rv;
   10997 	uint16_t temp, temp2;
   10998 
   10999 	if (phy != 1) /* Only one PHY on kumeran bus */
   11000 		return -1;
   11001 
   11002 	if (sc->phy.acquire(sc)) {
   11003 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11004 		return -1;
   11005 	}
   11006 
   11007 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11008 		page_select = GG82563_PHY_PAGE_SELECT;
   11009 	else {
   11010 		/*
   11011 		 * Use Alternative Page Select register to access registers
   11012 		 * 30 and 31.
   11013 		 */
   11014 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11015 	}
   11016 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11017 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11018 		goto out;
   11019 
   11020 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait another 200us to work around a bug with the ready
		 * bit in the MDIC register.
		 */
   11025 		delay(200);
   11026 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11027 		if ((rv != 0) || (temp2 != temp)) {
   11028 			device_printf(dev, "%s failed\n", __func__);
   11029 			rv = -1;
   11030 			goto out;
   11031 		}
   11032 		delay(200);
   11033 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11034 		delay(200);
   11035 	} else
   11036 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11037 
   11038 out:
   11039 	sc->phy.release(sc);
   11040 	return rv;
   11041 }
   11042 
   11043 /*
   11044  * wm_gmii_bm_readreg:	[mii interface function]
   11045  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11049  */
   11050 static int
   11051 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11052 {
   11053 	struct wm_softc *sc = device_private(dev);
   11054 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11055 	int rv;
   11056 
   11057 	if (sc->phy.acquire(sc)) {
   11058 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11059 		return -1;
   11060 	}
   11061 
   11062 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11063 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11064 		    || (reg == 31)) ? 1 : phy;
   11065 	/* Page 800 works differently than the rest so it has its own func */
   11066 	if (page == BM_WUC_PAGE) {
   11067 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11068 		goto release;
   11069 	}
   11070 
   11071 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11072 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11073 		    && (sc->sc_type != WM_T_82583))
   11074 			rv = wm_gmii_mdic_writereg(dev, phy,
   11075 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11076 		else
   11077 			rv = wm_gmii_mdic_writereg(dev, phy,
   11078 			    BME1000_PHY_PAGE_SELECT, page);
   11079 		if (rv != 0)
   11080 			goto release;
   11081 	}
   11082 
   11083 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11084 
   11085 release:
   11086 	sc->phy.release(sc);
   11087 	return rv;
   11088 }
   11089 
   11090 /*
   11091  * wm_gmii_bm_writereg:	[mii interface function]
   11092  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11096  */
   11097 static int
   11098 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11099 {
   11100 	struct wm_softc *sc = device_private(dev);
   11101 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11102 	int rv;
   11103 
   11104 	if (sc->phy.acquire(sc)) {
   11105 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11106 		return -1;
   11107 	}
   11108 
   11109 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11110 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11111 		    || (reg == 31)) ? 1 : phy;
   11112 	/* Page 800 works differently than the rest so it has its own func */
   11113 	if (page == BM_WUC_PAGE) {
   11114 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11115 		goto release;
   11116 	}
   11117 
   11118 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11119 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11120 		    && (sc->sc_type != WM_T_82583))
   11121 			rv = wm_gmii_mdic_writereg(dev, phy,
   11122 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11123 		else
   11124 			rv = wm_gmii_mdic_writereg(dev, phy,
   11125 			    BME1000_PHY_PAGE_SELECT, page);
   11126 		if (rv != 0)
   11127 			goto release;
   11128 	}
   11129 
   11130 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11131 
   11132 release:
   11133 	sc->phy.release(sc);
   11134 	return rv;
   11135 }
   11136 
   11137 /*
   11138  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11139  *  @dev: pointer to the HW structure
   11140  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11141  *
   11142  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11143  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11144  */
   11145 static int
   11146 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11147 {
   11148 	uint16_t temp;
   11149 	int rv;
   11150 
   11151 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11152 		device_xname(dev), __func__));
   11153 
   11154 	if (!phy_regp)
   11155 		return -1;
   11156 
   11157 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11158 
   11159 	/* Select Port Control Registers page */
   11160 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11161 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11162 	if (rv != 0)
   11163 		return rv;
   11164 
   11165 	/* Read WUCE and save it */
   11166 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11167 	if (rv != 0)
   11168 		return rv;
   11169 
   11170 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11171 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11172 	 */
   11173 	temp = *phy_regp;
   11174 	temp |= BM_WUC_ENABLE_BIT;
   11175 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11176 
   11177 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11178 		return rv;
   11179 
   11180 	/* Select Host Wakeup Registers page - caller now able to write
   11181 	 * registers on the Wakeup registers page
   11182 	 */
   11183 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11184 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11185 }
   11186 
   11187 /*
   11188  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
    11189  *  @dev: the device
    11190  *  @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
    11191  *
    11192  *  Restore BM_WUC_ENABLE_REG to its original value.
    11193  *
    11194  *  Assumes semaphore already acquired and *phy_regp is the contents of the
    11195  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
    11196  *  caller.
   11197  */
   11198 static int
   11199 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11200 {
   11201 
   11202 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11203 		device_xname(dev), __func__));
   11204 
   11205 	if (!phy_regp)
   11206 		return -1;
   11207 
   11208 	/* Select Port Control Registers page */
   11209 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11210 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11211 
   11212 	/* Restore 769.17 to its original value */
   11213 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11214 
   11215 	return 0;
   11216 }
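
/*
 * Typical pairing of the two helpers above, as wm_access_phy_wakeup_reg_bm()
 * below does it ("regnum" and "data" are placeholders):
 *
 *	uint16_t wuce, data;
 *
 *	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) == 0) {
 *		wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
 *		wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, &data);
 *		wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
 *	}
 */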
   11217 
   11218 /*
   11219  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    11220  *  @dev: the device
   11221  *  @offset: register offset to be read or written
   11222  *  @val: pointer to the data to read or write
   11223  *  @rd: determines if operation is read or write
   11224  *  @page_set: BM_WUC_PAGE already set and access enabled
   11225  *
   11226  *  Read the PHY register at offset and store the retrieved information in
   11227  *  data, or write data to PHY register at offset.  Note the procedure to
   11228  *  access the PHY wakeup registers is different than reading the other PHY
    11229  *  registers. It works as follows:
   11230  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11231  *  2) Set page to 800 for host (801 if the access were for manageability)
   11232  *  3) Write the address using the address opcode (0x11)
   11233  *  4) Read or write the data using the data opcode (0x12)
   11234  *  5) Restore 769.17.2 to its original value
   11235  *
   11236  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11237  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11238  *
   11239  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11240  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    11241  *  is responsible for calls to
    11242  *  wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11242  */
   11243 static int
    11244 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11245 	bool page_set)
   11246 {
   11247 	struct wm_softc *sc = device_private(dev);
   11248 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11249 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11250 	uint16_t wuce;
   11251 	int rv = 0;
   11252 
   11253 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11254 		device_xname(dev), __func__));
   11255 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11256 	if ((sc->sc_type == WM_T_PCH)
   11257 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11258 		device_printf(dev,
   11259 		    "Attempting to access page %d while gig enabled.\n", page);
   11260 	}
   11261 
   11262 	if (!page_set) {
   11263 		/* Enable access to PHY wakeup registers */
   11264 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11265 		if (rv != 0) {
   11266 			device_printf(dev,
   11267 			    "%s: Could not enable PHY wakeup reg access\n",
   11268 			    __func__);
   11269 			return rv;
   11270 		}
   11271 	}
   11272 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11273 		device_xname(sc->sc_dev), __func__, page, regnum));
   11274 
    11275 	/*
    11276 	 * Steps 3) and 4): Access the PHY wakeup register (see the
    11277 	 * numbered steps in the function comment above).
    11278 	 */
   11279 
   11280 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11281 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11282 	if (rv != 0)
   11283 		return rv;
   11284 
   11285 	if (rd) {
   11286 		/* Read the Wakeup register page value using opcode 0x12 */
   11287 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11288 	} else {
   11289 		/* Write the Wakeup register page value using opcode 0x12 */
   11290 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11291 	}
   11292 	if (rv != 0)
   11293 		return rv;
   11294 
   11295 	if (!page_set)
   11296 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11297 
   11298 	return rv;
   11299 }
   11300 
   11301 /*
   11302  * wm_gmii_hv_readreg:	[mii interface function]
   11303  *
    11304  *	Read a PHY register on the HV (PCH and newer) PHY.
    11305  * This could be handled by the PHY layer if we didn't have to lock the
    11306  * resource ...
   11307  */
   11308 static int
   11309 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11310 {
   11311 	struct wm_softc *sc = device_private(dev);
   11312 	int rv;
   11313 
   11314 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11315 		device_xname(dev), __func__));
   11316 	if (sc->phy.acquire(sc)) {
   11317 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11318 		return -1;
   11319 	}
   11320 
   11321 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11322 	sc->phy.release(sc);
   11323 	return rv;
   11324 }
   11325 
   11326 static int
   11327 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11328 {
   11329 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11330 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11331 	int rv;
   11332 
   11333 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11334 
   11335 	/* Page 800 works differently than the rest so it has its own func */
   11336 	if (page == BM_WUC_PAGE)
   11337 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11338 
    11339 	/*
    11340 	 * Pages 1 through 767 work differently from the rest and are
    11341 	 * not supported by this function.
    11342 	 */
    11343 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11344 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
    11345 		return -1;
    11346 	}
   11347 
   11348 	/*
   11349 	 * XXX I21[789] documents say that the SMBus Address register is at
   11350 	 * PHY address 01, Page 0 (not 768), Register 26.
   11351 	 */
   11352 	if (page == HV_INTC_FC_PAGE_START)
   11353 		page = 0;
   11354 
   11355 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11356 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11357 		    page << BME1000_PAGE_SHIFT);
   11358 		if (rv != 0)
   11359 			return rv;
   11360 	}
   11361 
   11362 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11363 }
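
/*
 * Sketch of the HV register encoding used above: page and register
 * number travel together in "reg" and are unpacked with
 * BM_PHY_REG_PAGE(reg) / BM_PHY_REG_NUM(reg).  Any page >=
 * HV_INTC_FC_PAGE_START (768) steers the access to PHY address 1; only
 * (regnum & MII_ADDRMASK) goes on the wire after the page select.
 */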
   11364 
   11365 /*
   11366  * wm_gmii_hv_writereg:	[mii interface function]
   11367  *
    11368  *	Write a PHY register on the HV (PCH and newer) PHY.
    11369  * This could be handled by the PHY layer if we didn't have to lock the
    11370  * resource ...
   11371  */
   11372 static int
   11373 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11374 {
   11375 	struct wm_softc *sc = device_private(dev);
   11376 	int rv;
   11377 
   11378 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11379 		device_xname(dev), __func__));
   11380 
   11381 	if (sc->phy.acquire(sc)) {
   11382 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11383 		return -1;
   11384 	}
   11385 
   11386 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11387 	sc->phy.release(sc);
   11388 
   11389 	return rv;
   11390 }
   11391 
   11392 static int
   11393 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11394 {
   11395 	struct wm_softc *sc = device_private(dev);
   11396 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11397 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11398 	int rv;
   11399 
   11400 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11401 
   11402 	/* Page 800 works differently than the rest so it has its own func */
   11403 	if (page == BM_WUC_PAGE)
   11404 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11405 		    false);
   11406 
    11407 	/*
    11408 	 * Pages 1 through 767 work differently from the rest and are
    11409 	 * not supported by this function.
    11410 	 */
    11411 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11412 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
    11413 		return -1;
    11414 	}
   11415 
    11417 	/*
    11418 	 * XXX I21[789] documents say that the SMBus Address register
    11419 	 * is at PHY address 01, Page 0 (not 768), Register 26.
    11420 	 */
    11421 	if (page == HV_INTC_FC_PAGE_START)
    11422 		page = 0;
    11423 
    11424 	/*
    11425 	 * XXX Workaround MDIO accesses being disabled after entering
    11426 	 * IEEE Power Down (whenever bit 11 of the PHY control
    11427 	 * register is set)
    11428 	 */
    11429 	if (sc->sc_phytype == WMPHY_82578) {
    11430 		struct mii_softc *child;
    11431 
    11432 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
    11433 		if ((child != NULL) && (child->mii_mpd_rev >= 1)
    11434 		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
    11435 		    && ((val & (1 << 11)) != 0)) {
    11436 			device_printf(dev, "XXX need workaround\n");
    11437 		}
    11438 	}
    11439 
    11440 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
    11441 		rv = wm_gmii_mdic_writereg(dev, 1,
    11442 		    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
    11443 		if (rv != 0)
    11444 			return rv;
    11445 	}
   11447 
   11448 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11449 }
   11450 
   11451 /*
   11452  * wm_gmii_82580_readreg:	[mii interface function]
   11453  *
   11454  *	Read a PHY register on the 82580 and I350.
   11455  * This could be handled by the PHY layer if we didn't have to lock the
    11456  * resource ...
   11457  */
   11458 static int
   11459 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11460 {
   11461 	struct wm_softc *sc = device_private(dev);
   11462 	int rv;
   11463 
   11464 	if (sc->phy.acquire(sc) != 0) {
   11465 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11466 		return -1;
   11467 	}
   11468 
   11469 #ifdef DIAGNOSTIC
   11470 	if (reg > MII_ADDRMASK) {
   11471 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11472 		    __func__, sc->sc_phytype, reg);
   11473 		reg &= MII_ADDRMASK;
   11474 	}
   11475 #endif
   11476 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11477 
   11478 	sc->phy.release(sc);
   11479 	return rv;
   11480 }
   11481 
   11482 /*
   11483  * wm_gmii_82580_writereg:	[mii interface function]
   11484  *
   11485  *	Write a PHY register on the 82580 and I350.
   11486  * This could be handled by the PHY layer if we didn't have to lock the
    11487  * resource ...
   11488  */
   11489 static int
   11490 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11491 {
   11492 	struct wm_softc *sc = device_private(dev);
   11493 	int rv;
   11494 
   11495 	if (sc->phy.acquire(sc) != 0) {
   11496 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11497 		return -1;
   11498 	}
   11499 
   11500 #ifdef DIAGNOSTIC
   11501 	if (reg > MII_ADDRMASK) {
   11502 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11503 		    __func__, sc->sc_phytype, reg);
   11504 		reg &= MII_ADDRMASK;
   11505 	}
   11506 #endif
   11507 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11508 
   11509 	sc->phy.release(sc);
   11510 	return rv;
   11511 }
   11512 
   11513 /*
   11514  * wm_gmii_gs40g_readreg:	[mii interface function]
   11515  *
    11516  *	Read a PHY register on the I210 and I211.
    11517  * This could be handled by the PHY layer if we didn't have to lock the
    11518  * resource ...
   11519  */
   11520 static int
   11521 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11522 {
   11523 	struct wm_softc *sc = device_private(dev);
   11524 	int page, offset;
   11525 	int rv;
   11526 
   11527 	/* Acquire semaphore */
   11528 	if (sc->phy.acquire(sc)) {
   11529 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11530 		return -1;
   11531 	}
   11532 
   11533 	/* Page select */
   11534 	page = reg >> GS40G_PAGE_SHIFT;
   11535 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11536 	if (rv != 0)
   11537 		goto release;
   11538 
   11539 	/* Read reg */
   11540 	offset = reg & GS40G_OFFSET_MASK;
   11541 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11542 
   11543 release:
   11544 	sc->phy.release(sc);
   11545 	return rv;
   11546 }
   11547 
   11548 /*
   11549  * wm_gmii_gs40g_writereg:	[mii interface function]
   11550  *
   11551  *	Write a PHY register on the I210 and I211.
   11552  * This could be handled by the PHY layer if we didn't have to lock the
    11553  * resource ...
   11554  */
   11555 static int
   11556 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11557 {
   11558 	struct wm_softc *sc = device_private(dev);
   11559 	uint16_t page;
   11560 	int offset, rv;
   11561 
   11562 	/* Acquire semaphore */
   11563 	if (sc->phy.acquire(sc)) {
   11564 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11565 		return -1;
   11566 	}
   11567 
   11568 	/* Page select */
   11569 	page = reg >> GS40G_PAGE_SHIFT;
   11570 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11571 	if (rv != 0)
   11572 		goto release;
   11573 
   11574 	/* Write reg */
   11575 	offset = reg & GS40G_OFFSET_MASK;
   11576 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11577 
   11578 release:
   11579 	/* Release semaphore */
   11580 	sc->phy.release(sc);
   11581 	return rv;
   11582 }
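
/*
 * Sketch of the GS40G (I210/I211 internal PHY) encoding used by the two
 * functions above:
 *
 *	reg    = (page << GS40G_PAGE_SHIFT) | offset
 *	page   = reg >> GS40G_PAGE_SHIFT    (written to GS40G_PAGE_SELECT)
 *	offset = reg & GS40G_OFFSET_MASK    (the register actually accessed)
 */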
   11583 
   11584 /*
   11585  * wm_gmii_statchg:	[mii interface function]
   11586  *
   11587  *	Callback from MII layer when media changes.
   11588  */
   11589 static void
   11590 wm_gmii_statchg(struct ifnet *ifp)
   11591 {
   11592 	struct wm_softc *sc = ifp->if_softc;
   11593 	struct mii_data *mii = &sc->sc_mii;
   11594 
   11595 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11596 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11597 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11598 
   11599 	/* Get flow control negotiation result. */
   11600 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11601 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11602 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11603 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11604 	}
   11605 
   11606 	if (sc->sc_flowflags & IFM_FLOW) {
   11607 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11608 			sc->sc_ctrl |= CTRL_TFCE;
   11609 			sc->sc_fcrtl |= FCRTL_XONE;
   11610 		}
   11611 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11612 			sc->sc_ctrl |= CTRL_RFCE;
   11613 	}
   11614 
   11615 	if (mii->mii_media_active & IFM_FDX) {
   11616 		DPRINTF(WM_DEBUG_LINK,
   11617 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11618 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11619 	} else {
   11620 		DPRINTF(WM_DEBUG_LINK,
   11621 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11622 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11623 	}
   11624 
   11625 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11626 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11627 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11628 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11629 	if (sc->sc_type == WM_T_80003) {
   11630 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11631 		case IFM_1000_T:
   11632 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11633 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11634 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11635 			break;
   11636 		default:
   11637 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11638 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11639 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11640 			break;
   11641 		}
   11642 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11643 	}
   11644 }
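
/*
 * How the negotiated flow-control flags map onto hardware bits in
 * wm_gmii_statchg() above (a summary of the code, not of a datasheet):
 *
 *	IFM_ETH_TXPAUSE -> CTRL_TFCE and FCRTL_XONE (we may send pause)
 *	IFM_ETH_RXPAUSE -> CTRL_RFCE (we honour received pause)
 *
 * Neither bit is set when IFM_FLOW was not negotiated.
 */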
   11645 
   11646 /* kumeran related (80003, ICH* and PCH*) */
   11647 
   11648 /*
   11649  * wm_kmrn_readreg:
   11650  *
   11651  *	Read a kumeran register
   11652  */
   11653 static int
   11654 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11655 {
   11656 	int rv;
   11657 
   11658 	if (sc->sc_type == WM_T_80003)
   11659 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11660 	else
   11661 		rv = sc->phy.acquire(sc);
   11662 	if (rv != 0) {
   11663 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11664 		    __func__);
   11665 		return rv;
   11666 	}
   11667 
   11668 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11669 
   11670 	if (sc->sc_type == WM_T_80003)
   11671 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11672 	else
   11673 		sc->phy.release(sc);
   11674 
   11675 	return rv;
   11676 }
   11677 
   11678 static int
   11679 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11680 {
   11681 
   11682 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11683 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11684 	    KUMCTRLSTA_REN);
   11685 	CSR_WRITE_FLUSH(sc);
   11686 	delay(2);
   11687 
   11688 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11689 
   11690 	return 0;
   11691 }
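
/*
 * Sketch of the Kumeran access protocol implemented above: the single
 * KUMCTRLSTA register multiplexes address and data.
 *
 *	read:  write (reg << KUMCTRLSTA_OFFSET_SHIFT) | KUMCTRLSTA_REN,
 *	       wait 2us, then read back; the data is in the low bits
 *	       covered by KUMCTRLSTA_MASK.
 *	write: write (reg << KUMCTRLSTA_OFFSET_SHIFT) | val in one go.
 */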
   11692 
   11693 /*
   11694  * wm_kmrn_writereg:
   11695  *
   11696  *	Write a kumeran register
   11697  */
   11698 static int
   11699 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11700 {
   11701 	int rv;
   11702 
   11703 	if (sc->sc_type == WM_T_80003)
   11704 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11705 	else
   11706 		rv = sc->phy.acquire(sc);
   11707 	if (rv != 0) {
   11708 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11709 		    __func__);
   11710 		return rv;
   11711 	}
   11712 
   11713 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11714 
   11715 	if (sc->sc_type == WM_T_80003)
   11716 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11717 	else
   11718 		sc->phy.release(sc);
   11719 
   11720 	return rv;
   11721 }
   11722 
   11723 static int
   11724 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11725 {
   11726 
   11727 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11728 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11729 
   11730 	return 0;
   11731 }
   11732 
   11733 /*
   11734  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11735  * This access method is different from IEEE MMD.
   11736  */
   11737 static int
   11738 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11739 {
   11740 	struct wm_softc *sc = device_private(dev);
   11741 	int rv;
   11742 
   11743 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11744 	if (rv != 0)
   11745 		return rv;
   11746 
   11747 	if (rd)
   11748 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11749 	else
   11750 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11751 	return rv;
   11752 }
   11753 
   11754 static int
   11755 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11756 {
   11757 
   11758 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11759 }
   11760 
   11761 static int
   11762 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11763 {
   11764 
   11765 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11766 }
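
/*
 * Usage sketch for the EMI helpers above; "reg" stands for any EMI
 * register number (no concrete register names are assumed here).
 * Addressing is a two-step indirection through PHY address 2:
 *
 *	write reg  -> I82579_EMI_ADDR
 *	read/write -> I82579_EMI_DATA
 *
 * e.g.:	rv = wm_read_emi_reg_locked(dev, reg, &data);
 */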
   11767 
   11768 /* SGMII related */
   11769 
   11770 /*
   11771  * wm_sgmii_uses_mdio
   11772  *
   11773  * Check whether the transaction is to the internal PHY or the external
   11774  * MDIO interface. Return true if it's MDIO.
   11775  */
   11776 static bool
   11777 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11778 {
   11779 	uint32_t reg;
   11780 	bool ismdio = false;
   11781 
   11782 	switch (sc->sc_type) {
   11783 	case WM_T_82575:
   11784 	case WM_T_82576:
   11785 		reg = CSR_READ(sc, WMREG_MDIC);
   11786 		ismdio = ((reg & MDIC_DEST) != 0);
   11787 		break;
   11788 	case WM_T_82580:
   11789 	case WM_T_I350:
   11790 	case WM_T_I354:
   11791 	case WM_T_I210:
   11792 	case WM_T_I211:
   11793 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11794 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11795 		break;
   11796 	default:
   11797 		break;
   11798 	}
   11799 
   11800 	return ismdio;
   11801 }
   11802 
   11803 /*
   11804  * wm_sgmii_readreg:	[mii interface function]
   11805  *
   11806  *	Read a PHY register on the SGMII
   11807  * This could be handled by the PHY layer if we didn't have to lock the
    11808  * resource ...
   11809  */
   11810 static int
   11811 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11812 {
   11813 	struct wm_softc *sc = device_private(dev);
   11814 	int rv;
   11815 
   11816 	if (sc->phy.acquire(sc)) {
   11817 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11818 		return -1;
   11819 	}
   11820 
   11821 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11822 
   11823 	sc->phy.release(sc);
   11824 	return rv;
   11825 }
   11826 
   11827 static int
   11828 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11829 {
   11830 	struct wm_softc *sc = device_private(dev);
   11831 	uint32_t i2ccmd;
   11832 	int i, rv = 0;
   11833 
   11834 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11835 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11836 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11837 
   11838 	/* Poll the ready bit */
   11839 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11840 		delay(50);
   11841 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11842 		if (i2ccmd & I2CCMD_READY)
   11843 			break;
   11844 	}
   11845 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11846 		device_printf(dev, "I2CCMD Read did not complete\n");
   11847 		rv = ETIMEDOUT;
   11848 	}
   11849 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11850 		if (!sc->phy.no_errprint)
   11851 			device_printf(dev, "I2CCMD Error bit set\n");
   11852 		rv = EIO;
   11853 	}
   11854 
   11855 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11856 
   11857 	return rv;
   11858 }
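
/*
 * Worked example of the byte swap above: the I2C interface returns the
 * 16-bit PHY register with its bytes in the opposite order from MII.
 * If the low 16 bits of I2CCMD read back as 0x3412, then
 *
 *	((0x3412 >> 8) & 0x00ff) | ((0x3412 << 8) & 0xff00) == 0x1234
 *
 * and 0x1234 is the value handed back to the MII layer.  The write path
 * below performs the mirror-image swap before issuing the command.
 */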
   11859 
   11860 /*
   11861  * wm_sgmii_writereg:	[mii interface function]
   11862  *
   11863  *	Write a PHY register on the SGMII.
   11864  * This could be handled by the PHY layer if we didn't have to lock the
    11865  * resource ...
   11866  */
   11867 static int
   11868 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11869 {
   11870 	struct wm_softc *sc = device_private(dev);
   11871 	int rv;
   11872 
   11873 	if (sc->phy.acquire(sc) != 0) {
   11874 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11875 		return -1;
   11876 	}
   11877 
   11878 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11879 
   11880 	sc->phy.release(sc);
   11881 
   11882 	return rv;
   11883 }
   11884 
   11885 static int
   11886 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11887 {
   11888 	struct wm_softc *sc = device_private(dev);
   11889 	uint32_t i2ccmd;
   11890 	uint16_t swapdata;
   11891 	int rv = 0;
   11892 	int i;
   11893 
   11894 	/* Swap the data bytes for the I2C interface */
   11895 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11896 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11897 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11898 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11899 
   11900 	/* Poll the ready bit */
   11901 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11902 		delay(50);
   11903 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11904 		if (i2ccmd & I2CCMD_READY)
   11905 			break;
   11906 	}
   11907 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11908 		device_printf(dev, "I2CCMD Write did not complete\n");
   11909 		rv = ETIMEDOUT;
   11910 	}
   11911 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11912 		device_printf(dev, "I2CCMD Error bit set\n");
   11913 		rv = EIO;
   11914 	}
   11915 
   11916 	return rv;
   11917 }
   11918 
   11919 /* TBI related */
   11920 
   11921 static bool
   11922 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11923 {
   11924 	bool sig;
   11925 
   11926 	sig = ctrl & CTRL_SWDPIN(1);
   11927 
   11928 	/*
   11929 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11930 	 * detect a signal, 1 if they don't.
   11931 	 */
   11932 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11933 		sig = !sig;
   11934 
   11935 	return sig;
   11936 }
   11937 
   11938 /*
   11939  * wm_tbi_mediainit:
   11940  *
   11941  *	Initialize media for use on 1000BASE-X devices.
   11942  */
   11943 static void
   11944 wm_tbi_mediainit(struct wm_softc *sc)
   11945 {
   11946 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11947 	const char *sep = "";
   11948 
   11949 	if (sc->sc_type < WM_T_82543)
   11950 		sc->sc_tipg = TIPG_WM_DFLT;
   11951 	else
   11952 		sc->sc_tipg = TIPG_LG_DFLT;
   11953 
   11954 	sc->sc_tbi_serdes_anegticks = 5;
   11955 
   11956 	/* Initialize our media structures */
   11957 	sc->sc_mii.mii_ifp = ifp;
   11958 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11959 
   11960 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11961 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11962 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11963 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11964 	else
   11965 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11966 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11967 
   11968 	/*
   11969 	 * SWD Pins:
   11970 	 *
   11971 	 *	0 = Link LED (output)
   11972 	 *	1 = Loss Of Signal (input)
   11973 	 */
   11974 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11975 
   11976 	/* XXX Perhaps this is only for TBI */
   11977 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11978 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11979 
   11980 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11981 		sc->sc_ctrl &= ~CTRL_LRST;
   11982 
   11983 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11984 
   11985 #define	ADD(ss, mm, dd)							\
   11986 do {									\
   11987 	aprint_normal("%s%s", sep, ss);					\
   11988 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11989 	sep = ", ";							\
   11990 } while (/*CONSTCOND*/0)
   11991 
   11992 	aprint_normal_dev(sc->sc_dev, "");
   11993 
   11994 	if (sc->sc_type == WM_T_I354) {
   11995 		uint32_t status;
   11996 
   11997 		status = CSR_READ(sc, WMREG_STATUS);
   11998 		if (((status & STATUS_2P5_SKU) != 0)
   11999 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12000 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12001 		} else
   12002 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12003 	} else if (sc->sc_type == WM_T_82545) {
   12004 		/* Only 82545 is LX (XXX except SFP) */
   12005 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12006 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12007 	} else if (sc->sc_sfptype != 0) {
   12008 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12009 		switch (sc->sc_sfptype) {
   12010 		default:
   12011 		case SFF_SFP_ETH_FLAGS_1000SX:
   12012 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12013 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12014 			break;
   12015 		case SFF_SFP_ETH_FLAGS_1000LX:
   12016 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12017 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12018 			break;
   12019 		case SFF_SFP_ETH_FLAGS_1000CX:
   12020 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12021 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12022 			break;
   12023 		case SFF_SFP_ETH_FLAGS_1000T:
   12024 			ADD("1000baseT", IFM_1000_T, 0);
   12025 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12026 			break;
   12027 		case SFF_SFP_ETH_FLAGS_100FX:
   12028 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12029 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12030 			break;
   12031 		}
   12032 	} else {
   12033 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12034 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12035 	}
   12036 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12037 	aprint_normal("\n");
   12038 
   12039 #undef ADD
   12040 
   12041 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12042 }
   12043 
   12044 /*
   12045  * wm_tbi_mediachange:	[ifmedia interface function]
   12046  *
   12047  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12048  */
   12049 static int
   12050 wm_tbi_mediachange(struct ifnet *ifp)
   12051 {
   12052 	struct wm_softc *sc = ifp->if_softc;
   12053 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12054 	uint32_t status, ctrl;
   12055 	bool signal;
   12056 	int i;
   12057 
   12058 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12059 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12060 		/* XXX need some work for >= 82571 and < 82575 */
   12061 		if (sc->sc_type < WM_T_82575)
   12062 			return 0;
   12063 	}
   12064 
   12065 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12066 	    || (sc->sc_type >= WM_T_82575))
   12067 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12068 
   12069 	sc->sc_ctrl &= ~CTRL_LRST;
   12070 	sc->sc_txcw = TXCW_ANE;
   12071 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12072 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12073 	else if (ife->ifm_media & IFM_FDX)
   12074 		sc->sc_txcw |= TXCW_FD;
   12075 	else
   12076 		sc->sc_txcw |= TXCW_HD;
   12077 
   12078 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12079 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12080 
   12081 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12082 		device_xname(sc->sc_dev), sc->sc_txcw));
   12083 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12084 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12085 	CSR_WRITE_FLUSH(sc);
   12086 	delay(1000);
   12087 
   12088 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12089 	signal = wm_tbi_havesignal(sc, ctrl);
   12090 
   12091 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12092 		signal));
   12093 
   12094 	if (signal) {
   12095 		/* Have signal; wait for the link to come up. */
   12096 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12097 			delay(10000);
   12098 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12099 				break;
   12100 		}
   12101 
   12102 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12103 			device_xname(sc->sc_dev), i));
   12104 
   12105 		status = CSR_READ(sc, WMREG_STATUS);
   12106 		DPRINTF(WM_DEBUG_LINK,
   12107 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12108 			device_xname(sc->sc_dev), status, STATUS_LU));
   12109 		if (status & STATUS_LU) {
   12110 			/* Link is up. */
   12111 			DPRINTF(WM_DEBUG_LINK,
   12112 			    ("%s: LINK: set media -> link up %s\n",
   12113 				device_xname(sc->sc_dev),
   12114 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12115 
   12116 			/*
   12117 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12118 			 * so we should update sc->sc_ctrl
   12119 			 */
   12120 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12121 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12122 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12123 			if (status & STATUS_FD)
   12124 				sc->sc_tctl |=
   12125 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12126 			else
   12127 				sc->sc_tctl |=
   12128 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12129 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12130 				sc->sc_fcrtl |= FCRTL_XONE;
   12131 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12132 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12133 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12134 			sc->sc_tbi_linkup = 1;
   12135 		} else {
   12136 			if (i == WM_LINKUP_TIMEOUT)
   12137 				wm_check_for_link(sc);
   12138 			/* Link is down. */
   12139 			DPRINTF(WM_DEBUG_LINK,
   12140 			    ("%s: LINK: set media -> link down\n",
   12141 				device_xname(sc->sc_dev)));
   12142 			sc->sc_tbi_linkup = 0;
   12143 		}
   12144 	} else {
   12145 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12146 			device_xname(sc->sc_dev)));
   12147 		sc->sc_tbi_linkup = 0;
   12148 	}
   12149 
   12150 	wm_tbi_serdes_set_linkled(sc);
   12151 
   12152 	return 0;
   12153 }
   12154 
   12155 /*
   12156  * wm_tbi_mediastatus:	[ifmedia interface function]
   12157  *
   12158  *	Get the current interface media status on a 1000BASE-X device.
   12159  */
   12160 static void
   12161 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12162 {
   12163 	struct wm_softc *sc = ifp->if_softc;
   12164 	uint32_t ctrl, status;
   12165 
   12166 	ifmr->ifm_status = IFM_AVALID;
   12167 	ifmr->ifm_active = IFM_ETHER;
   12168 
   12169 	status = CSR_READ(sc, WMREG_STATUS);
   12170 	if ((status & STATUS_LU) == 0) {
   12171 		ifmr->ifm_active |= IFM_NONE;
   12172 		return;
   12173 	}
   12174 
   12175 	ifmr->ifm_status |= IFM_ACTIVE;
   12176 	/* Only 82545 is LX */
   12177 	if (sc->sc_type == WM_T_82545)
   12178 		ifmr->ifm_active |= IFM_1000_LX;
   12179 	else
   12180 		ifmr->ifm_active |= IFM_1000_SX;
   12181 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12182 		ifmr->ifm_active |= IFM_FDX;
   12183 	else
   12184 		ifmr->ifm_active |= IFM_HDX;
   12185 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12186 	if (ctrl & CTRL_RFCE)
   12187 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12188 	if (ctrl & CTRL_TFCE)
   12189 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12190 }
   12191 
   12192 /* XXX TBI only */
   12193 static int
   12194 wm_check_for_link(struct wm_softc *sc)
   12195 {
   12196 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12197 	uint32_t rxcw;
   12198 	uint32_t ctrl;
   12199 	uint32_t status;
   12200 	bool signal;
   12201 
   12202 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12203 		device_xname(sc->sc_dev), __func__));
   12204 
   12205 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12206 		/* XXX need some work for >= 82571 */
   12207 		if (sc->sc_type >= WM_T_82571) {
   12208 			sc->sc_tbi_linkup = 1;
   12209 			return 0;
   12210 		}
   12211 	}
   12212 
   12213 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12214 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12215 	status = CSR_READ(sc, WMREG_STATUS);
   12216 	signal = wm_tbi_havesignal(sc, ctrl);
   12217 
   12218 	DPRINTF(WM_DEBUG_LINK,
   12219 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12220 		device_xname(sc->sc_dev), __func__, signal,
   12221 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12222 
   12223 	/*
   12224 	 * SWDPIN   LU RXCW
   12225 	 *	0    0	  0
   12226 	 *	0    0	  1	(should not happen)
   12227 	 *	0    1	  0	(should not happen)
   12228 	 *	0    1	  1	(should not happen)
   12229 	 *	1    0	  0	Disable autonego and force linkup
   12230 	 *	1    0	  1	got /C/ but not linkup yet
   12231 	 *	1    1	  0	(linkup)
   12232 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12233 	 *
   12234 	 */
   12235 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12236 		DPRINTF(WM_DEBUG_LINK,
   12237 		    ("%s: %s: force linkup and fullduplex\n",
   12238 			device_xname(sc->sc_dev), __func__));
   12239 		sc->sc_tbi_linkup = 0;
   12240 		/* Disable auto-negotiation in the TXCW register */
   12241 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12242 
   12243 		/*
   12244 		 * Force link-up and also force full-duplex.
   12245 		 *
    12246 			 * NOTE: CTRL has been updated with TFCE and RFCE automatically,
   12247 		 * so we should update sc->sc_ctrl
   12248 		 */
   12249 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12250 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12251 	} else if (((status & STATUS_LU) != 0)
   12252 	    && ((rxcw & RXCW_C) != 0)
   12253 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12254 		sc->sc_tbi_linkup = 1;
   12255 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12256 			device_xname(sc->sc_dev),
   12257 			__func__));
   12258 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12259 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12260 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    12261 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12262 			device_xname(sc->sc_dev), __func__));
   12263 	} else {
   12264 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12265 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12266 			status));
   12267 	}
   12268 
   12269 	return 0;
   12270 }
   12271 
   12272 /*
   12273  * wm_tbi_tick:
   12274  *
   12275  *	Check the link on TBI devices.
   12276  *	This function acts as mii_tick().
   12277  */
   12278 static void
   12279 wm_tbi_tick(struct wm_softc *sc)
   12280 {
   12281 	struct mii_data *mii = &sc->sc_mii;
   12282 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12283 	uint32_t status;
   12284 
   12285 	KASSERT(WM_CORE_LOCKED(sc));
   12286 
   12287 	status = CSR_READ(sc, WMREG_STATUS);
   12288 
   12289 	/* XXX is this needed? */
   12290 	(void)CSR_READ(sc, WMREG_RXCW);
   12291 	(void)CSR_READ(sc, WMREG_CTRL);
   12292 
   12293 	/* set link status */
   12294 	if ((status & STATUS_LU) == 0) {
   12295 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12296 			device_xname(sc->sc_dev)));
   12297 		sc->sc_tbi_linkup = 0;
   12298 	} else if (sc->sc_tbi_linkup == 0) {
   12299 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12300 			device_xname(sc->sc_dev),
   12301 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12302 		sc->sc_tbi_linkup = 1;
   12303 		sc->sc_tbi_serdes_ticks = 0;
   12304 	}
   12305 
   12306 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12307 		goto setled;
   12308 
   12309 	if ((status & STATUS_LU) == 0) {
   12310 		sc->sc_tbi_linkup = 0;
   12311 		/* If the timer expired, retry autonegotiation */
   12312 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12313 		    && (++sc->sc_tbi_serdes_ticks
   12314 			>= sc->sc_tbi_serdes_anegticks)) {
   12315 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12316 				device_xname(sc->sc_dev), __func__));
   12317 			sc->sc_tbi_serdes_ticks = 0;
   12318 			/*
   12319 			 * Reset the link, and let autonegotiation do
   12320 			 * its thing
   12321 			 */
   12322 			sc->sc_ctrl |= CTRL_LRST;
   12323 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12324 			CSR_WRITE_FLUSH(sc);
   12325 			delay(1000);
   12326 			sc->sc_ctrl &= ~CTRL_LRST;
   12327 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12328 			CSR_WRITE_FLUSH(sc);
   12329 			delay(1000);
   12330 			CSR_WRITE(sc, WMREG_TXCW,
   12331 			    sc->sc_txcw & ~TXCW_ANE);
   12332 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12333 		}
   12334 	}
   12335 
   12336 setled:
   12337 	wm_tbi_serdes_set_linkled(sc);
   12338 }
   12339 
   12340 /* SERDES related */
   12341 static void
   12342 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12343 {
   12344 	uint32_t reg;
   12345 
   12346 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12347 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12348 		return;
   12349 
   12350 	/* Enable PCS to turn on link */
   12351 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12352 	reg |= PCS_CFG_PCS_EN;
   12353 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12354 
   12355 	/* Power up the laser */
   12356 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12357 	reg &= ~CTRL_EXT_SWDPIN(3);
   12358 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12359 
   12360 	/* Flush the write to verify completion */
   12361 	CSR_WRITE_FLUSH(sc);
   12362 	delay(1000);
   12363 }
   12364 
   12365 static int
   12366 wm_serdes_mediachange(struct ifnet *ifp)
   12367 {
   12368 	struct wm_softc *sc = ifp->if_softc;
   12369 	bool pcs_autoneg = true; /* XXX */
   12370 	uint32_t ctrl_ext, pcs_lctl, reg;
   12371 
   12372 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12373 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12374 		return 0;
   12375 
   12376 	/* XXX Currently, this function is not called on 8257[12] */
   12377 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12378 	    || (sc->sc_type >= WM_T_82575))
   12379 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12380 
   12381 	/* Power on the sfp cage if present */
   12382 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12383 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12384 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12385 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12386 
   12387 	sc->sc_ctrl |= CTRL_SLU;
   12388 
   12389 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12390 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12391 
   12392 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12393 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12394 	case CTRL_EXT_LINK_MODE_SGMII:
   12395 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12396 		pcs_autoneg = true;
   12397 		/* Autoneg time out should be disabled for SGMII mode */
   12398 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12399 		break;
   12400 	case CTRL_EXT_LINK_MODE_1000KX:
   12401 		pcs_autoneg = false;
   12402 		/* FALLTHROUGH */
   12403 	default:
   12404 		if ((sc->sc_type == WM_T_82575)
   12405 		    || (sc->sc_type == WM_T_82576)) {
   12406 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12407 				pcs_autoneg = false;
   12408 		}
   12409 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12410 		    | CTRL_FRCFDX;
   12411 
   12412 		/* Set speed of 1000/Full if speed/duplex is forced */
   12413 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12414 	}
   12415 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12416 
   12417 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12418 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12419 
   12420 	if (pcs_autoneg) {
   12421 		/* Set PCS register for autoneg */
   12422 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12423 
   12424 		/* Disable force flow control for autoneg */
   12425 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12426 
   12427 		/* Configure flow control advertisement for autoneg */
   12428 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12429 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12430 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12431 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12432 	} else
   12433 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12434 
   12435 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12436 
   12437 	return 0;
   12438 }
   12439 
   12440 static void
   12441 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12442 {
   12443 	struct wm_softc *sc = ifp->if_softc;
   12444 	struct mii_data *mii = &sc->sc_mii;
   12445 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12446 	uint32_t pcs_adv, pcs_lpab, reg;
   12447 
   12448 	ifmr->ifm_status = IFM_AVALID;
   12449 	ifmr->ifm_active = IFM_ETHER;
   12450 
   12451 	/* Check PCS */
   12452 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12453 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12454 		ifmr->ifm_active |= IFM_NONE;
   12455 		sc->sc_tbi_linkup = 0;
   12456 		goto setled;
   12457 	}
   12458 
   12459 	sc->sc_tbi_linkup = 1;
   12460 	ifmr->ifm_status |= IFM_ACTIVE;
   12461 	if (sc->sc_type == WM_T_I354) {
   12462 		uint32_t status;
   12463 
   12464 		status = CSR_READ(sc, WMREG_STATUS);
   12465 		if (((status & STATUS_2P5_SKU) != 0)
   12466 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12467 			ifmr->ifm_active |= IFM_2500_KX;
   12468 		} else
   12469 			ifmr->ifm_active |= IFM_1000_KX;
   12470 	} else {
   12471 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12472 		case PCS_LSTS_SPEED_10:
   12473 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12474 			break;
   12475 		case PCS_LSTS_SPEED_100:
   12476 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12477 			break;
   12478 		case PCS_LSTS_SPEED_1000:
   12479 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12480 			break;
   12481 		default:
   12482 			device_printf(sc->sc_dev, "Unknown speed\n");
   12483 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12484 			break;
   12485 		}
   12486 	}
   12487 	if ((reg & PCS_LSTS_FDX) != 0)
   12488 		ifmr->ifm_active |= IFM_FDX;
   12489 	else
   12490 		ifmr->ifm_active |= IFM_HDX;
   12491 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12492 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12493 		/* Check flow */
   12494 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12495 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12496 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12497 			goto setled;
   12498 		}
   12499 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12500 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12501 		DPRINTF(WM_DEBUG_LINK,
   12502 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12503 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12504 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12505 			mii->mii_media_active |= IFM_FLOW
   12506 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12507 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12508 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12509 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12510 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12511 			mii->mii_media_active |= IFM_FLOW
   12512 			    | IFM_ETH_TXPAUSE;
   12513 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12514 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12515 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12516 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12517 			mii->mii_media_active |= IFM_FLOW
   12518 			    | IFM_ETH_RXPAUSE;
   12519 		}
   12520 	}
   12521 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12522 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12523 setled:
   12524 	wm_tbi_serdes_set_linkled(sc);
   12525 }
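
/*
 * The flow-control resolution above follows the usual symmetric/
 * asymmetric pause pairing; with ADV = PCS_ANADV and LP = PCS_LPAB:
 *
 *	ADV SYM		LP SYM		-> TX and RX pause
 *	ADV ASYM only	LP SYM+ASYM	-> TX pause only
 *	ADV SYM+ASYM	LP ASYM only	-> RX pause only
 *	anything else			-> no pause
 */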
   12526 
   12527 /*
   12528  * wm_serdes_tick:
   12529  *
   12530  *	Check the link on serdes devices.
   12531  */
   12532 static void
   12533 wm_serdes_tick(struct wm_softc *sc)
   12534 {
   12535 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12536 	struct mii_data *mii = &sc->sc_mii;
   12537 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12538 	uint32_t reg;
   12539 
   12540 	KASSERT(WM_CORE_LOCKED(sc));
   12541 
   12542 	mii->mii_media_status = IFM_AVALID;
   12543 	mii->mii_media_active = IFM_ETHER;
   12544 
   12545 	/* Check PCS */
   12546 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12547 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12548 		mii->mii_media_status |= IFM_ACTIVE;
   12549 		sc->sc_tbi_linkup = 1;
   12550 		sc->sc_tbi_serdes_ticks = 0;
   12551 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12552 		if ((reg & PCS_LSTS_FDX) != 0)
   12553 			mii->mii_media_active |= IFM_FDX;
   12554 		else
   12555 			mii->mii_media_active |= IFM_HDX;
   12556 	} else {
    12557 		mii->mii_media_active |= IFM_NONE;
   12558 		sc->sc_tbi_linkup = 0;
   12559 		/* If the timer expired, retry autonegotiation */
   12560 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12561 		    && (++sc->sc_tbi_serdes_ticks
   12562 			>= sc->sc_tbi_serdes_anegticks)) {
   12563 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12564 				device_xname(sc->sc_dev), __func__));
   12565 			sc->sc_tbi_serdes_ticks = 0;
   12566 			/* XXX */
   12567 			wm_serdes_mediachange(ifp);
   12568 		}
   12569 	}
   12570 
   12571 	wm_tbi_serdes_set_linkled(sc);
   12572 }
   12573 
   12574 /* SFP related */
   12575 
   12576 static int
   12577 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12578 {
   12579 	uint32_t i2ccmd;
   12580 	int i;
   12581 
   12582 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12583 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12584 
   12585 	/* Poll the ready bit */
   12586 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12587 		delay(50);
   12588 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12589 		if (i2ccmd & I2CCMD_READY)
   12590 			break;
   12591 	}
   12592 	if ((i2ccmd & I2CCMD_READY) == 0)
   12593 		return -1;
   12594 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12595 		return -1;
   12596 
   12597 	*data = i2ccmd & 0x00ff;
   12598 
   12599 	return 0;
   12600 }
   12601 
   12602 static uint32_t
   12603 wm_sfp_get_media_type(struct wm_softc *sc)
   12604 {
   12605 	uint32_t ctrl_ext;
   12606 	uint8_t val = 0;
   12607 	int timeout = 3;
   12608 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12609 	int rv = -1;
   12610 
   12611 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12612 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12613 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12614 	CSR_WRITE_FLUSH(sc);
   12615 
   12616 	/* Read SFP module data */
   12617 	while (timeout) {
   12618 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12619 		if (rv == 0)
   12620 			break;
   12621 		delay(100*1000); /* XXX too big */
   12622 		timeout--;
   12623 	}
   12624 	if (rv != 0)
   12625 		goto out;
   12626 
   12627 	switch (val) {
   12628 	case SFF_SFP_ID_SFF:
   12629 		aprint_normal_dev(sc->sc_dev,
   12630 		    "Module/Connector soldered to board\n");
   12631 		break;
   12632 	case SFF_SFP_ID_SFP:
   12633 		sc->sc_flags |= WM_F_SFP;
   12634 		break;
   12635 	case SFF_SFP_ID_UNKNOWN:
   12636 		goto out;
   12637 	default:
   12638 		break;
   12639 	}
   12640 
   12641 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12642 	if (rv != 0)
   12643 		goto out;
   12644 
   12645 	sc->sc_sfptype = val;
   12646 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12647 		mediatype = WM_MEDIATYPE_SERDES;
   12648 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12649 		sc->sc_flags |= WM_F_SGMII;
   12650 		mediatype = WM_MEDIATYPE_COPPER;
   12651 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12652 		sc->sc_flags |= WM_F_SGMII;
   12653 		mediatype = WM_MEDIATYPE_SERDES;
   12654 	} else {
   12655 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12656 		    __func__, sc->sc_sfptype);
   12657 		sc->sc_sfptype = 0; /* XXX unknown */
   12658 	}
   12659 
   12660 out:
   12661 	/* Restore I2C interface setting */
   12662 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12663 
   12664 	return mediatype;
   12665 }
   12666 
   12667 /*
   12668  * NVM related.
   12669  * Microwire, SPI (w/wo EERD) and Flash.
   12670  */
   12671 
   12672 /* Both spi and uwire */
   12673 
   12674 /*
   12675  * wm_eeprom_sendbits:
   12676  *
   12677  *	Send a series of bits to the EEPROM.
   12678  */
   12679 static void
   12680 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12681 {
   12682 	uint32_t reg;
   12683 	int x;
   12684 
   12685 	reg = CSR_READ(sc, WMREG_EECD);
   12686 
   12687 	for (x = nbits; x > 0; x--) {
   12688 		if (bits & (1U << (x - 1)))
   12689 			reg |= EECD_DI;
   12690 		else
   12691 			reg &= ~EECD_DI;
   12692 		CSR_WRITE(sc, WMREG_EECD, reg);
   12693 		CSR_WRITE_FLUSH(sc);
   12694 		delay(2);
   12695 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12696 		CSR_WRITE_FLUSH(sc);
   12697 		delay(2);
   12698 		CSR_WRITE(sc, WMREG_EECD, reg);
   12699 		CSR_WRITE_FLUSH(sc);
   12700 		delay(2);
   12701 	}
   12702 }
   12703 
   12704 /*
   12705  * wm_eeprom_recvbits:
   12706  *
   12707  *	Receive a series of bits from the EEPROM.
   12708  */
   12709 static void
   12710 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12711 {
   12712 	uint32_t reg, val;
   12713 	int x;
   12714 
   12715 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12716 
   12717 	val = 0;
   12718 	for (x = nbits; x > 0; x--) {
   12719 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12720 		CSR_WRITE_FLUSH(sc);
   12721 		delay(2);
   12722 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12723 			val |= (1U << (x - 1));
   12724 		CSR_WRITE(sc, WMREG_EECD, reg);
   12725 		CSR_WRITE_FLUSH(sc);
   12726 		delay(2);
   12727 	}
   12728 	*valp = val;
   12729 }
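
/*
 * Timing sketch of the two bit-bang helpers above (SK is the serial
 * clock, DI/DO the data in/out pins; the 2us delays keep the clock
 * well within what slow serial EEPROMs tolerate):
 *
 *	send one bit:    drive DI, pulse SK high, then low
 *	receive one bit: raise SK, sample DO, lower SK
 *
 * Both directions shift the most significant bit first.
 */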
   12730 
   12731 /* Microwire */
   12732 
   12733 /*
   12734  * wm_nvm_read_uwire:
   12735  *
   12736  *	Read a word from the EEPROM using the MicroWire protocol.
   12737  */
   12738 static int
   12739 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12740 {
   12741 	uint32_t reg, val;
   12742 	int i;
   12743 
   12744 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12745 		device_xname(sc->sc_dev), __func__));
   12746 
   12747 	if (sc->nvm.acquire(sc) != 0)
   12748 		return -1;
   12749 
   12750 	for (i = 0; i < wordcnt; i++) {
   12751 		/* Clear SK and DI. */
   12752 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12753 		CSR_WRITE(sc, WMREG_EECD, reg);
   12754 
   12755 		/*
   12756 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12757 		 * and Xen.
   12758 		 *
   12759 		 * We use this workaround only for 82540 because qemu's
   12760 		 * e1000 act as 82540.
   12761 		 */
   12762 		if (sc->sc_type == WM_T_82540) {
   12763 			reg |= EECD_SK;
   12764 			CSR_WRITE(sc, WMREG_EECD, reg);
   12765 			reg &= ~EECD_SK;
   12766 			CSR_WRITE(sc, WMREG_EECD, reg);
   12767 			CSR_WRITE_FLUSH(sc);
   12768 			delay(2);
   12769 		}
   12770 		/* XXX: end of workaround */
   12771 
   12772 		/* Set CHIP SELECT. */
   12773 		reg |= EECD_CS;
   12774 		CSR_WRITE(sc, WMREG_EECD, reg);
   12775 		CSR_WRITE_FLUSH(sc);
   12776 		delay(2);
   12777 
   12778 		/* Shift in the READ command. */
   12779 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12780 
   12781 		/* Shift in address. */
   12782 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12783 
   12784 		/* Shift out the data. */
   12785 		wm_eeprom_recvbits(sc, &val, 16);
   12786 		data[i] = val & 0xffff;
   12787 
   12788 		/* Clear CHIP SELECT. */
   12789 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12790 		CSR_WRITE(sc, WMREG_EECD, reg);
   12791 		CSR_WRITE_FLUSH(sc);
   12792 		delay(2);
   12793 	}
   12794 
   12795 	sc->nvm.release(sc);
   12796 	return 0;
   12797 }
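
/*
 * Shape of one Microwire READ cycle as driven above, assuming a part
 * with 8 address bits (sc_nvm_addrbits; larger parts use 16):
 *
 *	CS high
 *	shift out UWIRE_OPC_READ (3 bits), then the word address (8 bits)
 *	shift in  16 data bits, MSB first
 *	CS low
 */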
   12798 
   12799 /* SPI */
   12800 
   12801 /*
   12802  * Set SPI and FLASH related information from the EECD register.
   12803  * For 82541 and 82547, the word size is taken from EEPROM.
   12804  */
   12805 static int
   12806 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12807 {
   12808 	int size;
   12809 	uint32_t reg;
   12810 	uint16_t data;
   12811 
   12812 	reg = CSR_READ(sc, WMREG_EECD);
   12813 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12814 
   12815 	/* Read the size of NVM from EECD by default */
   12816 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12817 	switch (sc->sc_type) {
   12818 	case WM_T_82541:
   12819 	case WM_T_82541_2:
   12820 	case WM_T_82547:
   12821 	case WM_T_82547_2:
    12822 		/* Set a temporary word size so the EEPROM itself can be read */
   12823 		sc->sc_nvm_wordsize = 64;
   12824 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12825 			aprint_error_dev(sc->sc_dev,
   12826 			    "%s: failed to read EEPROM size\n", __func__);
   12827 		}
   12828 		reg = data;
   12829 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12830 		if (size == 0)
   12831 			size = 6; /* 64 word size */
   12832 		else
   12833 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12834 		break;
   12835 	case WM_T_80003:
   12836 	case WM_T_82571:
   12837 	case WM_T_82572:
   12838 	case WM_T_82573: /* SPI case */
   12839 	case WM_T_82574: /* SPI case */
   12840 	case WM_T_82583: /* SPI case */
   12841 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12842 		if (size > 14)
   12843 			size = 14;
   12844 		break;
   12845 	case WM_T_82575:
   12846 	case WM_T_82576:
   12847 	case WM_T_82580:
   12848 	case WM_T_I350:
   12849 	case WM_T_I354:
   12850 	case WM_T_I210:
   12851 	case WM_T_I211:
   12852 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12853 		if (size > 15)
   12854 			size = 15;
   12855 		break;
   12856 	default:
   12857 		aprint_error_dev(sc->sc_dev,
   12858 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12859 		return -1;
   12861 	}
   12862 
   12863 	sc->sc_nvm_wordsize = 1 << size;
   12864 
   12865 	return 0;
   12866 }
   12867 
   12868 /*
   12869  * wm_nvm_ready_spi:
   12870  *
   12871  *	Wait for a SPI EEPROM to be ready for commands.
   12872  */
   12873 static int
   12874 wm_nvm_ready_spi(struct wm_softc *sc)
   12875 {
   12876 	uint32_t val;
   12877 	int usec;
   12878 
   12879 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12880 		device_xname(sc->sc_dev), __func__));
   12881 
   12882 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12883 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12884 		wm_eeprom_recvbits(sc, &val, 8);
   12885 		if ((val & SPI_SR_RDY) == 0)
   12886 			break;
   12887 	}
   12888 	if (usec >= SPI_MAX_RETRIES) {
    12889 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12890 		return -1;
   12891 	}
   12892 	return 0;
   12893 }
   12894 
   12895 /*
   12896  * wm_nvm_read_spi:
   12897  *
    12898  *	Read a word from the EEPROM using the SPI protocol.
   12899  */
   12900 static int
   12901 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12902 {
   12903 	uint32_t reg, val;
   12904 	int i;
   12905 	uint8_t opc;
   12906 	int rv = 0;
   12907 
   12908 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12909 		device_xname(sc->sc_dev), __func__));
   12910 
   12911 	if (sc->nvm.acquire(sc) != 0)
   12912 		return -1;
   12913 
   12914 	/* Clear SK and CS. */
   12915 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12916 	CSR_WRITE(sc, WMREG_EECD, reg);
   12917 	CSR_WRITE_FLUSH(sc);
   12918 	delay(2);
   12919 
   12920 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12921 		goto out;
   12922 
   12923 	/* Toggle CS to flush commands. */
   12924 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12925 	CSR_WRITE_FLUSH(sc);
   12926 	delay(2);
   12927 	CSR_WRITE(sc, WMREG_EECD, reg);
   12928 	CSR_WRITE_FLUSH(sc);
   12929 	delay(2);
   12930 
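          	/*
          	 * SPI parts larger than 256 bytes that use 8-bit addressing
          	 * carry the ninth address bit in the opcode (the A8 bit).
          	 */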
   12931 	opc = SPI_OPC_READ;
   12932 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12933 		opc |= SPI_OPC_A8;
   12934 
   12935 	wm_eeprom_sendbits(sc, opc, 8);
   12936 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12937 
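          	/*
          	 * Each word is shifted out MSB first; byte-swap it into the
          	 * order callers expect.
          	 */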
   12938 	for (i = 0; i < wordcnt; i++) {
   12939 		wm_eeprom_recvbits(sc, &val, 16);
   12940 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12941 	}
   12942 
   12943 	/* Raise CS and clear SK. */
   12944 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12945 	CSR_WRITE(sc, WMREG_EECD, reg);
   12946 	CSR_WRITE_FLUSH(sc);
   12947 	delay(2);
   12948 
   12949 out:
   12950 	sc->nvm.release(sc);
   12951 	return rv;
   12952 }
   12953 
    12954 /* Reading via the EERD (EEPROM Read) register */
   12955 
   12956 static int
   12957 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12958 {
   12959 	uint32_t attempts = 100000;
   12960 	uint32_t i, reg = 0;
   12961 	int32_t done = -1;
   12962 
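          	/* Poll the DONE bit for up to 100000 * 5us = 500ms. */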
   12963 	for (i = 0; i < attempts; i++) {
   12964 		reg = CSR_READ(sc, rw);
   12965 
   12966 		if (reg & EERD_DONE) {
   12967 			done = 0;
   12968 			break;
   12969 		}
   12970 		delay(5);
   12971 	}
   12972 
   12973 	return done;
   12974 }
   12975 
   12976 static int
   12977 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12978 {
   12979 	int i, eerd = 0;
   12980 	int rv = 0;
   12981 
   12982 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12983 		device_xname(sc->sc_dev), __func__));
   12984 
   12985 	if (sc->nvm.acquire(sc) != 0)
   12986 		return -1;
   12987 
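          	/*
          	 * Write the word address with the START bit, poll for DONE,
          	 * then pull the result out of the data field.
          	 */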
   12988 	for (i = 0; i < wordcnt; i++) {
   12989 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12990 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12991 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12992 		if (rv != 0) {
   12993 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12994 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12995 			break;
   12996 		}
   12997 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12998 	}
   12999 
   13000 	sc->nvm.release(sc);
   13001 	return rv;
   13002 }
   13003 
   13004 /* Flash */
   13005 
   13006 static int
   13007 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13008 {
   13009 	uint32_t eecd;
   13010 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13011 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13012 	uint32_t nvm_dword = 0;
   13013 	uint8_t sig_byte = 0;
   13014 	int rv;
   13015 
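          	/*
          	 * Each flash bank carries a signature word; the bank whose
          	 * signature byte holds ICH_NVM_SIG_VALUE in the masked bits is
          	 * the valid one.  On SPT/CNP the flash must be read as dwords,
          	 * so the offsets are computed differently below.
          	 */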
   13016 	switch (sc->sc_type) {
   13017 	case WM_T_PCH_SPT:
   13018 	case WM_T_PCH_CNP:
   13019 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13020 		act_offset = ICH_NVM_SIG_WORD * 2;
   13021 
   13022 		/* Set bank to 0 in case flash read fails. */
   13023 		*bank = 0;
   13024 
   13025 		/* Check bank 0 */
   13026 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13027 		if (rv != 0)
   13028 			return rv;
   13029 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13030 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13031 			*bank = 0;
   13032 			return 0;
   13033 		}
   13034 
   13035 		/* Check bank 1 */
    13036 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13037 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    13038 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13039 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13040 			*bank = 1;
   13041 			return 0;
   13042 		}
   13043 		aprint_error_dev(sc->sc_dev,
   13044 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13045 		return -1;
   13046 	case WM_T_ICH8:
   13047 	case WM_T_ICH9:
   13048 		eecd = CSR_READ(sc, WMREG_EECD);
   13049 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13050 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13051 			return 0;
   13052 		}
   13053 		/* FALLTHROUGH */
   13054 	default:
   13055 		/* Default to 0 */
   13056 		*bank = 0;
   13057 
   13058 		/* Check bank 0 */
   13059 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13060 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13061 			*bank = 0;
   13062 			return 0;
   13063 		}
   13064 
   13065 		/* Check bank 1 */
   13066 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13067 		    &sig_byte);
   13068 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13069 			*bank = 1;
   13070 			return 0;
   13071 		}
   13072 	}
   13073 
   13074 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13075 		device_xname(sc->sc_dev)));
   13076 	return -1;
   13077 }
   13078 
   13079 /******************************************************************************
   13080  * This function does initial flash setup so that a new read/write/erase cycle
   13081  * can be started.
   13082  *
   13083  * sc - The pointer to the hw structure
   13084  ****************************************************************************/
   13085 static int32_t
   13086 wm_ich8_cycle_init(struct wm_softc *sc)
   13087 {
   13088 	uint16_t hsfsts;
   13089 	int32_t error = 1;
   13090 	int32_t i     = 0;
   13091 
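          	/*
          	 * On PCH_SPT and newer the flash registers live in LAN memory
          	 * space and only 32-bit accesses work: HSFSTS is the low 16
          	 * bits of that dword and HSFCTL the high 16, hence the shifts
          	 * and masks below.
          	 */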
   13092 	if (sc->sc_type >= WM_T_PCH_SPT)
   13093 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13094 	else
   13095 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13096 
    13097 	/* Check the Flash Descriptor Valid bit in Hw status */
   13098 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13099 		return error;
   13100 
    13101 	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   13103 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13104 
   13105 	if (sc->sc_type >= WM_T_PCH_SPT)
   13106 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13107 	else
   13108 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13109 
    13110 	/*
    13111 	 * Ideally the hardware would provide either a cycle-in-progress
    13112 	 * bit to check before starting a new cycle, or an FDONE bit that
    13113 	 * resets to 1 after a hardware reset, so that it could reliably
    13114 	 * indicate whether a cycle is in progress or has completed.
    13115 	 * There should also be a software semaphore guarding FDONE or
    13116 	 * the in-progress bit so that two threads cannot start a cycle
    13117 	 * at the same time.
    13118 	 */
   13120 
   13121 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13122 		/*
   13123 		 * There is no cycle running at present, so we can start a
   13124 		 * cycle
   13125 		 */
   13126 
   13127 		/* Begin by setting Flash Cycle Done. */
   13128 		hsfsts |= HSFSTS_DONE;
   13129 		if (sc->sc_type >= WM_T_PCH_SPT)
   13130 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13131 			    hsfsts & 0xffffUL);
   13132 		else
   13133 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13134 		error = 0;
   13135 	} else {
   13136 		/*
    13137 		 * Otherwise poll for some time so the current cycle has a
   13138 		 * chance to end before giving up.
   13139 		 */
   13140 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13141 			if (sc->sc_type >= WM_T_PCH_SPT)
   13142 				hsfsts = ICH8_FLASH_READ32(sc,
   13143 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13144 			else
   13145 				hsfsts = ICH8_FLASH_READ16(sc,
   13146 				    ICH_FLASH_HSFSTS);
   13147 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13148 				error = 0;
   13149 				break;
   13150 			}
   13151 			delay(1);
   13152 		}
   13153 		if (error == 0) {
   13154 			/*
    13155 			 * The previous cycle completed before our timeout
    13156 			 * expired; now set the Flash Cycle Done.
   13157 			 */
   13158 			hsfsts |= HSFSTS_DONE;
   13159 			if (sc->sc_type >= WM_T_PCH_SPT)
   13160 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13161 				    hsfsts & 0xffffUL);
   13162 			else
   13163 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13164 				    hsfsts);
   13165 		}
   13166 	}
   13167 	return error;
   13168 }
   13169 
   13170 /******************************************************************************
   13171  * This function starts a flash cycle and waits for its completion
   13172  *
   13173  * sc - The pointer to the hw structure
   13174  ****************************************************************************/
   13175 static int32_t
   13176 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13177 {
   13178 	uint16_t hsflctl;
   13179 	uint16_t hsfsts;
   13180 	int32_t error = 1;
   13181 	uint32_t i = 0;
   13182 
   13183 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13184 	if (sc->sc_type >= WM_T_PCH_SPT)
   13185 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13186 	else
   13187 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13188 	hsflctl |= HSFCTL_GO;
   13189 	if (sc->sc_type >= WM_T_PCH_SPT)
   13190 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13191 		    (uint32_t)hsflctl << 16);
   13192 	else
   13193 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13194 
   13195 	/* Wait till FDONE bit is set to 1 */
   13196 	do {
   13197 		if (sc->sc_type >= WM_T_PCH_SPT)
   13198 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13199 			    & 0xffffUL;
   13200 		else
   13201 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13202 		if (hsfsts & HSFSTS_DONE)
   13203 			break;
   13204 		delay(1);
   13205 		i++;
   13206 	} while (i < timeout);
    13207 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13208 		error = 0;
   13209 
   13210 	return error;
   13211 }
   13212 
   13213 /******************************************************************************
   13214  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13215  *
   13216  * sc - The pointer to the hw structure
   13217  * index - The index of the byte or word to read.
   13218  * size - Size of data to read, 1=byte 2=word, 4=dword
   13219  * data - Pointer to the word to store the value read.
   13220  *****************************************************************************/
   13221 static int32_t
   13222 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13223     uint32_t size, uint32_t *data)
   13224 {
   13225 	uint16_t hsfsts;
   13226 	uint16_t hsflctl;
   13227 	uint32_t flash_linear_address;
   13228 	uint32_t flash_data = 0;
   13229 	int32_t error = 1;
   13230 	int32_t count = 0;
   13231 
    13232 	if (size < 1 || size > 4 || data == NULL ||
   13233 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13234 		return error;
   13235 
   13236 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13237 	    sc->sc_ich8_flash_base;
   13238 
   13239 	do {
   13240 		delay(1);
    13241 		/* Prepare the flash interface for a new cycle */
   13242 		error = wm_ich8_cycle_init(sc);
   13243 		if (error)
   13244 			break;
   13245 
   13246 		if (sc->sc_type >= WM_T_PCH_SPT)
   13247 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13248 			    >> 16;
   13249 		else
   13250 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13251 		/* The BCOUNT field holds size - 1 (0 = 1 byte ... 3 = 4 bytes). */
   13252 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13253 		    & HSFCTL_BCOUNT_MASK;
   13254 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13255 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13256 			/*
    13257 			 * In SPT, this register is in LAN memory space, not
    13258 			 * flash.  Therefore, only 32-bit access is supported.
   13259 			 */
   13260 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13261 			    (uint32_t)hsflctl << 16);
   13262 		} else
   13263 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13264 
   13265 		/*
   13266 		 * Write the last 24 bits of index into Flash Linear address
   13267 		 * field in Flash Address
   13268 		 */
    13269 		/* TODO: check the index against the size of the flash */
   13270 
   13271 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13272 
   13273 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13274 
   13275 		/*
    13276 		 * If the cycle succeeded, read the result out of Flash
    13277 		 * Data0, least significant byte first.  Otherwise, if FCERR
    13278 		 * is set, clear it and retry the whole sequence up to
    13279 		 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13280 		 */
   13281 		if (error == 0) {
   13282 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13283 			if (size == 1)
   13284 				*data = (uint8_t)(flash_data & 0x000000FF);
   13285 			else if (size == 2)
   13286 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13287 			else if (size == 4)
   13288 				*data = (uint32_t)flash_data;
   13289 			break;
   13290 		} else {
   13291 			/*
   13292 			 * If we've gotten here, then things are probably
   13293 			 * completely hosed, but if the error condition is
   13294 			 * detected, it won't hurt to give it another try...
   13295 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13296 			 */
   13297 			if (sc->sc_type >= WM_T_PCH_SPT)
   13298 				hsfsts = ICH8_FLASH_READ32(sc,
   13299 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13300 			else
   13301 				hsfsts = ICH8_FLASH_READ16(sc,
   13302 				    ICH_FLASH_HSFSTS);
   13303 
   13304 			if (hsfsts & HSFSTS_ERR) {
   13305 				/* Repeat for some time before giving up. */
   13306 				continue;
   13307 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13308 				break;
   13309 		}
   13310 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13311 
   13312 	return error;
   13313 }
   13314 
   13315 /******************************************************************************
   13316  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13317  *
   13318  * sc - pointer to wm_hw structure
   13319  * index - The index of the byte to read.
   13320  * data - Pointer to a byte to store the value read.
   13321  *****************************************************************************/
   13322 static int32_t
   13323 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13324 {
   13325 	int32_t status;
   13326 	uint32_t word = 0;
   13327 
   13328 	status = wm_read_ich8_data(sc, index, 1, &word);
   13329 	if (status == 0)
   13330 		*data = (uint8_t)word;
   13331 	else
   13332 		*data = 0;
   13333 
   13334 	return status;
   13335 }
   13336 
   13337 /******************************************************************************
   13338  * Reads a word from the NVM using the ICH8 flash access registers.
   13339  *
   13340  * sc - pointer to wm_hw structure
   13341  * index - The starting byte index of the word to read.
   13342  * data - Pointer to a word to store the value read.
   13343  *****************************************************************************/
   13344 static int32_t
   13345 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13346 {
   13347 	int32_t status;
   13348 	uint32_t word = 0;
   13349 
   13350 	status = wm_read_ich8_data(sc, index, 2, &word);
   13351 	if (status == 0)
   13352 		*data = (uint16_t)word;
   13353 	else
   13354 		*data = 0;
   13355 
   13356 	return status;
   13357 }
   13358 
   13359 /******************************************************************************
   13360  * Reads a dword from the NVM using the ICH8 flash access registers.
   13361  *
   13362  * sc - pointer to wm_hw structure
    13363  * index - The starting byte index of the dword to read.
    13364  * data - Pointer to a dword to store the value read.
   13365  *****************************************************************************/
   13366 static int32_t
   13367 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13368 {
   13369 	int32_t status;
   13370 
   13371 	status = wm_read_ich8_data(sc, index, 4, data);
   13372 	return status;
   13373 }
   13374 
   13375 /******************************************************************************
   13376  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13377  * register.
   13378  *
   13379  * sc - Struct containing variables accessed by shared code
   13380  * offset - offset of word in the EEPROM to read
   13381  * data - word read from the EEPROM
   13382  * words - number of words to read
   13383  *****************************************************************************/
   13384 static int
   13385 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13386 {
   13387 	int32_t	 rv = 0;
   13388 	uint32_t flash_bank = 0;
   13389 	uint32_t act_offset = 0;
   13390 	uint32_t bank_offset = 0;
   13391 	uint16_t word = 0;
   13392 	uint16_t i = 0;
   13393 
   13394 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13395 		device_xname(sc->sc_dev), __func__));
   13396 
   13397 	if (sc->nvm.acquire(sc) != 0)
   13398 		return -1;
   13399 
   13400 	/*
   13401 	 * We need to know which is the valid flash bank.  In the event
   13402 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13403 	 * managing flash_bank. So it cannot be trusted and needs
   13404 	 * to be updated with each read.
   13405 	 */
   13406 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13407 	if (rv) {
   13408 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13409 			device_xname(sc->sc_dev)));
   13410 		flash_bank = 0;
   13411 	}
   13412 
   13413 	/*
   13414 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13415 	 * size
   13416 	 */
   13417 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13418 
   13419 	for (i = 0; i < words; i++) {
   13420 		/* The NVM part needs a byte offset, hence * 2 */
   13421 		act_offset = bank_offset + ((offset + i) * 2);
   13422 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13423 		if (rv) {
   13424 			aprint_error_dev(sc->sc_dev,
   13425 			    "%s: failed to read NVM\n", __func__);
   13426 			break;
   13427 		}
   13428 		data[i] = word;
   13429 	}
   13430 
   13431 	sc->nvm.release(sc);
   13432 	return rv;
   13433 }
   13434 
   13435 /******************************************************************************
   13436  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13437  * register.
   13438  *
   13439  * sc - Struct containing variables accessed by shared code
   13440  * offset - offset of word in the EEPROM to read
   13441  * data - word read from the EEPROM
   13442  * words - number of words to read
   13443  *****************************************************************************/
   13444 static int
   13445 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13446 {
   13447 	int32_t	 rv = 0;
   13448 	uint32_t flash_bank = 0;
   13449 	uint32_t act_offset = 0;
   13450 	uint32_t bank_offset = 0;
   13451 	uint32_t dword = 0;
   13452 	uint16_t i = 0;
   13453 
   13454 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13455 		device_xname(sc->sc_dev), __func__));
   13456 
   13457 	if (sc->nvm.acquire(sc) != 0)
   13458 		return -1;
   13459 
   13460 	/*
   13461 	 * We need to know which is the valid flash bank.  In the event
   13462 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13463 	 * managing flash_bank. So it cannot be trusted and needs
   13464 	 * to be updated with each read.
   13465 	 */
   13466 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13467 	if (rv) {
   13468 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13469 			device_xname(sc->sc_dev)));
   13470 		flash_bank = 0;
   13471 	}
   13472 
   13473 	/*
   13474 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13475 	 * size
   13476 	 */
   13477 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13478 
   13479 	for (i = 0; i < words; i++) {
   13480 		/* The NVM part needs a byte offset, hence * 2 */
   13481 		act_offset = bank_offset + ((offset + i) * 2);
   13482 		/* but we must read dword aligned, so mask ... */
   13483 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13484 		if (rv) {
   13485 			aprint_error_dev(sc->sc_dev,
   13486 			    "%s: failed to read NVM\n", __func__);
   13487 			break;
   13488 		}
   13489 		/* ... and pick out low or high word */
   13490 		if ((act_offset & 0x2) == 0)
   13491 			data[i] = (uint16_t)(dword & 0xFFFF);
   13492 		else
   13493 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13494 	}
   13495 
   13496 	sc->nvm.release(sc);
   13497 	return rv;
   13498 }
   13499 
   13500 /* iNVM */
   13501 
   13502 static int
   13503 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13504 {
    13505 	int32_t	 rv = -1;	/* Fail unless the requested word is found */
   13506 	uint32_t invm_dword;
   13507 	uint16_t i;
   13508 	uint8_t record_type, word_address;
   13509 
   13510 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13511 		device_xname(sc->sc_dev), __func__));
   13512 
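          	/*
          	 * Walk the iNVM dword records, skipping over CSR-autoload and
          	 * RSA-key structures, until a word-autoload record matching
          	 * the requested address is found.
          	 */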
   13513 	for (i = 0; i < INVM_SIZE; i++) {
   13514 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13515 		/* Get record type */
   13516 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13517 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13518 			break;
   13519 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13520 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13521 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13522 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13523 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13524 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13525 			if (word_address == address) {
   13526 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13527 				rv = 0;
   13528 				break;
   13529 			}
   13530 		}
   13531 	}
   13532 
   13533 	return rv;
   13534 }
   13535 
   13536 static int
   13537 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13538 {
   13539 	int rv = 0;
   13540 	int i;
   13541 
   13542 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13543 		device_xname(sc->sc_dev), __func__));
   13544 
   13545 	if (sc->nvm.acquire(sc) != 0)
   13546 		return -1;
   13547 
   13548 	for (i = 0; i < words; i++) {
   13549 		switch (offset + i) {
   13550 		case NVM_OFF_MACADDR:
   13551 		case NVM_OFF_MACADDR1:
   13552 		case NVM_OFF_MACADDR2:
   13553 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13554 			if (rv != 0) {
   13555 				data[i] = 0xffff;
   13556 				rv = -1;
   13557 			}
   13558 			break;
   13559 		case NVM_OFF_CFG2:
   13560 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13561 			if (rv != 0) {
   13562 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13563 				rv = 0;
   13564 			}
   13565 			break;
   13566 		case NVM_OFF_CFG4:
   13567 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13568 			if (rv != 0) {
   13569 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13570 				rv = 0;
   13571 			}
   13572 			break;
   13573 		case NVM_OFF_LED_1_CFG:
   13574 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13575 			if (rv != 0) {
   13576 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13577 				rv = 0;
   13578 			}
   13579 			break;
   13580 		case NVM_OFF_LED_0_2_CFG:
   13581 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13582 			if (rv != 0) {
   13583 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13584 				rv = 0;
   13585 			}
   13586 			break;
   13587 		case NVM_OFF_ID_LED_SETTINGS:
   13588 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13589 			if (rv != 0) {
   13590 				*data = ID_LED_RESERVED_FFFF;
   13591 				rv = 0;
   13592 			}
   13593 			break;
   13594 		default:
   13595 			DPRINTF(WM_DEBUG_NVM,
    13596 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
   13597 			*data = NVM_RESERVED_WORD;
   13598 			break;
   13599 		}
   13600 	}
   13601 
   13602 	sc->nvm.release(sc);
   13603 	return rv;
   13604 }
   13605 
   13606 /* Lock, detecting NVM type, validate checksum, version and read */
   13607 
   13608 static int
   13609 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13610 {
   13611 	uint32_t eecd = 0;
   13612 
   13613 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13614 	    || sc->sc_type == WM_T_82583) {
   13615 		eecd = CSR_READ(sc, WMREG_EECD);
   13616 
   13617 		/* Isolate bits 15 & 16 */
   13618 		eecd = ((eecd >> 15) & 0x03);
   13619 
   13620 		/* If both bits are set, device is Flash type */
   13621 		if (eecd == 0x03)
   13622 			return 0;
   13623 	}
   13624 	return 1;
   13625 }
   13626 
   13627 static int
   13628 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13629 {
   13630 	uint32_t eec;
   13631 
   13632 	eec = CSR_READ(sc, WMREG_EEC);
   13633 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13634 		return 1;
   13635 
   13636 	return 0;
   13637 }
   13638 
   13639 /*
   13640  * wm_nvm_validate_checksum
   13641  *
    13642  * The checksum is valid when the sum of the first 64 (16-bit) words
    13642  * equals NVM_CHECKSUM.
   13643  */
   13644 static int
   13645 wm_nvm_validate_checksum(struct wm_softc *sc)
   13646 {
   13647 	uint16_t checksum;
   13648 	uint16_t eeprom_data;
   13649 #ifdef WM_DEBUG
   13650 	uint16_t csum_wordaddr, valid_checksum;
   13651 #endif
   13652 	int i;
   13653 
   13654 	checksum = 0;
   13655 
   13656 	/* Don't check for I211 */
   13657 	if (sc->sc_type == WM_T_I211)
   13658 		return 0;
   13659 
   13660 #ifdef WM_DEBUG
   13661 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13662 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13663 		csum_wordaddr = NVM_OFF_COMPAT;
   13664 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13665 	} else {
   13666 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13667 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13668 	}
   13669 
   13670 	/* Dump EEPROM image for debug */
   13671 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13672 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13673 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13674 		/* XXX PCH_SPT? */
   13675 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13676 		if ((eeprom_data & valid_checksum) == 0)
   13677 			DPRINTF(WM_DEBUG_NVM,
   13678 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13679 				device_xname(sc->sc_dev), eeprom_data,
   13680 				    valid_checksum));
   13681 	}
   13682 
   13683 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13684 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13685 		for (i = 0; i < NVM_SIZE; i++) {
   13686 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13687 				printf("XXXX ");
   13688 			else
   13689 				printf("%04hx ", eeprom_data);
   13690 			if (i % 8 == 7)
   13691 				printf("\n");
   13692 		}
   13693 	}
   13694 
   13695 #endif /* WM_DEBUG */
   13696 
   13697 	for (i = 0; i < NVM_SIZE; i++) {
   13698 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13699 			return 1;
   13700 		checksum += eeprom_data;
   13701 	}
   13702 
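          	/*
          	 * A mismatch is only logged (under WM_DEBUG); it is not
          	 * treated as fatal.
          	 */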
   13703 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13704 #ifdef WM_DEBUG
   13705 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13706 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13707 #endif
   13708 	}
   13709 
   13710 	return 0;
   13711 }
   13712 
   13713 static void
   13714 wm_nvm_version_invm(struct wm_softc *sc)
   13715 {
   13716 	uint32_t dword;
   13717 
   13718 	/*
    13719 	 * Linux's code to decode the version is very strange, so we
    13720 	 * don't follow that algorithm; we just use word 61 as the
    13721 	 * document describes.  Perhaps it's not perfect though...
   13722 	 *
   13723 	 * Example:
   13724 	 *
   13725 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13726 	 */
   13727 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13728 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13729 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13730 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13731 }
   13732 
   13733 static void
   13734 wm_nvm_version(struct wm_softc *sc)
   13735 {
   13736 	uint16_t major, minor, build, patch;
   13737 	uint16_t uid0, uid1;
   13738 	uint16_t nvm_data;
   13739 	uint16_t off;
   13740 	bool check_version = false;
   13741 	bool check_optionrom = false;
   13742 	bool have_build = false;
   13743 	bool have_uid = true;
   13744 
   13745 	/*
   13746 	 * Version format:
   13747 	 *
   13748 	 * XYYZ
   13749 	 * X0YZ
   13750 	 * X0YY
   13751 	 *
   13752 	 * Example:
   13753 	 *
   13754 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13755 	 *	82571	0x50a6	5.10.6?
   13756 	 *	82572	0x506a	5.6.10?
   13757 	 *	82572EI	0x5069	5.6.9?
   13758 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13759 	 *		0x2013	2.1.3?
   13760 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13761 	 * ICH8+82567	0x0040	0.4.0?
   13762 	 * ICH9+82566	0x1040	1.4.0?
   13763 	 *ICH10+82567	0x0043	0.4.3?
   13764 	 *  PCH+82577	0x00c1	0.12.1?
   13765 	 * PCH2+82579	0x00d3	0.13.3?
   13766 	 *		0x00d4	0.13.4?
   13767 	 *  LPT+I218	0x0023	0.2.3?
   13768 	 *  SPT+I219	0x0084	0.8.4?
   13769 	 *  CNP+I219	0x0054	0.5.4?
   13770 	 */
   13771 
   13772 	/*
   13773 	 * XXX
    13774 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64
    13775 	 * words.  I've never seen real 82574 hardware with such a small
    13776 	 * SPI ROM.
   13776 	 */
   13777 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13778 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13779 		have_uid = false;
   13780 
   13781 	switch (sc->sc_type) {
   13782 	case WM_T_82571:
   13783 	case WM_T_82572:
   13784 	case WM_T_82574:
   13785 	case WM_T_82583:
   13786 		check_version = true;
   13787 		check_optionrom = true;
   13788 		have_build = true;
   13789 		break;
   13790 	case WM_T_ICH8:
   13791 	case WM_T_ICH9:
   13792 	case WM_T_ICH10:
   13793 	case WM_T_PCH:
   13794 	case WM_T_PCH2:
   13795 	case WM_T_PCH_LPT:
   13796 	case WM_T_PCH_SPT:
   13797 	case WM_T_PCH_CNP:
   13798 		check_version = true;
   13799 		have_build = true;
   13800 		have_uid = false;
   13801 		break;
   13802 	case WM_T_82575:
   13803 	case WM_T_82576:
   13804 	case WM_T_82580:
   13805 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13806 			check_version = true;
   13807 		break;
   13808 	case WM_T_I211:
   13809 		wm_nvm_version_invm(sc);
   13810 		have_uid = false;
   13811 		goto printver;
   13812 	case WM_T_I210:
   13813 		if (!wm_nvm_flash_presence_i210(sc)) {
   13814 			wm_nvm_version_invm(sc);
   13815 			have_uid = false;
   13816 			goto printver;
   13817 		}
   13818 		/* FALLTHROUGH */
   13819 	case WM_T_I350:
   13820 	case WM_T_I354:
   13821 		check_version = true;
   13822 		check_optionrom = true;
   13823 		break;
   13824 	default:
   13825 		return;
   13826 	}
   13827 	if (check_version
   13828 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13829 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13830 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13831 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13832 			build = nvm_data & NVM_BUILD_MASK;
   13833 			have_build = true;
   13834 		} else
   13835 			minor = nvm_data & 0x00ff;
   13836 
    13837 		/* Convert the BCD-encoded minor number to decimal */
   13838 		minor = (minor / 16) * 10 + (minor % 16);
   13839 		sc->sc_nvm_ver_major = major;
   13840 		sc->sc_nvm_ver_minor = minor;
   13841 
   13842 printver:
   13843 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13844 		    sc->sc_nvm_ver_minor);
   13845 		if (have_build) {
   13846 			sc->sc_nvm_ver_build = build;
   13847 			aprint_verbose(".%d", build);
   13848 		}
   13849 	}
   13850 
    13851 	/* Assume the Option ROM area is above NVM_SIZE */
   13852 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13853 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13854 		/* Option ROM Version */
   13855 		if ((off != 0x0000) && (off != 0xffff)) {
   13856 			int rv;
   13857 
   13858 			off += NVM_COMBO_VER_OFF;
   13859 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13860 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13861 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13862 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13863 				/* 16bits */
   13864 				major = uid0 >> 8;
   13865 				build = (uid0 << 8) | (uid1 >> 8);
   13866 				patch = uid1 & 0x00ff;
   13867 				aprint_verbose(", option ROM Version %d.%d.%d",
   13868 				    major, build, patch);
   13869 			}
   13870 		}
   13871 	}
   13872 
   13873 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13874 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13875 }
   13876 
   13877 /*
   13878  * wm_nvm_read:
   13879  *
   13880  *	Read data from the serial EEPROM.
   13881  */
   13882 static int
   13883 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13884 {
   13885 	int rv;
   13886 
   13887 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13888 		device_xname(sc->sc_dev), __func__));
   13889 
   13890 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13891 		return -1;
   13892 
   13893 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13894 
   13895 	return rv;
   13896 }
   13897 
   13898 /*
   13899  * Hardware semaphores.
    13900  * Very complex...
   13901  */
   13902 
   13903 static int
   13904 wm_get_null(struct wm_softc *sc)
   13905 {
   13906 
   13907 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13908 		device_xname(sc->sc_dev), __func__));
   13909 	return 0;
   13910 }
   13911 
   13912 static void
   13913 wm_put_null(struct wm_softc *sc)
   13914 {
   13915 
   13916 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13917 		device_xname(sc->sc_dev), __func__));
   13918 	return;
   13919 }
   13920 
   13921 static int
   13922 wm_get_eecd(struct wm_softc *sc)
   13923 {
   13924 	uint32_t reg;
   13925 	int x;
   13926 
   13927 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13928 		device_xname(sc->sc_dev), __func__));
   13929 
   13930 	reg = CSR_READ(sc, WMREG_EECD);
   13931 
   13932 	/* Request EEPROM access. */
   13933 	reg |= EECD_EE_REQ;
   13934 	CSR_WRITE(sc, WMREG_EECD, reg);
   13935 
   13936 	/* ..and wait for it to be granted. */
   13937 	for (x = 0; x < 1000; x++) {
   13938 		reg = CSR_READ(sc, WMREG_EECD);
   13939 		if (reg & EECD_EE_GNT)
   13940 			break;
   13941 		delay(5);
   13942 	}
   13943 	if ((reg & EECD_EE_GNT) == 0) {
   13944 		aprint_error_dev(sc->sc_dev,
   13945 		    "could not acquire EEPROM GNT\n");
   13946 		reg &= ~EECD_EE_REQ;
   13947 		CSR_WRITE(sc, WMREG_EECD, reg);
   13948 		return -1;
   13949 	}
   13950 
   13951 	return 0;
   13952 }
   13953 
   13954 static void
   13955 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13956 {
   13957 
   13958 	*eecd |= EECD_SK;
   13959 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13960 	CSR_WRITE_FLUSH(sc);
   13961 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13962 		delay(1);
   13963 	else
   13964 		delay(50);
   13965 }
   13966 
   13967 static void
   13968 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13969 {
   13970 
   13971 	*eecd &= ~EECD_SK;
   13972 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13973 	CSR_WRITE_FLUSH(sc);
   13974 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13975 		delay(1);
   13976 	else
   13977 		delay(50);
   13978 }
   13979 
   13980 static void
   13981 wm_put_eecd(struct wm_softc *sc)
   13982 {
   13983 	uint32_t reg;
   13984 
   13985 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13986 		device_xname(sc->sc_dev), __func__));
   13987 
   13988 	/* Stop nvm */
   13989 	reg = CSR_READ(sc, WMREG_EECD);
   13990 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13991 		/* Pull CS high */
   13992 		reg |= EECD_CS;
   13993 		wm_nvm_eec_clock_lower(sc, &reg);
   13994 	} else {
   13995 		/* CS on Microwire is active-high */
   13996 		reg &= ~(EECD_CS | EECD_DI);
   13997 		CSR_WRITE(sc, WMREG_EECD, reg);
   13998 		wm_nvm_eec_clock_raise(sc, &reg);
   13999 		wm_nvm_eec_clock_lower(sc, &reg);
   14000 	}
   14001 
   14002 	reg = CSR_READ(sc, WMREG_EECD);
   14003 	reg &= ~EECD_EE_REQ;
   14004 	CSR_WRITE(sc, WMREG_EECD, reg);
   14005 
   14006 	return;
   14007 }
   14008 
   14009 /*
   14010  * Get hardware semaphore.
   14011  * Same as e1000_get_hw_semaphore_generic()
   14012  */
   14013 static int
   14014 wm_get_swsm_semaphore(struct wm_softc *sc)
   14015 {
   14016 	int32_t timeout;
   14017 	uint32_t swsm;
   14018 
   14019 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14020 		device_xname(sc->sc_dev), __func__));
   14021 	KASSERT(sc->sc_nvm_wordsize > 0);
   14022 
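          	/*
          	 * Acquisition is a two-step handshake: wait for the host SMBI
          	 * bit to clear, then set SWESMBI and read it back to win the
          	 * software/firmware arbitration.
          	 */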
   14023 retry:
   14024 	/* Get the SW semaphore. */
   14025 	timeout = sc->sc_nvm_wordsize + 1;
   14026 	while (timeout) {
   14027 		swsm = CSR_READ(sc, WMREG_SWSM);
   14028 
   14029 		if ((swsm & SWSM_SMBI) == 0)
   14030 			break;
   14031 
   14032 		delay(50);
   14033 		timeout--;
   14034 	}
   14035 
   14036 	if (timeout == 0) {
   14037 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14038 			/*
   14039 			 * In rare circumstances, the SW semaphore may already
   14040 			 * be held unintentionally. Clear the semaphore once
   14041 			 * before giving up.
   14042 			 */
   14043 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14044 			wm_put_swsm_semaphore(sc);
   14045 			goto retry;
   14046 		}
   14047 		aprint_error_dev(sc->sc_dev,
   14048 		    "could not acquire SWSM SMBI\n");
   14049 		return 1;
   14050 	}
   14051 
   14052 	/* Get the FW semaphore. */
   14053 	timeout = sc->sc_nvm_wordsize + 1;
   14054 	while (timeout) {
   14055 		swsm = CSR_READ(sc, WMREG_SWSM);
   14056 		swsm |= SWSM_SWESMBI;
   14057 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14058 		/* If we managed to set the bit we got the semaphore. */
   14059 		swsm = CSR_READ(sc, WMREG_SWSM);
   14060 		if (swsm & SWSM_SWESMBI)
   14061 			break;
   14062 
   14063 		delay(50);
   14064 		timeout--;
   14065 	}
   14066 
   14067 	if (timeout == 0) {
   14068 		aprint_error_dev(sc->sc_dev,
   14069 		    "could not acquire SWSM SWESMBI\n");
   14070 		/* Release semaphores */
   14071 		wm_put_swsm_semaphore(sc);
   14072 		return 1;
   14073 	}
   14074 	return 0;
   14075 }
   14076 
   14077 /*
   14078  * Put hardware semaphore.
   14079  * Same as e1000_put_hw_semaphore_generic()
   14080  */
   14081 static void
   14082 wm_put_swsm_semaphore(struct wm_softc *sc)
   14083 {
   14084 	uint32_t swsm;
   14085 
   14086 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14087 		device_xname(sc->sc_dev), __func__));
   14088 
   14089 	swsm = CSR_READ(sc, WMREG_SWSM);
   14090 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14091 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14092 }
   14093 
   14094 /*
   14095  * Get SW/FW semaphore.
   14096  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14097  */
   14098 static int
   14099 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14100 {
   14101 	uint32_t swfw_sync;
   14102 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14103 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14104 	int timeout;
   14105 
   14106 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14107 		device_xname(sc->sc_dev), __func__));
   14108 
   14109 	if (sc->sc_type == WM_T_80003)
   14110 		timeout = 50;
   14111 	else
   14112 		timeout = 200;
   14113 
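          	/*
          	 * SW_FW_SYNC is itself protected by the SWSM hardware
          	 * semaphore, so take and drop that around every attempt.
          	 */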
   14114 	while (timeout) {
   14115 		if (wm_get_swsm_semaphore(sc)) {
   14116 			aprint_error_dev(sc->sc_dev,
   14117 			    "%s: failed to get semaphore\n",
   14118 			    __func__);
   14119 			return 1;
   14120 		}
   14121 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14122 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14123 			swfw_sync |= swmask;
   14124 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14125 			wm_put_swsm_semaphore(sc);
   14126 			return 0;
   14127 		}
   14128 		wm_put_swsm_semaphore(sc);
   14129 		delay(5000);
   14130 		timeout--;
   14131 	}
   14132 	device_printf(sc->sc_dev,
   14133 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14134 	    mask, swfw_sync);
   14135 	return 1;
   14136 }
   14137 
   14138 static void
   14139 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14140 {
   14141 	uint32_t swfw_sync;
   14142 
   14143 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14144 		device_xname(sc->sc_dev), __func__));
   14145 
   14146 	while (wm_get_swsm_semaphore(sc) != 0)
   14147 		continue;
   14148 
   14149 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14150 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14151 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14152 
   14153 	wm_put_swsm_semaphore(sc);
   14154 }
   14155 
   14156 static int
   14157 wm_get_nvm_80003(struct wm_softc *sc)
   14158 {
   14159 	int rv;
   14160 
   14161 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14162 		device_xname(sc->sc_dev), __func__));
   14163 
   14164 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14165 		aprint_error_dev(sc->sc_dev,
   14166 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14167 		return rv;
   14168 	}
   14169 
   14170 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14171 	    && (rv = wm_get_eecd(sc)) != 0) {
   14172 		aprint_error_dev(sc->sc_dev,
   14173 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14174 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14175 		return rv;
   14176 	}
   14177 
   14178 	return 0;
   14179 }
   14180 
   14181 static void
   14182 wm_put_nvm_80003(struct wm_softc *sc)
   14183 {
   14184 
   14185 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14186 		device_xname(sc->sc_dev), __func__));
   14187 
   14188 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14189 		wm_put_eecd(sc);
   14190 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14191 }
   14192 
   14193 static int
   14194 wm_get_nvm_82571(struct wm_softc *sc)
   14195 {
   14196 	int rv;
   14197 
   14198 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14199 		device_xname(sc->sc_dev), __func__));
   14200 
   14201 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14202 		return rv;
   14203 
   14204 	switch (sc->sc_type) {
   14205 	case WM_T_82573:
   14206 		break;
   14207 	default:
   14208 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14209 			rv = wm_get_eecd(sc);
   14210 		break;
   14211 	}
   14212 
   14213 	if (rv != 0) {
   14214 		aprint_error_dev(sc->sc_dev,
   14215 		    "%s: failed to get semaphore\n",
   14216 		    __func__);
   14217 		wm_put_swsm_semaphore(sc);
   14218 	}
   14219 
   14220 	return rv;
   14221 }
   14222 
   14223 static void
   14224 wm_put_nvm_82571(struct wm_softc *sc)
   14225 {
   14226 
   14227 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14228 		device_xname(sc->sc_dev), __func__));
   14229 
   14230 	switch (sc->sc_type) {
   14231 	case WM_T_82573:
   14232 		break;
   14233 	default:
   14234 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14235 			wm_put_eecd(sc);
   14236 		break;
   14237 	}
   14238 
   14239 	wm_put_swsm_semaphore(sc);
   14240 }
   14241 
   14242 static int
   14243 wm_get_phy_82575(struct wm_softc *sc)
   14244 {
   14245 
   14246 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14247 		device_xname(sc->sc_dev), __func__));
   14248 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14249 }
   14250 
   14251 static void
   14252 wm_put_phy_82575(struct wm_softc *sc)
   14253 {
   14254 
   14255 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14256 		device_xname(sc->sc_dev), __func__));
   14257 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14258 }
   14259 
   14260 static int
   14261 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14262 {
   14263 	uint32_t ext_ctrl;
    14264 	int timeout;
   14265 
   14266 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14267 		device_xname(sc->sc_dev), __func__));
   14268 
   14269 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14270 	for (timeout = 0; timeout < 200; timeout++) {
   14271 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14272 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14273 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14274 
   14275 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14276 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14277 			return 0;
   14278 		delay(5000);
   14279 	}
   14280 	device_printf(sc->sc_dev,
   14281 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14282 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14283 	return 1;
   14284 }
   14285 
   14286 static void
   14287 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14288 {
   14289 	uint32_t ext_ctrl;
   14290 
   14291 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14292 		device_xname(sc->sc_dev), __func__));
   14293 
   14294 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14295 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14296 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14297 
   14298 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14299 }
   14300 
   14301 static int
   14302 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14303 {
   14304 	uint32_t ext_ctrl;
   14305 	int timeout;
   14306 
   14307 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14308 		device_xname(sc->sc_dev), __func__));
   14309 	mutex_enter(sc->sc_ich_phymtx);
   14310 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14311 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14312 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14313 			break;
   14314 		delay(1000);
   14315 	}
   14316 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14317 		device_printf(sc->sc_dev,
   14318 		    "SW has already locked the resource\n");
   14319 		goto out;
   14320 	}
   14321 
   14322 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14323 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14324 	for (timeout = 0; timeout < 1000; timeout++) {
   14325 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14326 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14327 			break;
   14328 		delay(1000);
   14329 	}
   14330 	if (timeout >= 1000) {
   14331 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14332 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14333 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14334 		goto out;
   14335 	}
   14336 	return 0;
   14337 
   14338 out:
   14339 	mutex_exit(sc->sc_ich_phymtx);
   14340 	return 1;
   14341 }
   14342 
   14343 static void
   14344 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14345 {
   14346 	uint32_t ext_ctrl;
   14347 
   14348 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14349 		device_xname(sc->sc_dev), __func__));
   14350 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14351 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14352 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14353 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14354 	} else {
   14355 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14356 	}
   14357 
   14358 	mutex_exit(sc->sc_ich_phymtx);
   14359 }
   14360 
   14361 static int
   14362 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14363 {
   14364 
   14365 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14366 		device_xname(sc->sc_dev), __func__));
   14367 	mutex_enter(sc->sc_ich_nvmmtx);
   14368 
   14369 	return 0;
   14370 }
   14371 
   14372 static void
   14373 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14374 {
   14375 
   14376 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14377 		device_xname(sc->sc_dev), __func__));
   14378 	mutex_exit(sc->sc_ich_nvmmtx);
   14379 }
   14380 
   14381 static int
   14382 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14383 {
   14384 	int i = 0;
   14385 	uint32_t reg;
   14386 
   14387 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14388 		device_xname(sc->sc_dev), __func__));
   14389 
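          	/*
          	 * The EXTCNFCTR MDIO software-ownership bit serves as the
          	 * semaphore: keep writing it until the hardware lets the bit
          	 * stick.
          	 */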
   14390 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14391 	do {
   14392 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14393 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14394 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14395 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14396 			break;
   14397 		delay(2*1000);
   14398 		i++;
   14399 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14400 
   14401 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14402 		wm_put_hw_semaphore_82573(sc);
   14403 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14404 		    device_xname(sc->sc_dev));
   14405 		return -1;
   14406 	}
   14407 
   14408 	return 0;
   14409 }
   14410 
   14411 static void
   14412 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14413 {
   14414 	uint32_t reg;
   14415 
   14416 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14417 		device_xname(sc->sc_dev), __func__));
   14418 
   14419 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14420 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14421 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14422 }
   14423 
   14424 /*
   14425  * Management mode and power management related subroutines.
   14426  * BMC, AMT, suspend/resume and EEE.
   14427  */
   14428 
   14429 #ifdef WM_WOL
   14430 static int
   14431 wm_check_mng_mode(struct wm_softc *sc)
   14432 {
   14433 	int rv;
   14434 
   14435 	switch (sc->sc_type) {
   14436 	case WM_T_ICH8:
   14437 	case WM_T_ICH9:
   14438 	case WM_T_ICH10:
   14439 	case WM_T_PCH:
   14440 	case WM_T_PCH2:
   14441 	case WM_T_PCH_LPT:
   14442 	case WM_T_PCH_SPT:
   14443 	case WM_T_PCH_CNP:
   14444 		rv = wm_check_mng_mode_ich8lan(sc);
   14445 		break;
   14446 	case WM_T_82574:
   14447 	case WM_T_82583:
   14448 		rv = wm_check_mng_mode_82574(sc);
   14449 		break;
   14450 	case WM_T_82571:
   14451 	case WM_T_82572:
   14452 	case WM_T_82573:
   14453 	case WM_T_80003:
   14454 		rv = wm_check_mng_mode_generic(sc);
   14455 		break;
   14456 	default:
    14457 		/* Nothing to do */
   14458 		rv = 0;
   14459 		break;
   14460 	}
   14461 
   14462 	return rv;
   14463 }
   14464 
   14465 static int
   14466 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14467 {
   14468 	uint32_t fwsm;
   14469 
   14470 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14471 
   14472 	if (((fwsm & FWSM_FW_VALID) != 0)
   14473 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14474 		return 1;
   14475 
   14476 	return 0;
   14477 }
   14478 
   14479 static int
   14480 wm_check_mng_mode_82574(struct wm_softc *sc)
   14481 {
   14482 	uint16_t data;
   14483 
   14484 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14485 
   14486 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14487 		return 1;
   14488 
   14489 	return 0;
   14490 }
   14491 
   14492 static int
   14493 wm_check_mng_mode_generic(struct wm_softc *sc)
   14494 {
   14495 	uint32_t fwsm;
   14496 
   14497 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14498 
   14499 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14500 		return 1;
   14501 
   14502 	return 0;
   14503 }
   14504 #endif /* WM_WOL */
   14505 
   14506 static int
   14507 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14508 {
   14509 	uint32_t manc, fwsm, factps;
   14510 
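          	/*
          	 * Pass-through is reported only when manageability firmware
          	 * is present, TCO receive is enabled, and the firmware is in
          	 * pass-through (IAMT) rather than ASF mode.
          	 */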
   14511 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14512 		return 0;
   14513 
   14514 	manc = CSR_READ(sc, WMREG_MANC);
   14515 
   14516 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14517 		device_xname(sc->sc_dev), manc));
   14518 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14519 		return 0;
   14520 
   14521 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14522 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14523 		factps = CSR_READ(sc, WMREG_FACTPS);
   14524 		if (((factps & FACTPS_MNGCG) == 0)
   14525 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14526 			return 1;
   14527 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14528 		uint16_t data;
   14529 
   14530 		factps = CSR_READ(sc, WMREG_FACTPS);
   14531 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14532 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14533 			device_xname(sc->sc_dev), factps, data));
   14534 		if (((factps & FACTPS_MNGCG) == 0)
   14535 		    && ((data & NVM_CFG2_MNGM_MASK)
   14536 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14537 			return 1;
   14538 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14539 	    && ((manc & MANC_ASF_EN) == 0))
   14540 		return 1;
   14541 
   14542 	return 0;
   14543 }
   14544 
   14545 static bool
   14546 wm_phy_resetisblocked(struct wm_softc *sc)
   14547 {
   14548 	bool blocked = false;
   14549 	uint32_t reg;
   14550 	int i = 0;
   14551 
   14552 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14553 		device_xname(sc->sc_dev), __func__));
   14554 
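          	/*
          	 * On ICH/PCH parts the firmware deasserts FWSM_RSPCIPHY while
          	 * it owns the PHY; poll up to 30 times (10ms apiece) for the
          	 * block to clear.
          	 */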
   14555 	switch (sc->sc_type) {
   14556 	case WM_T_ICH8:
   14557 	case WM_T_ICH9:
   14558 	case WM_T_ICH10:
   14559 	case WM_T_PCH:
   14560 	case WM_T_PCH2:
   14561 	case WM_T_PCH_LPT:
   14562 	case WM_T_PCH_SPT:
   14563 	case WM_T_PCH_CNP:
   14564 		do {
   14565 			reg = CSR_READ(sc, WMREG_FWSM);
   14566 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14567 				blocked = true;
   14568 				delay(10*1000);
   14569 				continue;
   14570 			}
   14571 			blocked = false;
    14572 		} while (blocked && (i++ < 30));
    14573 		return blocked;
   14575 	case WM_T_82571:
   14576 	case WM_T_82572:
   14577 	case WM_T_82573:
   14578 	case WM_T_82574:
   14579 	case WM_T_82583:
   14580 	case WM_T_80003:
    14581 		reg = CSR_READ(sc, WMREG_MANC);
    14582 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   14587 	default:
   14588 		/* No problem */
   14589 		break;
   14590 	}
   14591 
   14592 	return false;
   14593 }
   14594 
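          /*
           * wm_get_hw_control()/wm_release_hw_control() tell the manageability
           * firmware whether a host driver is loaded: via SWSM_DRV_LOAD on the
           * 82573 and via CTRL_EXT_DRV_LOAD on 82571 and newer.
           */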
   14595 static void
   14596 wm_get_hw_control(struct wm_softc *sc)
   14597 {
   14598 	uint32_t reg;
   14599 
   14600 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14601 		device_xname(sc->sc_dev), __func__));
   14602 
   14603 	if (sc->sc_type == WM_T_82573) {
   14604 		reg = CSR_READ(sc, WMREG_SWSM);
   14605 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14606 	} else if (sc->sc_type >= WM_T_82571) {
   14607 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14608 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14609 	}
   14610 }
   14611 
   14612 static void
   14613 wm_release_hw_control(struct wm_softc *sc)
   14614 {
   14615 	uint32_t reg;
   14616 
   14617 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14618 		device_xname(sc->sc_dev), __func__));
   14619 
   14620 	if (sc->sc_type == WM_T_82573) {
   14621 		reg = CSR_READ(sc, WMREG_SWSM);
   14622 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14623 	} else if (sc->sc_type >= WM_T_82571) {
   14624 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14625 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14626 	}
   14627 }
   14628 
   14629 static void
   14630 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14631 {
   14632 	uint32_t reg;
   14633 
   14634 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14635 		device_xname(sc->sc_dev), __func__));
   14636 
   14637 	if (sc->sc_type < WM_T_PCH2)
   14638 		return;
   14639 
   14640 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14641 
   14642 	if (gate)
   14643 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14644 	else
   14645 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14646 
   14647 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14648 }
   14649 
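          /*
           * PCH PHY bring-up.  After power-up the MAC-PHY interconnect can be
           * left in SMBus mode.  The strategy below: try the PHY as-is, force
           * the MAC into SMBus mode and retry, and finally toggle LANPHYPC to
           * power-cycle the PHY back to PCIe mode.  Everything runs under the
           * PHY semaphore.
           */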
   14650 static int
   14651 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14652 {
   14653 	uint32_t fwsm, reg;
   14654 	int rv = 0;
   14655 
   14656 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14657 		device_xname(sc->sc_dev), __func__));
   14658 
   14659 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14660 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14661 
   14662 	/* Disable ULP */
   14663 	wm_ulp_disable(sc);
   14664 
   14665 	/* Acquire PHY semaphore */
   14666 	rv = sc->phy.acquire(sc);
   14667 	if (rv != 0) {
   14668 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14669 		device_xname(sc->sc_dev), __func__));
   14670 		return -1;
   14671 	}
   14672 
   14673 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14674 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14675 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14676 	 */
   14677 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14678 	switch (sc->sc_type) {
   14679 	case WM_T_PCH_LPT:
   14680 	case WM_T_PCH_SPT:
   14681 	case WM_T_PCH_CNP:
   14682 		if (wm_phy_is_accessible_pchlan(sc))
   14683 			break;
   14684 
   14685 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14686 		 * forcing MAC to SMBus mode first.
   14687 		 */
   14688 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14689 		reg |= CTRL_EXT_FORCE_SMBUS;
   14690 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14691 #if 0
   14692 		/* XXX Isn't this required??? */
   14693 		CSR_WRITE_FLUSH(sc);
   14694 #endif
   14695 		/* Wait 50 milliseconds for MAC to finish any retries
   14696 		 * that it might be trying to perform from previous
   14697 		 * attempts to acknowledge any phy read requests.
   14698 		 */
   14699 		delay(50 * 1000);
   14700 		/* FALLTHROUGH */
   14701 	case WM_T_PCH2:
   14702 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14703 			break;
   14704 		/* FALLTHROUGH */
   14705 	case WM_T_PCH:
    14706 		if ((sc->sc_type == WM_T_PCH) && ((fwsm & FWSM_FW_VALID) != 0))
    14707 			break;
   14709 
   14710 		if (wm_phy_resetisblocked(sc) == true) {
   14711 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14712 			break;
   14713 		}
   14714 
   14715 		/* Toggle LANPHYPC Value bit */
   14716 		wm_toggle_lanphypc_pch_lpt(sc);
   14717 
   14718 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14719 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14720 				break;
   14721 
   14722 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14723 			 * so ensure that the MAC is also out of SMBus mode
   14724 			 */
   14725 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14726 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14727 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14728 
   14729 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14730 				break;
   14731 			rv = -1;
   14732 		}
   14733 		break;
   14734 	default:
   14735 		break;
   14736 	}
   14737 
   14738 	/* Release semaphore */
   14739 	sc->phy.release(sc);
   14740 
   14741 	if (rv == 0) {
   14742 		/* Check to see if able to reset PHY.  Print error if not */
   14743 		if (wm_phy_resetisblocked(sc)) {
   14744 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14745 			goto out;
   14746 		}
   14747 
    14748 		/* Reset the PHY before any access to it.  Doing so ensures
   14749 		 * that the PHY is in a known good state before we read/write
   14750 		 * PHY registers.  The generic reset is sufficient here,
   14751 		 * because we haven't determined the PHY type yet.
   14752 		 */
   14753 		if (wm_reset_phy(sc) != 0)
   14754 			goto out;
   14755 
    14756 		/* On a successful reset, possibly need to wait for the PHY
    14757 		 * to quiesce to an accessible state before returning control
    14758 		 * to the calling function.  If the PHY does not quiesce, log
    14759 		 * that the reset is blocked, as this is the condition the
    14760 		 * PHY is in.
    14761 		 */
   14762 		if (wm_phy_resetisblocked(sc))
   14763 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14764 	}
   14765 
   14766 out:
   14767 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14768 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14769 		delay(10*1000);
   14770 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14771 	}
   14772 
   14773 	return 0;
   14774 }
   14775 
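          /*
           * With manageability present, keep ARP handling in the host (clear
           * MANC_ARP_EN) but let management packets through to the host
           * (MANC_EN_MNG2HOST plus the MANC2H port filters) on 82571 and
           * newer.
           */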
   14776 static void
   14777 wm_init_manageability(struct wm_softc *sc)
   14778 {
   14779 
   14780 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14781 		device_xname(sc->sc_dev), __func__));
   14782 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14783 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14784 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14785 
   14786 		/* Disable hardware interception of ARP */
   14787 		manc &= ~MANC_ARP_EN;
   14788 
   14789 		/* Enable receiving management packets to the host */
   14790 		if (sc->sc_type >= WM_T_82571) {
   14791 			manc |= MANC_EN_MNG2HOST;
   14792 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14793 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14794 		}
   14795 
   14796 		CSR_WRITE(sc, WMREG_MANC, manc);
   14797 	}
   14798 }
   14799 
   14800 static void
   14801 wm_release_manageability(struct wm_softc *sc)
   14802 {
   14803 
   14804 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14805 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14806 
   14807 		manc |= MANC_ARP_EN;
   14808 		if (sc->sc_type >= WM_T_82571)
   14809 			manc &= ~MANC_EN_MNG2HOST;
   14810 
   14811 		CSR_WRITE(sc, WMREG_MANC, manc);
   14812 	}
   14813 }
   14814 
   14815 static void
   14816 wm_get_wakeup(struct wm_softc *sc)
   14817 {
   14818 
   14819 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14820 	switch (sc->sc_type) {
   14821 	case WM_T_82573:
   14822 	case WM_T_82583:
   14823 		sc->sc_flags |= WM_F_HAS_AMT;
   14824 		/* FALLTHROUGH */
   14825 	case WM_T_80003:
   14826 	case WM_T_82575:
   14827 	case WM_T_82576:
   14828 	case WM_T_82580:
   14829 	case WM_T_I350:
   14830 	case WM_T_I354:
   14831 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14832 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14833 		/* FALLTHROUGH */
   14834 	case WM_T_82541:
   14835 	case WM_T_82541_2:
   14836 	case WM_T_82547:
   14837 	case WM_T_82547_2:
   14838 	case WM_T_82571:
   14839 	case WM_T_82572:
   14840 	case WM_T_82574:
   14841 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14842 		break;
   14843 	case WM_T_ICH8:
   14844 	case WM_T_ICH9:
   14845 	case WM_T_ICH10:
   14846 	case WM_T_PCH:
   14847 	case WM_T_PCH2:
   14848 	case WM_T_PCH_LPT:
   14849 	case WM_T_PCH_SPT:
   14850 	case WM_T_PCH_CNP:
   14851 		sc->sc_flags |= WM_F_HAS_AMT;
   14852 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14853 		break;
   14854 	default:
   14855 		break;
   14856 	}
   14857 
   14858 	/* 1: HAS_MANAGE */
   14859 	if (wm_enable_mng_pass_thru(sc) != 0)
   14860 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14861 
    14862 	/*
    14863 	 * Note that the WOL flag is set after the EEPROM reset code has
    14864 	 * run.
    14865 	 */
   14866 }
   14867 
    14868 /*
    14869  * Unconfigure Ultra Low Power mode.  Only for PCH_LPT and newer;
    14870  * some early I217/I218 devices are excluded (see below).
    14871  */
   14872 static int
   14873 wm_ulp_disable(struct wm_softc *sc)
   14874 {
   14875 	uint32_t reg;
   14876 	uint16_t phyreg;
   14877 	int i = 0, rv = 0;
   14878 
   14879 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14880 		device_xname(sc->sc_dev), __func__));
   14881 	/* Exclude old devices */
   14882 	if ((sc->sc_type < WM_T_PCH_LPT)
   14883 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14884 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14885 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14886 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14887 		return 0;
   14888 
   14889 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14890 		/* Request ME un-configure ULP mode in the PHY */
   14891 		reg = CSR_READ(sc, WMREG_H2ME);
   14892 		reg &= ~H2ME_ULP;
   14893 		reg |= H2ME_ENFORCE_SETTINGS;
   14894 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14895 
   14896 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14897 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14898 			if (i++ == 30) {
   14899 				device_printf(sc->sc_dev, "%s timed out\n",
   14900 				    __func__);
   14901 				return -1;
   14902 			}
   14903 			delay(10 * 1000);
   14904 		}
   14905 		reg = CSR_READ(sc, WMREG_H2ME);
   14906 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14907 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14908 
   14909 		return 0;
   14910 	}
   14911 
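          	/*
          	 * No manageability firmware: the driver has to undo ULP itself:
          	 * toggle LANPHYPC, unforce SMBus mode in both the PHY and the
          	 * MAC, re-enable K1, then clear and restart the I218 ULP
          	 * configuration.
          	 */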
   14912 	/* Acquire semaphore */
   14913 	rv = sc->phy.acquire(sc);
   14914 	if (rv != 0) {
   14915 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14916 		device_xname(sc->sc_dev), __func__));
   14917 		return -1;
   14918 	}
   14919 
   14920 	/* Toggle LANPHYPC */
   14921 	wm_toggle_lanphypc_pch_lpt(sc);
   14922 
   14923 	/* Unforce SMBus mode in PHY */
   14924 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14925 	if (rv != 0) {
   14926 		uint32_t reg2;
   14927 
   14928 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14929 			__func__);
   14930 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14931 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14932 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14933 		delay(50 * 1000);
   14934 
   14935 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14936 		    &phyreg);
   14937 		if (rv != 0)
   14938 			goto release;
   14939 	}
   14940 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14941 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14942 
   14943 	/* Unforce SMBus mode in MAC */
   14944 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14945 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14946 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14947 
   14948 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14949 	if (rv != 0)
   14950 		goto release;
   14951 	phyreg |= HV_PM_CTRL_K1_ENA;
   14952 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14953 
   14954 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14955 		&phyreg);
   14956 	if (rv != 0)
   14957 		goto release;
   14958 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14959 	    | I218_ULP_CONFIG1_STICKY_ULP
   14960 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14961 	    | I218_ULP_CONFIG1_WOL_HOST
   14962 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14963 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14964 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14965 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14966 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14967 	phyreg |= I218_ULP_CONFIG1_START;
   14968 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14969 
   14970 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14971 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14972 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14973 
   14974 release:
   14975 	/* Release semaphore */
   14976 	sc->phy.release(sc);
   14977 	wm_gmii_reset(sc);
   14978 	delay(50 * 1000);
   14979 
   14980 	return rv;
   14981 }
   14982 
   14983 /* WOL in the newer chipset interfaces (pchlan) */
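          /*
           * With PHY-based wakeup the PHY, not the MAC, matches wake-up
           * packets while the MAC is powered down, so the MAC's receive
           * address registers and multicast table must first be mirrored into
           * the BM_WUC-page PHY registers.
           */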
   14984 static int
   14985 wm_enable_phy_wakeup(struct wm_softc *sc)
   14986 {
   14987 	device_t dev = sc->sc_dev;
   14988 	uint32_t mreg, moff;
   14989 	uint16_t wuce, wuc, wufc, preg;
   14990 	int i, rv;
   14991 
   14992 	KASSERT(sc->sc_type >= WM_T_PCH);
   14993 
   14994 	/* Copy MAC RARs to PHY RARs */
   14995 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14996 
   14997 	/* Activate PHY wakeup */
   14998 	rv = sc->phy.acquire(sc);
   14999 	if (rv != 0) {
   15000 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15001 		    __func__);
   15002 		return rv;
   15003 	}
   15004 
   15005 	/*
   15006 	 * Enable access to PHY wakeup registers.
   15007 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15008 	 */
   15009 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15010 	if (rv != 0) {
   15011 		device_printf(dev,
   15012 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15013 		goto release;
   15014 	}
   15015 
   15016 	/* Copy MAC MTA to PHY MTA */
   15017 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15018 		uint16_t lo, hi;
   15019 
   15020 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15021 		lo = (uint16_t)(mreg & 0xffff);
   15022 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15023 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15024 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15025 	}
   15026 
   15027 	/* Configure PHY Rx Control register */
   15028 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15029 	mreg = CSR_READ(sc, WMREG_RCTL);
   15030 	if (mreg & RCTL_UPE)
   15031 		preg |= BM_RCTL_UPE;
   15032 	if (mreg & RCTL_MPE)
   15033 		preg |= BM_RCTL_MPE;
   15034 	preg &= ~(BM_RCTL_MO_MASK);
   15035 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15036 	if (moff != 0)
   15037 		preg |= moff << BM_RCTL_MO_SHIFT;
   15038 	if (mreg & RCTL_BAM)
   15039 		preg |= BM_RCTL_BAM;
   15040 	if (mreg & RCTL_PMCF)
   15041 		preg |= BM_RCTL_PMCF;
   15042 	mreg = CSR_READ(sc, WMREG_CTRL);
   15043 	if (mreg & CTRL_RFCE)
   15044 		preg |= BM_RCTL_RFCE;
   15045 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15046 
   15047 	wuc = WUC_APME | WUC_PME_EN;
   15048 	wufc = WUFC_MAG;
   15049 	/* Enable PHY wakeup in MAC register */
   15050 	CSR_WRITE(sc, WMREG_WUC,
   15051 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15052 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15053 
   15054 	/* Configure and enable PHY wakeup in PHY registers */
   15055 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15056 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15057 
   15058 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15059 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15060 
   15061 release:
   15062 	sc->phy.release(sc);
   15063 
    15064 	return rv;
   15065 }
   15066 
   15067 /* Power down workaround on D3 */
   15068 static void
   15069 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15070 {
   15071 	uint32_t reg;
   15072 	uint16_t phyreg;
   15073 	int i;
   15074 
   15075 	for (i = 0; i < 2; i++) {
   15076 		/* Disable link */
   15077 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15078 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15079 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15080 
   15081 		/*
   15082 		 * Call gig speed drop workaround on Gig disable before
   15083 		 * accessing any PHY registers
   15084 		 */
   15085 		if (sc->sc_type == WM_T_ICH8)
   15086 			wm_gig_downshift_workaround_ich8lan(sc);
   15087 
   15088 		/* Write VR power-down enable */
   15089 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15090 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15091 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15092 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15093 
   15094 		/* Read it back and test */
   15095 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15096 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15097 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15098 			break;
   15099 
   15100 		/* Issue PHY reset and repeat at most one more time */
   15101 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15102 	}
   15103 }
   15104 
   15105 /*
   15106  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15107  *  @sc: pointer to the HW structure
   15108  *
   15109  *  During S0 to Sx transition, it is possible the link remains at gig
   15110  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15111  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15112  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15113  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15114  *  needs to be written.
    15115  *  Parts that support (and are linked to a partner which supports) EEE in
   15116  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15117  *  than 10Mbps w/o EEE.
   15118  */
   15119 static void
   15120 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15121 {
   15122 	device_t dev = sc->sc_dev;
   15123 	struct ethercom *ec = &sc->sc_ethercom;
   15124 	uint32_t phy_ctrl;
   15125 	int rv;
   15126 
   15127 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15128 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15129 
   15130 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15131 
   15132 	if (sc->sc_phytype == WMPHY_I217) {
   15133 		uint16_t devid = sc->sc_pcidevid;
   15134 
   15135 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15136 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15137 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15138 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15139 		    (sc->sc_type >= WM_T_PCH_SPT))
   15140 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15141 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15142 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15143 
   15144 		if (sc->phy.acquire(sc) != 0)
   15145 			goto out;
   15146 
   15147 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15148 			uint16_t eee_advert;
   15149 
   15150 			rv = wm_read_emi_reg_locked(dev,
   15151 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15152 			if (rv)
   15153 				goto release;
   15154 
   15155 			/*
   15156 			 * Disable LPLU if both link partners support 100BaseT
   15157 			 * EEE and 100Full is advertised on both ends of the
   15158 			 * link, and enable Auto Enable LPI since there will
   15159 			 * be no driver to enable LPI while in Sx.
   15160 			 */
   15161 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15162 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15163 				uint16_t anar, phy_reg;
   15164 
   15165 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15166 				    &anar);
   15167 				if (anar & ANAR_TX_FD) {
   15168 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15169 					    PHY_CTRL_NOND0A_LPLU);
   15170 
   15171 					/* Set Auto Enable LPI after link up */
   15172 					sc->phy.readreg_locked(dev, 2,
   15173 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15174 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15175 					sc->phy.writereg_locked(dev, 2,
   15176 					    I217_LPI_GPIO_CTRL, phy_reg);
   15177 				}
   15178 			}
   15179 		}
   15180 
   15181 		/*
   15182 		 * For i217 Intel Rapid Start Technology support,
   15183 		 * when the system is going into Sx and no manageability engine
   15184 		 * is present, the driver must configure proxy to reset only on
   15185 		 * power good.	LPI (Low Power Idle) state must also reset only
   15186 		 * on power good, as well as the MTA (Multicast table array).
   15187 		 * The SMBus release must also be disabled on LCD reset.
   15188 		 */
   15189 
   15190 		/*
   15191 		 * Enable MTA to reset for Intel Rapid Start Technology
   15192 		 * Support
   15193 		 */
   15194 
   15195 release:
   15196 		sc->phy.release(sc);
   15197 	}
   15198 out:
   15199 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15200 
   15201 	if (sc->sc_type == WM_T_ICH8)
   15202 		wm_gig_downshift_workaround_ich8lan(sc);
   15203 
   15204 	if (sc->sc_type >= WM_T_PCH) {
   15205 		wm_oem_bits_config_ich8lan(sc, false);
   15206 
   15207 		/* Reset PHY to activate OEM bits on 82577/8 */
   15208 		if (sc->sc_type == WM_T_PCH)
   15209 			wm_reset_phy(sc);
   15210 
   15211 		if (sc->phy.acquire(sc) != 0)
   15212 			return;
   15213 		wm_write_smbus_addr(sc);
   15214 		sc->phy.release(sc);
   15215 	}
   15216 }
   15217 
   15218 /*
   15219  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15220  *  @sc: pointer to the HW structure
   15221  *
   15222  *  During Sx to S0 transitions on non-managed devices or managed devices
   15223  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15224  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
    15225  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   15226  *  On i217, setup Intel Rapid Start Technology.
   15227  */
   15228 static int
   15229 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15230 {
   15231 	device_t dev = sc->sc_dev;
   15232 	int rv;
   15233 
   15234 	if (sc->sc_type < WM_T_PCH2)
   15235 		return 0;
   15236 
   15237 	rv = wm_init_phy_workarounds_pchlan(sc);
   15238 	if (rv != 0)
   15239 		return -1;
   15240 
    15241 	/* For i217 Intel Rapid Start Technology support, when the system
    15242 	 * is transitioning from Sx and no manageability engine is present,
   15243 	 * configure SMBus to restore on reset, disable proxy, and enable
   15244 	 * the reset on MTA (Multicast table array).
   15245 	 */
   15246 	if (sc->sc_phytype == WMPHY_I217) {
   15247 		uint16_t phy_reg;
   15248 
   15249 		if (sc->phy.acquire(sc) != 0)
   15250 			return -1;
   15251 
   15252 		/* Clear Auto Enable LPI after link up */
   15253 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15254 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15255 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15256 
   15257 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15258 			/* Restore clear on SMB if no manageability engine
   15259 			 * is present
   15260 			 */
   15261 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15262 			    &phy_reg);
   15263 			if (rv != 0)
   15264 				goto release;
   15265 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15266 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15267 
   15268 			/* Disable Proxy */
   15269 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15270 		}
   15271 		/* Enable reset on MTA */
    15272 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15273 		if (rv != 0)
   15274 			goto release;
   15275 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15276 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15277 
   15278 release:
   15279 		sc->phy.release(sc);
   15280 		return rv;
   15281 	}
   15282 
   15283 	return 0;
   15284 }
   15285 
   15286 static void
   15287 wm_enable_wakeup(struct wm_softc *sc)
   15288 {
   15289 	uint32_t reg, pmreg;
   15290 	pcireg_t pmode;
   15291 	int rv = 0;
   15292 
   15293 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15294 		device_xname(sc->sc_dev), __func__));
   15295 
   15296 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15297 	    &pmreg, NULL) == 0)
   15298 		return;
   15299 
   15300 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15301 		goto pme;
   15302 
   15303 	/* Advertise the wakeup capability */
   15304 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15305 	    | CTRL_SWDPIN(3));
   15306 
   15307 	/* Keep the laser running on fiber adapters */
   15308 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15309 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15310 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15311 		reg |= CTRL_EXT_SWDPIN(3);
   15312 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15313 	}
   15314 
   15315 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15316 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15317 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15318 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15319 		wm_suspend_workarounds_ich8lan(sc);
   15320 
   15321 #if 0	/* For the multicast packet */
   15322 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15323 	reg |= WUFC_MC;
   15324 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15325 #endif
   15326 
   15327 	if (sc->sc_type >= WM_T_PCH) {
   15328 		rv = wm_enable_phy_wakeup(sc);
   15329 		if (rv != 0)
   15330 			goto pme;
   15331 	} else {
   15332 		/* Enable wakeup by the MAC */
   15333 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15334 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15335 	}
   15336 
   15337 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15338 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15339 		|| (sc->sc_type == WM_T_PCH2))
   15340 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15341 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15342 
   15343 pme:
   15344 	/* Request PME */
   15345 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15346 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15347 		/* For WOL */
   15348 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15349 	} else {
   15350 		/* Disable WOL */
   15351 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15352 	}
   15353 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15354 }
   15355 
   15356 /* Disable ASPM L0s and/or L1 for workaround */
   15357 static void
   15358 wm_disable_aspm(struct wm_softc *sc)
   15359 {
   15360 	pcireg_t reg, mask = 0;
    15361 	const char *str = "";
   15362 
    15363 	/*
    15364 	 * Only for PCIe devices that have the PCIe capability in the PCI
    15365 	 * config space.
    15366 	 */
   15367 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15368 		return;
   15369 
   15370 	switch (sc->sc_type) {
   15371 	case WM_T_82571:
   15372 	case WM_T_82572:
   15373 		/*
   15374 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15375 		 * State Power management L1 State (ASPM L1).
   15376 		 */
   15377 		mask = PCIE_LCSR_ASPM_L1;
   15378 		str = "L1 is";
   15379 		break;
   15380 	case WM_T_82573:
   15381 	case WM_T_82574:
   15382 	case WM_T_82583:
   15383 		/*
   15384 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15385 		 *
    15386 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15387 		 * some chipsets.  The 82574 and 82583 documents say that
    15388 		 * disabling L0s on those specific chipsets is sufficient,
    15389 		 * but we follow what the Intel em driver does.
   15390 		 *
   15391 		 * References:
   15392 		 * Errata 8 of the Specification Update of i82573.
   15393 		 * Errata 20 of the Specification Update of i82574.
   15394 		 * Errata 9 of the Specification Update of i82583.
   15395 		 */
   15396 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15397 		str = "L0s and L1 are";
   15398 		break;
   15399 	default:
   15400 		return;
   15401 	}
   15402 
   15403 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15404 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15405 	reg &= ~mask;
   15406 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15407 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15408 
   15409 	/* Print only in wm_attach() */
   15410 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15411 		aprint_verbose_dev(sc->sc_dev,
   15412 		    "ASPM %s disabled to workaround the errata.\n", str);
   15413 }
   15414 
   15415 /* LPLU */
   15416 
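          /*
           * Disable D0 Low Power Link Up.  Where the knob lives varies by
           * family: an IGP PHY power-management register, the PHPM register,
           * the PHY_CTRL register, or the HV OEM bits PHY register.
           */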
   15417 static void
   15418 wm_lplu_d0_disable(struct wm_softc *sc)
   15419 {
   15420 	struct mii_data *mii = &sc->sc_mii;
   15421 	uint32_t reg;
   15422 	uint16_t phyval;
   15423 
   15424 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15425 		device_xname(sc->sc_dev), __func__));
   15426 
   15427 	if (sc->sc_phytype == WMPHY_IFE)
   15428 		return;
   15429 
   15430 	switch (sc->sc_type) {
   15431 	case WM_T_82571:
   15432 	case WM_T_82572:
   15433 	case WM_T_82573:
   15434 	case WM_T_82575:
   15435 	case WM_T_82576:
   15436 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15437 		phyval &= ~PMR_D0_LPLU;
   15438 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15439 		break;
   15440 	case WM_T_82580:
   15441 	case WM_T_I350:
   15442 	case WM_T_I210:
   15443 	case WM_T_I211:
   15444 		reg = CSR_READ(sc, WMREG_PHPM);
   15445 		reg &= ~PHPM_D0A_LPLU;
   15446 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15447 		break;
   15448 	case WM_T_82574:
   15449 	case WM_T_82583:
   15450 	case WM_T_ICH8:
   15451 	case WM_T_ICH9:
   15452 	case WM_T_ICH10:
   15453 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15454 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15455 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15456 		CSR_WRITE_FLUSH(sc);
   15457 		break;
   15458 	case WM_T_PCH:
   15459 	case WM_T_PCH2:
   15460 	case WM_T_PCH_LPT:
   15461 	case WM_T_PCH_SPT:
   15462 	case WM_T_PCH_CNP:
   15463 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15464 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15465 		if (wm_phy_resetisblocked(sc) == false)
   15466 			phyval |= HV_OEM_BITS_ANEGNOW;
   15467 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15468 		break;
   15469 	default:
   15470 		break;
   15471 	}
   15472 }
   15473 
   15474 /* EEE */
   15475 
   15476 static int
   15477 wm_set_eee_i350(struct wm_softc *sc)
   15478 {
   15479 	struct ethercom *ec = &sc->sc_ethercom;
   15480 	uint32_t ipcnfg, eeer;
   15481 	uint32_t ipcnfg_mask
   15482 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15483 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15484 
   15485 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15486 
   15487 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15488 	eeer = CSR_READ(sc, WMREG_EEER);
   15489 
   15490 	/* Enable or disable per user setting */
   15491 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15492 		ipcnfg |= ipcnfg_mask;
   15493 		eeer |= eeer_mask;
   15494 	} else {
   15495 		ipcnfg &= ~ipcnfg_mask;
   15496 		eeer &= ~eeer_mask;
   15497 	}
   15498 
   15499 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15500 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15501 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15502 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15503 
   15504 	return 0;
   15505 }
   15506 
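          /*
           * On the 82579 and I217 PHYs the EEE controls live in EMI registers
           * reached through the PHY, and the register addresses differ per
           * PHY, hence the switch below.  EEE is enabled only for speeds that
           * are both advertised locally and supported by the link partner.
           */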
   15507 static int
   15508 wm_set_eee_pchlan(struct wm_softc *sc)
   15509 {
   15510 	device_t dev = sc->sc_dev;
   15511 	struct ethercom *ec = &sc->sc_ethercom;
   15512 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15513 	int rv = 0;
   15514 
   15515 	switch (sc->sc_phytype) {
   15516 	case WMPHY_82579:
   15517 		lpa = I82579_EEE_LP_ABILITY;
   15518 		pcs_status = I82579_EEE_PCS_STATUS;
   15519 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15520 		break;
   15521 	case WMPHY_I217:
   15522 		lpa = I217_EEE_LP_ABILITY;
   15523 		pcs_status = I217_EEE_PCS_STATUS;
   15524 		adv_addr = I217_EEE_ADVERTISEMENT;
   15525 		break;
   15526 	default:
   15527 		return 0;
   15528 	}
   15529 
   15530 	if (sc->phy.acquire(sc)) {
   15531 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15532 		return 0;
   15533 	}
   15534 
   15535 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15536 	if (rv != 0)
   15537 		goto release;
   15538 
   15539 	/* Clear bits that enable EEE in various speeds */
   15540 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15541 
   15542 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15543 		/* Save off link partner's EEE ability */
   15544 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15545 		if (rv != 0)
   15546 			goto release;
   15547 
   15548 		/* Read EEE advertisement */
   15549 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15550 			goto release;
   15551 
   15552 		/*
   15553 		 * Enable EEE only for speeds in which the link partner is
   15554 		 * EEE capable and for which we advertise EEE.
   15555 		 */
   15556 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15557 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15558 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15559 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15560 			if ((data & ANLPAR_TX_FD) != 0)
   15561 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15562 			else {
   15563 				/*
   15564 				 * EEE is not supported in 100Half, so ignore
   15565 				 * partner's EEE in 100 ability if full-duplex
   15566 				 * is not advertised.
   15567 				 */
   15568 				sc->eee_lp_ability
   15569 				    &= ~AN_EEEADVERT_100_TX;
   15570 			}
   15571 		}
   15572 	}
   15573 
   15574 	if (sc->sc_phytype == WMPHY_82579) {
   15575 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15576 		if (rv != 0)
   15577 			goto release;
   15578 
   15579 		data &= ~I82579_LPI_PLL_SHUT_100;
   15580 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15581 	}
   15582 
   15583 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15584 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15585 		goto release;
   15586 
   15587 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15588 release:
   15589 	sc->phy.release(sc);
   15590 
   15591 	return rv;
   15592 }
   15593 
   15594 static int
   15595 wm_set_eee(struct wm_softc *sc)
   15596 {
   15597 	struct ethercom *ec = &sc->sc_ethercom;
   15598 
   15599 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15600 		return 0;
   15601 
   15602 	if (sc->sc_type == WM_T_I354) {
   15603 		/* I354 uses an external PHY */
   15604 		return 0; /* not yet */
   15605 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15606 		return wm_set_eee_i350(sc);
   15607 	else if (sc->sc_type >= WM_T_PCH2)
   15608 		return wm_set_eee_pchlan(sc);
   15609 
   15610 	return 0;
   15611 }
   15612 
   15613 /*
   15614  * Workarounds (mainly PHY related).
   15615  * Basically, PHY's workarounds are in the PHY drivers.
   15616  */
   15617 
   15618 /* Work-around for 82566 Kumeran PCS lock loss */
   15619 static int
   15620 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15621 {
   15622 	struct mii_data *mii = &sc->sc_mii;
   15623 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15624 	int i, reg, rv;
   15625 	uint16_t phyreg;
   15626 
   15627 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15628 		device_xname(sc->sc_dev), __func__));
   15629 
   15630 	/* If the link is not up, do nothing */
   15631 	if ((status & STATUS_LU) == 0)
   15632 		return 0;
   15633 
   15634 	/* Nothing to do if the link is other than 1Gbps */
   15635 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15636 		return 0;
   15637 
   15638 	for (i = 0; i < 10; i++) {
   15639 		/* read twice */
   15640 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15641 		if (rv != 0)
   15642 			return rv;
   15643 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15644 		if (rv != 0)
   15645 			return rv;
   15646 
   15647 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15648 			goto out;	/* GOOD! */
   15649 
   15650 		/* Reset the PHY */
   15651 		wm_reset_phy(sc);
   15652 		delay(5*1000);
   15653 	}
   15654 
   15655 	/* Disable GigE link negotiation */
   15656 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15657 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15658 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15659 
   15660 	/*
   15661 	 * Call gig speed drop workaround on Gig disable before accessing
   15662 	 * any PHY registers.
   15663 	 */
   15664 	wm_gig_downshift_workaround_ich8lan(sc);
   15665 
   15666 out:
   15667 	return 0;
   15668 }
   15669 
   15670 /*
   15671  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15672  *  @sc: pointer to the HW structure
   15673  *
   15674  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15675  *  LPLU, Gig disable, MDIC PHY reset):
   15676  *    1) Set Kumeran Near-end loopback
   15677  *    2) Clear Kumeran Near-end loopback
   15678  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15679  */
   15680 static void
   15681 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15682 {
   15683 	uint16_t kmreg;
   15684 
   15685 	/* Only for igp3 */
   15686 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15687 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15688 			return;
   15689 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15690 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15691 			return;
   15692 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15693 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15694 	}
   15695 }
   15696 
   15697 /*
   15698  * Workaround for pch's PHYs
   15699  * XXX should be moved to new PHY driver?
   15700  */
   15701 static int
   15702 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15703 {
   15704 	device_t dev = sc->sc_dev;
   15705 	struct mii_data *mii = &sc->sc_mii;
   15706 	struct mii_softc *child;
   15707 	uint16_t phy_data, phyrev = 0;
   15708 	int phytype = sc->sc_phytype;
   15709 	int rv;
   15710 
   15711 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15712 		device_xname(dev), __func__));
   15713 	KASSERT(sc->sc_type == WM_T_PCH);
   15714 
   15715 	/* Set MDIO slow mode before any other MDIO access */
   15716 	if (phytype == WMPHY_82577)
   15717 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15718 			return rv;
   15719 
   15720 	child = LIST_FIRST(&mii->mii_phys);
   15721 	if (child != NULL)
   15722 		phyrev = child->mii_mpd_rev;
   15723 
    15724 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15725 	if ((child != NULL) &&
   15726 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15727 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15728 		/* Disable generation of early preamble (0x4431) */
   15729 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15730 		    &phy_data);
   15731 		if (rv != 0)
   15732 			return rv;
   15733 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15734 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15735 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15736 		    phy_data);
   15737 		if (rv != 0)
   15738 			return rv;
   15739 
   15740 		/* Preamble tuning for SSC */
   15741 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15742 		if (rv != 0)
   15743 			return rv;
   15744 	}
   15745 
   15746 	/* 82578 */
   15747 	if (phytype == WMPHY_82578) {
   15748 		/*
   15749 		 * Return registers to default by doing a soft reset then
   15750 		 * writing 0x3140 to the control register
   15751 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15752 		 */
   15753 		if ((child != NULL) && (phyrev < 2)) {
   15754 			PHY_RESET(child);
   15755 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15756 			if (rv != 0)
   15757 				return rv;
   15758 		}
   15759 	}
   15760 
   15761 	/* Select page 0 */
   15762 	if ((rv = sc->phy.acquire(sc)) != 0)
   15763 		return rv;
   15764 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15765 	sc->phy.release(sc);
   15766 	if (rv != 0)
   15767 		return rv;
   15768 
   15769 	/*
   15770 	 * Configure the K1 Si workaround during phy reset assuming there is
   15771 	 * link so that it disables K1 if link is in 1Gbps.
   15772 	 */
   15773 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15774 		return rv;
   15775 
   15776 	/* Workaround for link disconnects on a busy hub in half duplex */
   15777 	rv = sc->phy.acquire(sc);
   15778 	if (rv)
   15779 		return rv;
   15780 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15781 	if (rv)
   15782 		goto release;
   15783 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15784 	    phy_data & 0x00ff);
   15785 	if (rv)
   15786 		goto release;
   15787 
   15788 	/* Set MSE higher to enable link to stay up when noise is high */
   15789 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15790 release:
   15791 	sc->phy.release(sc);
   15792 
   15793 	return rv;
   15794 }
   15795 
   15796 /*
   15797  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15798  *  @sc:   pointer to the HW structure
   15799  */
   15800 static void
   15801 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15802 {
   15803 	device_t dev = sc->sc_dev;
   15804 	uint32_t mac_reg;
   15805 	uint16_t i, wuce;
   15806 	int count;
   15807 
   15808 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15809 		device_xname(sc->sc_dev), __func__));
   15810 
   15811 	if (sc->phy.acquire(sc) != 0)
   15812 		return;
   15813 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15814 		goto release;
   15815 
   15816 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15817 	count = wm_rar_count(sc);
   15818 	for (i = 0; i < count; i++) {
   15819 		uint16_t lo, hi;
   15820 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15821 		lo = (uint16_t)(mac_reg & 0xffff);
   15822 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15823 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15824 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15825 
   15826 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15827 		lo = (uint16_t)(mac_reg & 0xffff);
   15828 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15829 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15830 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15831 	}
   15832 
   15833 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15834 
   15835 release:
   15836 	sc->phy.release(sc);
   15837 }
   15838 
   15839 /*
   15840  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15841  *  done after every PHY reset.
   15842  */
   15843 static int
   15844 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15845 {
   15846 	device_t dev = sc->sc_dev;
   15847 	int rv;
   15848 
   15849 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15850 		device_xname(dev), __func__));
   15851 	KASSERT(sc->sc_type == WM_T_PCH2);
   15852 
   15853 	/* Set MDIO slow mode before any other MDIO access */
   15854 	rv = wm_set_mdio_slow_mode_hv(sc);
   15855 	if (rv != 0)
   15856 		return rv;
   15857 
   15858 	rv = sc->phy.acquire(sc);
   15859 	if (rv != 0)
   15860 		return rv;
   15861 	/* Set MSE higher to enable link to stay up when noise is high */
   15862 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15863 	if (rv != 0)
   15864 		goto release;
   15865 	/* Drop link after 5 times MSE threshold was reached */
   15866 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15867 release:
   15868 	sc->phy.release(sc);
   15869 
   15870 	return rv;
   15871 }
   15872 
   15873 /**
   15874  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15875  *  @link: link up bool flag
   15876  *
   15877  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15878  *  preventing further DMA write requests.  Workaround the issue by disabling
    15879  *  the de-assertion of the clock request when in 1Gbps mode.
   15880  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15881  *  speeds in order to avoid Tx hangs.
   15882  **/
   15883 static int
   15884 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15885 {
   15886 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15887 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15888 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15889 	uint16_t phyreg;
   15890 
   15891 	if (link && (speed == STATUS_SPEED_1000)) {
   15892 		sc->phy.acquire(sc);
   15893 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15894 		    &phyreg);
   15895 		if (rv != 0)
   15896 			goto release;
   15897 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15898 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15899 		if (rv != 0)
   15900 			goto release;
   15901 		delay(20);
   15902 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15903 
   15904 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15905 		    &phyreg);
   15906 release:
   15907 		sc->phy.release(sc);
   15908 		return rv;
   15909 	}
   15910 
   15911 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15912 
   15913 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15914 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15915 	    || !link
   15916 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15917 		goto update_fextnvm6;
   15918 
   15919 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15920 
   15921 	/* Clear link status transmit timeout */
   15922 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15923 	if (speed == STATUS_SPEED_100) {
   15924 		/* Set inband Tx timeout to 5x10us for 100Half */
   15925 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15926 
   15927 		/* Do not extend the K1 entry latency for 100Half */
   15928 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15929 	} else {
   15930 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15931 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15932 
   15933 		/* Extend the K1 entry latency for 10 Mbps */
   15934 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15935 	}
   15936 
   15937 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15938 
   15939 update_fextnvm6:
   15940 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15941 	return 0;
   15942 }
   15943 
   15944 /*
   15945  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15946  *  @sc:   pointer to the HW structure
   15947  *  @link: link up bool flag
   15948  *
   15949  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    15950  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15951  *  If link is down, the function will restore the default K1 setting located
   15952  *  in the NVM.
   15953  */
   15954 static int
   15955 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15956 {
   15957 	int k1_enable = sc->sc_nvm_k1_enabled;
   15958 
   15959 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15960 		device_xname(sc->sc_dev), __func__));
   15961 
   15962 	if (sc->phy.acquire(sc) != 0)
   15963 		return -1;
   15964 
   15965 	if (link) {
   15966 		k1_enable = 0;
   15967 
   15968 		/* Link stall fix for link up */
   15969 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15970 		    0x0100);
   15971 	} else {
   15972 		/* Link stall fix for link down */
   15973 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15974 		    0x4100);
   15975 	}
   15976 
   15977 	wm_configure_k1_ich8lan(sc, k1_enable);
   15978 	sc->phy.release(sc);
   15979 
   15980 	return 0;
   15981 }
   15982 
   15983 /*
   15984  *  wm_k1_workaround_lv - K1 Si workaround
   15985  *  @sc:   pointer to the HW structure
   15986  *
    15987  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
    15988  *  Disable K1 for 1000 and 100 speeds.
   15989  */
   15990 static int
   15991 wm_k1_workaround_lv(struct wm_softc *sc)
   15992 {
   15993 	uint32_t reg;
   15994 	uint16_t phyreg;
   15995 	int rv;
   15996 
   15997 	if (sc->sc_type != WM_T_PCH2)
   15998 		return 0;
   15999 
   16000 	/* Set K1 beacon duration based on 10Mbps speed */
   16001 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16002 	if (rv != 0)
   16003 		return rv;
   16004 
   16005 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16006 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16007 		if (phyreg &
   16008 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    16009 			/* LV 1G/100 packet drop issue workaround */
   16010 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16011 			    &phyreg);
   16012 			if (rv != 0)
   16013 				return rv;
   16014 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16015 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16016 			    phyreg);
   16017 			if (rv != 0)
   16018 				return rv;
   16019 		} else {
   16020 			/* For 10Mbps */
   16021 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16022 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16023 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16024 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16025 		}
   16026 	}
   16027 
   16028 	return 0;
   16029 }
   16030 
   16031 /*
   16032  *  wm_link_stall_workaround_hv - Si workaround
   16033  *  @sc: pointer to the HW structure
   16034  *
   16035  *  This function works around a Si bug where the link partner can get
   16036  *  a link up indication before the PHY does. If small packets are sent
   16037  *  by the link partner they can be placed in the packet buffer without
    16038  *  being properly accounted for by the PHY and will stall, preventing
   16039  *  further packets from being received.  The workaround is to clear the
   16040  *  packet buffer after the PHY detects link up.
   16041  */
   16042 static int
   16043 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16044 {
   16045 	uint16_t phyreg;
   16046 
   16047 	if (sc->sc_phytype != WMPHY_82578)
   16048 		return 0;
   16049 
    16050 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   16051 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16052 	if ((phyreg & BMCR_LOOP) != 0)
   16053 		return 0;
   16054 
   16055 	/* Check if link is up and at 1Gbps */
   16056 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16057 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16058 	    | BM_CS_STATUS_SPEED_MASK;
   16059 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16060 		| BM_CS_STATUS_SPEED_1000))
   16061 		return 0;
   16062 
   16063 	delay(200 * 1000);	/* XXX too big */
   16064 
   16065 	/* Flush the packets in the fifo buffer */
   16066 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16067 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16068 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16069 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16070 
   16071 	return 0;
   16072 }
   16073 
   16074 static int
   16075 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16076 {
   16077 	int rv;
   16078 	uint16_t reg;
   16079 
   16080 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16081 	if (rv != 0)
   16082 		return rv;
   16083 
   16084 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16085 	    reg | HV_KMRN_MDIO_SLOW);
   16086 }
   16087 
   16088 /*
   16089  *  wm_configure_k1_ich8lan - Configure K1 power state
   16090  *  @sc: pointer to the HW structure
   16091  *  @enable: K1 state to configure
   16092  *
   16093  *  Configure the K1 power state based on the provided parameter.
   16094  *  Assumes semaphore already acquired.
   16095  */
   16096 static void
   16097 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16098 {
   16099 	uint32_t ctrl, ctrl_ext, tmp;
   16100 	uint16_t kmreg;
   16101 	int rv;
   16102 
   16103 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16104 
   16105 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16106 	if (rv != 0)
   16107 		return;
   16108 
   16109 	if (k1_enable)
   16110 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16111 	else
   16112 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16113 
   16114 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16115 	if (rv != 0)
   16116 		return;
   16117 
   16118 	delay(20);
   16119 
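          	/*
          	 * Briefly force the MAC speed (CTRL_FRCSPD with the speed bits
          	 * cleared and CTRL_EXT_SPD_BYPS set), then restore the original
          	 * CTRL/CTRL_EXT values; presumably this is what latches the new
          	 * K1 setting, as the same sequence appears in Intel's reference
          	 * code.
          	 */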
   16120 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16121 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16122 
   16123 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16124 	tmp |= CTRL_FRCSPD;
   16125 
   16126 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16127 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16128 	CSR_WRITE_FLUSH(sc);
   16129 	delay(20);
   16130 
   16131 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16132 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16133 	CSR_WRITE_FLUSH(sc);
   16134 	delay(20);
   16137 }
   16138 
   16139 /* special case - for 82575 - need to do manual init ... */
   16140 static void
   16141 wm_reset_init_script_82575(struct wm_softc *sc)
   16142 {
    16143 	/*
    16144 	 * Remark: this is untested code - we have no board without EEPROM;
    16145 	 * same setup as mentioned in the FreeBSD driver for the i82575.
    16146 	 */
   16147 
   16148 	/* SerDes configuration via SERDESCTRL */
   16149 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16150 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16151 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16152 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16153 
   16154 	/* CCM configuration via CCMCTL register */
   16155 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16156 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16157 
   16158 	/* PCIe lanes configuration */
   16159 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16160 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16161 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16162 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16163 
   16164 	/* PCIe PLL Configuration */
   16165 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16166 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16167 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16168 }
   16169 
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

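/* A PHY ID of all zeros or all ones indicates no (accessible) PHY. */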
#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

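/*
 * wm_phy_is_accessible_pchlan - check whether the PHY answers on MDIO
 *
 * Try to read the PHY ID registers; on failure, retry in MDIO slow
 * mode (pre-LPT parts only).  Returns true iff a valid PHY ID is read.
 */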
static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
		sc->phy.acquire(sc);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev,
		    "%s: PHY ID is invalid; PHY is not accessible\n",
		    __func__);
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			if (rv == 0) {
				phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
				wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
				    CV_SMB_CTRL, phyreg);
			}

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

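/*
 * wm_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *
 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 * one way to bring an otherwise inaccessible PHY back to life.
 */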
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

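	/*
	 * Wait for the toggle to take effect.  LPT and newer parts
	 * signal completion via the CTRL_EXT_LPCD bit.
	 */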
	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

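/*
 * wm_platform_pm_pch_lpt - set platform power-management values
 *
 * Program the LTR (Latency Tolerance Reporting) and OBFF (Optimized
 * Buffer Flush/Fill) values in the device.
 */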
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

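		/*
		 * Scale lat_ns down to a 10-bit value, stepping the scale
		 * up by one (a factor of 2^5) per iteration.  Worked
		 * example (sketch): lat_ns = 100000 reduces to value 98
		 * at scale 2, i.e. an encoded latency of roughly
		 * 98 * 2^10 = 100352 ns.
		 */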
		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	int pmreg;	/* pci_get_capability() takes an int * offset */
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

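		/* Bounce the function through D3hot and back to D0 */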
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

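/*
 * wm_legacy_irq_quirk_spt - quirk for legacy (INTx) interrupts
 *
 * On PCH_SPT/PCH_CNP, ungate the side clock and disable IOSF sideband
 * clock gating and clock requests (FEXTNVM7/FEXTNVM9 bits).
 */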
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}