      1 /*	$NetBSD: if_wm.c,v 1.669 2020/03/15 23:04:50 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
      76  *	- Tx multiqueue improvement (refine queue selection logic)
      77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.669 2020/03/15 23:04:50 thorpej Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 #include <sys/sysctl.h>
    109 #include <sys/workqueue.h>
    110 
    111 #include <sys/rndsource.h>
    112 
    113 #include <net/if.h>
    114 #include <net/if_dl.h>
    115 #include <net/if_media.h>
    116 #include <net/if_ether.h>
    117 
    118 #include <net/bpf.h>
    119 
    120 #include <net/rss_config.h>
    121 
    122 #include <netinet/in.h>			/* XXX for struct ip */
    123 #include <netinet/in_systm.h>		/* XXX for struct ip */
    124 #include <netinet/ip.h>			/* XXX for struct ip */
    125 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    126 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    127 
    128 #include <sys/bus.h>
    129 #include <sys/intr.h>
    130 #include <machine/endian.h>
    131 
    132 #include <dev/mii/mii.h>
    133 #include <dev/mii/mdio.h>
    134 #include <dev/mii/miivar.h>
    135 #include <dev/mii/miidevs.h>
    136 #include <dev/mii/mii_bitbang.h>
    137 #include <dev/mii/ikphyreg.h>
    138 #include <dev/mii/igphyreg.h>
    139 #include <dev/mii/igphyvar.h>
    140 #include <dev/mii/inbmphyreg.h>
    141 #include <dev/mii/ihphyreg.h>
    142 
    143 #include <dev/pci/pcireg.h>
    144 #include <dev/pci/pcivar.h>
    145 #include <dev/pci/pcidevs.h>
    146 
    147 #include <dev/pci/if_wmreg.h>
    148 #include <dev/pci/if_wmvar.h>
    149 
    150 #ifdef WM_DEBUG
    151 #define	WM_DEBUG_LINK		__BIT(0)
    152 #define	WM_DEBUG_TX		__BIT(1)
    153 #define	WM_DEBUG_RX		__BIT(2)
    154 #define	WM_DEBUG_GMII		__BIT(3)
    155 #define	WM_DEBUG_MANAGE		__BIT(4)
    156 #define	WM_DEBUG_NVM		__BIT(5)
    157 #define	WM_DEBUG_INIT		__BIT(6)
    158 #define	WM_DEBUG_LOCK		__BIT(7)
    159 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    160     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    161 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    162 #else
    163 #define	DPRINTF(x, y)	__nothing
    164 #endif /* WM_DEBUG */
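
#if 0
/*
 * Illustrative sketch (not compiled in): DPRINTF() takes a debug-mask
 * bit and a fully parenthesized printf() argument list, so calls like
 * the following vanish entirely when WM_DEBUG is not defined:
 */
	DPRINTF(WM_DEBUG_LINK, ("%s: link status changed\n",
		device_xname(sc->sc_dev)));
#endif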
    165 
    166 #ifdef NET_MPSAFE
    167 #define WM_MPSAFE	1
    168 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
     169 #define WM_WORKQUEUE_FLAGS	(WQ_PERCPU | WQ_MPSAFE)
    170 #else
    171 #define CALLOUT_FLAGS	0
    172 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
    173 #endif
    174 
    175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    176 
    177 /*
     178  * The maximum number of interrupts this driver uses.
    179  */
    180 #define WM_MAX_NQUEUEINTR	16
    181 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    182 
    183 #ifndef WM_DISABLE_MSI
    184 #define	WM_DISABLE_MSI 0
    185 #endif
    186 #ifndef WM_DISABLE_MSIX
    187 #define	WM_DISABLE_MSIX 0
    188 #endif
    189 
    190 int wm_disable_msi = WM_DISABLE_MSI;
    191 int wm_disable_msix = WM_DISABLE_MSIX;
    192 
    193 #ifndef WM_WATCHDOG_TIMEOUT
    194 #define WM_WATCHDOG_TIMEOUT 5
    195 #endif
    196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    197 
    198 /*
    199  * Transmit descriptor list size.  Due to errata, we can only have
    200  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    201  * on >= 82544. We tell the upper layers that they can queue a lot
    202  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    203  * of them at a time.
    204  *
    205  * We allow up to 64 DMA segments per packet.  Pathological packet
    206  * chains containing many small mbufs have been observed in zero-copy
     207  * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
     208  * segments, m_defrag() is called to reduce it.
    209  */
    210 #define	WM_NTXSEGS		64
    211 #define	WM_IFQUEUELEN		256
    212 #define	WM_TXQUEUELEN_MAX	64
    213 #define	WM_TXQUEUELEN_MAX_82547	16
    214 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    215 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    216 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    217 #define	WM_NTXDESC_82542	256
    218 #define	WM_NTXDESC_82544	4096
    219 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    220 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    221 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    222 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    223 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
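
#if 0
/*
 * Illustrative sketch (not compiled in): both ring sizes are powers
 * of two, so WM_NEXTTX()/WM_NEXTTXS() wrap with a mask rather than a
 * modulo.  A hypothetical walk over one packet's descriptors:
 */
	for (i = txs->txs_firstdesc; ; i = WM_NEXTTX(txq, i)) {
		/* ... examine descriptor i ... */
		if (i == txs->txs_lastdesc)
			break;
	}
#endif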
    224 
    225 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    226 
    227 #define	WM_TXINTERQSIZE		256
    228 
    229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    230 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    231 #endif
    232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    233 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    234 #endif
    235 
    236 /*
    237  * Receive descriptor list size.  We have one Rx buffer for normal
    238  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    239  * packet.  We allocate 256 receive descriptors, each with a 2k
    240  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    241  */
    242 #define	WM_NRXDESC		256U
    243 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    244 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    245 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
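
/*
 * Informational arithmetic: a 9018-byte jumbo frame spans
 * howmany(9018, MCLBYTES) == 5 two-kilobyte clusters, so 256
 * descriptors leave room for 256 / 5 ~= 51 in-flight jumbo packets
 * (the "room for 50" noted above).
 */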
    246 
    247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    248 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    249 #endif
    250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    251 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    252 #endif
    253 
    254 typedef union txdescs {
    255 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    256 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    257 } txdescs_t;
    258 
    259 typedef union rxdescs {
    260 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    261 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    262 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    263 } rxdescs_t;
    264 
    265 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    267 
    268 /*
    269  * Software state for transmit jobs.
    270  */
    271 struct wm_txsoft {
    272 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    273 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    274 	int txs_firstdesc;		/* first descriptor in packet */
    275 	int txs_lastdesc;		/* last descriptor in packet */
    276 	int txs_ndesc;			/* # of descriptors used */
    277 };
    278 
    279 /*
    280  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    281  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    282  * them together.
    283  */
    284 struct wm_rxsoft {
    285 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    286 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    287 };
    288 
    289 #define WM_LINKUP_TIMEOUT	50
    290 
    291 static uint16_t swfwphysem[] = {
    292 	SWFW_PHY0_SM,
    293 	SWFW_PHY1_SM,
    294 	SWFW_PHY2_SM,
    295 	SWFW_PHY3_SM
    296 };
    297 
    298 static const uint32_t wm_82580_rxpbs_table[] = {
    299 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    300 };
    301 
    302 struct wm_softc;
    303 
    304 #ifdef WM_EVENT_COUNTERS
    305 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    306 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    307 	struct evcnt qname##_ev_##evname;
    308 
    309 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    310 	do {								\
    311 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    312 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    313 		    "%s%02d%s", #qname, (qnum), #evname);		\
    314 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    315 		    (evtype), NULL, (xname),				\
    316 		    (q)->qname##_##evname##_evcnt_name);		\
    317 	} while (0)
    318 
    319 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    320 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    321 
    322 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    323 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    324 
    325 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    326 	evcnt_detach(&(q)->qname##_ev_##evname);
    327 #endif /* WM_EVENT_COUNTERS */
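
#if 0
/*
 * Illustrative sketch (not compiled in): WM_Q_EVCNT_DEFINE(txq, txdw)
 * expands to roughly the pair below.  The literal "qname##XX##evname"
 * is never token-pasted inside the string; it merely reserves enough
 * storage for names such as "txq00txdw", which WM_Q_EVCNT_ATTACH()
 * builds with snprintf() at attach time.
 */
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_txdw;
#endif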
    328 
    329 struct wm_txqueue {
    330 	kmutex_t *txq_lock;		/* lock for tx operations */
    331 
    332 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    333 
    334 	/* Software state for the transmit descriptors. */
    335 	int txq_num;			/* must be a power of two */
    336 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    337 
    338 	/* TX control data structures. */
    339 	int txq_ndesc;			/* must be a power of two */
     340 	size_t txq_descsize;		/* size of a Tx descriptor */
    341 	txdescs_t *txq_descs_u;
    342 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    343 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     344 	int txq_desc_rseg;		/* real number of control segments */
    345 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    346 #define	txq_descs	txq_descs_u->sctxu_txdescs
    347 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    348 
    349 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    350 
    351 	int txq_free;			/* number of free Tx descriptors */
    352 	int txq_next;			/* next ready Tx descriptor */
    353 
    354 	int txq_sfree;			/* number of free Tx jobs */
    355 	int txq_snext;			/* next free Tx job */
    356 	int txq_sdirty;			/* dirty Tx jobs */
    357 
    358 	/* These 4 variables are used only on the 82547. */
    359 	int txq_fifo_size;		/* Tx FIFO size */
    360 	int txq_fifo_head;		/* current head of FIFO */
    361 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    362 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    363 
    364 	/*
    365 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     366 	 * CPUs.  This queue mediates between them without blocking.
    367 	 */
    368 	pcq_t *txq_interq;
    369 
    370 	/*
     371 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     372 	 * to manage the Tx H/W queue's busy flag.
    373 	 */
    374 	int txq_flags;			/* flags for H/W queue, see below */
    375 #define	WM_TXQ_NO_SPACE	0x1
    376 
    377 	bool txq_stopping;
    378 
    379 	bool txq_sending;
    380 	time_t txq_lastsent;
    381 
    382 	uint32_t txq_packets;		/* for AIM */
    383 	uint32_t txq_bytes;		/* for AIM */
    384 #ifdef WM_EVENT_COUNTERS
    385 	/* TX event counters */
    386 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    387 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    388 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    389 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    390 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    391 					    /* XXX not used? */
    392 
    393 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    394 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    395 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    396 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    397 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    398 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
    399 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    400 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    401 					    /* other than toomanyseg */
    402 
     403 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
    404 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    405 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    406 
    407 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    408 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    409 #endif /* WM_EVENT_COUNTERS */
    410 };
    411 
    412 struct wm_rxqueue {
    413 	kmutex_t *rxq_lock;		/* lock for rx operations */
    414 
    415 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    416 
    417 	/* Software state for the receive descriptors. */
    418 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    419 
    420 	/* RX control data structures. */
    421 	int rxq_ndesc;			/* must be a power of two */
     422 	size_t rxq_descsize;		/* size of an Rx descriptor */
    423 	rxdescs_t *rxq_descs_u;
    424 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    425 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     426 	int rxq_desc_rseg;		/* real number of control segments */
    427 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    428 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    429 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    430 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    431 
    432 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    433 
    434 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    435 	int rxq_discard;
    436 	int rxq_len;
    437 	struct mbuf *rxq_head;
    438 	struct mbuf *rxq_tail;
    439 	struct mbuf **rxq_tailp;
    440 
    441 	bool rxq_stopping;
    442 
    443 	uint32_t rxq_packets;		/* for AIM */
    444 	uint32_t rxq_bytes;		/* for AIM */
    445 #ifdef WM_EVENT_COUNTERS
    446 	/* RX event counters */
    447 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    448 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    449 
    450 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    451 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    452 #endif
    453 };
    454 
    455 struct wm_queue {
     456 	int wmq_id;			/* index of this TX/RX queue pair */
     457 	int wmq_intr_idx;		/* index into the MSI-X table */
    458 
    459 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    460 	bool wmq_set_itr;
    461 
    462 	struct wm_txqueue wmq_txq;
    463 	struct wm_rxqueue wmq_rxq;
    464 
    465 	bool wmq_txrx_use_workqueue;
    466 	struct work wmq_cookie;
    467 	void *wmq_si;
    468 	krndsource_t rnd_source;	/* random source */
    469 };
    470 
    471 struct wm_phyop {
    472 	int (*acquire)(struct wm_softc *);
    473 	void (*release)(struct wm_softc *);
    474 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    475 	int (*writereg_locked)(device_t, int, int, uint16_t);
    476 	int reset_delay_us;
    477 	bool no_errprint;
    478 };
    479 
    480 struct wm_nvmop {
    481 	int (*acquire)(struct wm_softc *);
    482 	void (*release)(struct wm_softc *);
    483 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    484 };
    485 
    486 /*
    487  * Software state per device.
    488  */
    489 struct wm_softc {
    490 	device_t sc_dev;		/* generic device information */
    491 	bus_space_tag_t sc_st;		/* bus space tag */
    492 	bus_space_handle_t sc_sh;	/* bus space handle */
    493 	bus_size_t sc_ss;		/* bus space size */
    494 	bus_space_tag_t sc_iot;		/* I/O space tag */
    495 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    496 	bus_size_t sc_ios;		/* I/O space size */
    497 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    498 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    499 	bus_size_t sc_flashs;		/* flash registers space size */
    500 	off_t sc_flashreg_offset;	/*
    501 					 * offset to flash registers from
    502 					 * start of BAR
    503 					 */
    504 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    505 
    506 	struct ethercom sc_ethercom;	/* ethernet common data */
    507 	struct mii_data sc_mii;		/* MII/media information */
    508 
    509 	pci_chipset_tag_t sc_pc;
    510 	pcitag_t sc_pcitag;
    511 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    512 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    513 
    514 	uint16_t sc_pcidevid;		/* PCI device ID */
    515 	wm_chip_type sc_type;		/* MAC type */
    516 	int sc_rev;			/* MAC revision */
    517 	wm_phy_type sc_phytype;		/* PHY type */
    518 	uint8_t sc_sfptype;		/* SFP type */
    519 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    520 #define	WM_MEDIATYPE_UNKNOWN		0x00
    521 #define	WM_MEDIATYPE_FIBER		0x01
    522 #define	WM_MEDIATYPE_COPPER		0x02
    523 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    524 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    525 	int sc_flags;			/* flags; see below */
    526 	u_short sc_if_flags;		/* last if_flags */
    527 	int sc_ec_capenable;		/* last ec_capenable */
    528 	int sc_flowflags;		/* 802.3x flow control flags */
    529 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    530 	int sc_align_tweak;
    531 
    532 	void *sc_ihs[WM_MAX_NINTR];	/*
    533 					 * interrupt cookie.
    534 					 * - legacy and msi use sc_ihs[0] only
    535 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    536 					 */
    537 	pci_intr_handle_t *sc_intrs;	/*
    538 					 * legacy and msi use sc_intrs[0] only
     539 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    540 					 */
    541 	int sc_nintrs;			/* number of interrupts */
    542 
     543 	int sc_link_intr_idx;		/* index into the MSI-X table */
    544 
    545 	callout_t sc_tick_ch;		/* tick callout */
    546 	bool sc_core_stopping;
    547 
    548 	int sc_nvm_ver_major;
    549 	int sc_nvm_ver_minor;
    550 	int sc_nvm_ver_build;
    551 	int sc_nvm_addrbits;		/* NVM address bits */
    552 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    553 	int sc_ich8_flash_base;
    554 	int sc_ich8_flash_bank_size;
    555 	int sc_nvm_k1_enabled;
    556 
    557 	int sc_nqueues;
    558 	struct wm_queue *sc_queue;
    559 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    560 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    561 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    562 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    563 	struct workqueue *sc_queue_wq;
    564 	bool sc_txrx_use_workqueue;
    565 
    566 	int sc_affinity_offset;
    567 
    568 #ifdef WM_EVENT_COUNTERS
    569 	/* Event counters. */
    570 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    571 
    572 	/* WM_T_82542_2_1 only */
    573 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    574 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    575 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    576 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    577 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    578 #endif /* WM_EVENT_COUNTERS */
    579 
    580 	struct sysctllog *sc_sysctllog;
    581 
     582 	/* This variable is used only on the 82547. */
    583 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    584 
    585 	uint32_t sc_ctrl;		/* prototype CTRL register */
    586 #if 0
    587 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    588 #endif
    589 	uint32_t sc_icr;		/* prototype interrupt bits */
    590 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    591 	uint32_t sc_tctl;		/* prototype TCTL register */
    592 	uint32_t sc_rctl;		/* prototype RCTL register */
    593 	uint32_t sc_txcw;		/* prototype TXCW register */
    594 	uint32_t sc_tipg;		/* prototype TIPG register */
    595 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    596 	uint32_t sc_pba;		/* prototype PBA register */
    597 
    598 	int sc_tbi_linkup;		/* TBI link status */
    599 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    600 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    601 
    602 	int sc_mchash_type;		/* multicast filter offset */
    603 
    604 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    605 
    606 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    607 	kmutex_t *sc_ich_phymtx;	/*
    608 					 * 82574/82583/ICH/PCH specific PHY
    609 					 * mutex. For 82574/82583, the mutex
    610 					 * is used for both PHY and NVM.
    611 					 */
    612 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    613 
    614 	struct wm_phyop phy;
    615 	struct wm_nvmop nvm;
    616 };
    617 
    618 #define WM_CORE_LOCK(_sc)						\
    619 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    620 #define WM_CORE_UNLOCK(_sc)						\
    621 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    622 #define WM_CORE_LOCKED(_sc)						\
    623 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
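
#if 0
/*
 * Illustrative sketch (not compiled in): sc_core_lock may not exist
 * (non-MPSAFE configurations), so these macros degrade to no-ops and
 * callers can bracket softc updates unconditionally:
 */
	WM_CORE_LOCK(sc);
	/* ... modify shared softc state ... */
	WM_CORE_UNLOCK(sc);
#endif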
    624 
    625 #define	WM_RXCHAIN_RESET(rxq)						\
    626 do {									\
    627 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    628 	*(rxq)->rxq_tailp = NULL;					\
    629 	(rxq)->rxq_len = 0;						\
    630 } while (/*CONSTCOND*/0)
    631 
    632 #define	WM_RXCHAIN_LINK(rxq, m)						\
    633 do {									\
    634 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    635 	(rxq)->rxq_tailp = &(m)->m_next;				\
    636 } while (/*CONSTCOND*/0)
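
#if 0
/*
 * Illustrative sketch (not compiled in): rxq_tailp always points at
 * the m_next field of the last mbuf (or at rxq_head when the chain is
 * empty), so appending each fragment of a multi-buffer packet is O(1).
 * The "lastfrag" flag below is hypothetical:
 */
	WM_RXCHAIN_LINK(rxq, m);
	if (lastfrag) {
		m = rxq->rxq_head;	/* completed packet */
		WM_RXCHAIN_RESET(rxq);	/* start a fresh chain */
	}
#endif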
    637 
    638 #ifdef WM_EVENT_COUNTERS
    639 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    640 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    641 
    642 #define WM_Q_EVCNT_INCR(qname, evname)			\
    643 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    644 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    645 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    646 #else /* !WM_EVENT_COUNTERS */
    647 #define	WM_EVCNT_INCR(ev)	/* nothing */
    648 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    649 
    650 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    651 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    652 #endif /* !WM_EVENT_COUNTERS */
    653 
    654 #define	CSR_READ(sc, reg)						\
    655 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    656 #define	CSR_WRITE(sc, reg, val)						\
    657 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    658 #define	CSR_WRITE_FLUSH(sc)						\
    659 	(void)CSR_READ((sc), WMREG_STATUS)
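
#if 0
/*
 * Illustrative sketch (not compiled in): CSR_WRITE_FLUSH() reads the
 * STATUS register to push posted PCI writes out to the chip before a
 * delay, e.g. in a reset sequence (register/bit names as used
 * elsewhere in this driver):
 */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);
	delay(10000);
#endif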
    660 
    661 #define ICH8_FLASH_READ32(sc, reg)					\
    662 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    663 	    (reg) + sc->sc_flashreg_offset)
    664 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    665 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    666 	    (reg) + sc->sc_flashreg_offset, (data))
    667 
    668 #define ICH8_FLASH_READ16(sc, reg)					\
    669 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    670 	    (reg) + sc->sc_flashreg_offset)
    671 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    672 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    673 	    (reg) + sc->sc_flashreg_offset, (data))
    674 
    675 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    676 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    677 
    678 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    679 #define	WM_CDTXADDR_HI(txq, x)						\
    680 	(sizeof(bus_addr_t) == 8 ?					\
    681 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    682 
    683 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    684 #define	WM_CDRXADDR_HI(rxq, x)						\
    685 	(sizeof(bus_addr_t) == 8 ?					\
    686 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
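
#if 0
/*
 * Illustrative sketch (not compiled in): descriptor base addresses
 * are programmed into the chip as two 32-bit halves; on platforms
 * where bus_addr_t is 32 bits wide the high half is constant zero:
 */
	uint64_t addr = WM_CDTXADDR(txq, 0);	/* DMA address of desc 0 */
	uint32_t lo = WM_CDTXADDR_LO(txq, 0);	/* addr & 0xffffffff */
	uint32_t hi = WM_CDTXADDR_HI(txq, 0);	/* upper 32 bits, or 0 */
#endif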
    687 
    688 /*
     689  * Register read/write functions,
     690  * other than CSR_{READ|WRITE}().
    691  */
    692 #if 0
    693 static inline uint32_t wm_io_read(struct wm_softc *, int);
    694 #endif
    695 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    696 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    697     uint32_t, uint32_t);
    698 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    699 
    700 /*
    701  * Descriptor sync/init functions.
    702  */
    703 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    704 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    705 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    706 
    707 /*
    708  * Device driver interface functions and commonly used functions.
    709  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    710  */
    711 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    712 static int	wm_match(device_t, cfdata_t, void *);
    713 static void	wm_attach(device_t, device_t, void *);
    714 static int	wm_detach(device_t, int);
    715 static bool	wm_suspend(device_t, const pmf_qual_t *);
    716 static bool	wm_resume(device_t, const pmf_qual_t *);
    717 static void	wm_watchdog(struct ifnet *);
    718 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    719     uint16_t *);
    720 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    721     uint16_t *);
    722 static void	wm_tick(void *);
    723 static int	wm_ifflags_cb(struct ethercom *);
    724 static int	wm_ioctl(struct ifnet *, u_long, void *);
    725 /* MAC address related */
    726 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    727 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    728 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    729 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    730 static int	wm_rar_count(struct wm_softc *);
    731 static void	wm_set_filter(struct wm_softc *);
    732 /* Reset and init related */
    733 static void	wm_set_vlan(struct wm_softc *);
    734 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    735 static void	wm_get_auto_rd_done(struct wm_softc *);
    736 static void	wm_lan_init_done(struct wm_softc *);
    737 static void	wm_get_cfg_done(struct wm_softc *);
    738 static int	wm_phy_post_reset(struct wm_softc *);
    739 static int	wm_write_smbus_addr(struct wm_softc *);
    740 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    741 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    742 static void	wm_initialize_hardware_bits(struct wm_softc *);
    743 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    744 static int	wm_reset_phy(struct wm_softc *);
    745 static void	wm_flush_desc_rings(struct wm_softc *);
    746 static void	wm_reset(struct wm_softc *);
    747 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    748 static void	wm_rxdrain(struct wm_rxqueue *);
    749 static void	wm_init_rss(struct wm_softc *);
    750 static void	wm_adjust_qnum(struct wm_softc *, int);
    751 static inline bool	wm_is_using_msix(struct wm_softc *);
    752 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    753 static int	wm_softint_establish(struct wm_softc *, int, int);
    754 static int	wm_setup_legacy(struct wm_softc *);
    755 static int	wm_setup_msix(struct wm_softc *);
    756 static int	wm_init(struct ifnet *);
    757 static int	wm_init_locked(struct ifnet *);
    758 static void	wm_init_sysctls(struct wm_softc *);
    759 static void	wm_unset_stopping_flags(struct wm_softc *);
    760 static void	wm_set_stopping_flags(struct wm_softc *);
    761 static void	wm_stop(struct ifnet *, int);
    762 static void	wm_stop_locked(struct ifnet *, bool, bool);
    763 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    764 static void	wm_82547_txfifo_stall(void *);
    765 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    766 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    767 /* DMA related */
    768 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    769 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    770 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    771 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    772     struct wm_txqueue *);
    773 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    774 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    775 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    776     struct wm_rxqueue *);
    777 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    778 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    779 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    780 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    781 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    782 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    783 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    784     struct wm_txqueue *);
    785 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    786     struct wm_rxqueue *);
    787 static int	wm_alloc_txrx_queues(struct wm_softc *);
    788 static void	wm_free_txrx_queues(struct wm_softc *);
    789 static int	wm_init_txrx_queues(struct wm_softc *);
    790 /* Start */
    791 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    792     struct wm_txsoft *, uint32_t *, uint8_t *);
    793 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    794 static void	wm_start(struct ifnet *);
    795 static void	wm_start_locked(struct ifnet *);
    796 static int	wm_transmit(struct ifnet *, struct mbuf *);
    797 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    798 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    799     bool);
    800 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    801     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    802 static void	wm_nq_start(struct ifnet *);
    803 static void	wm_nq_start_locked(struct ifnet *);
    804 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    805 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    806 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    807     bool);
    808 static void	wm_deferred_start_locked(struct wm_txqueue *);
    809 static void	wm_handle_queue(void *);
    810 static void	wm_handle_queue_work(struct work *, void *);
    811 /* Interrupt */
    812 static bool	wm_txeof(struct wm_txqueue *, u_int);
    813 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    814 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    815 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    816 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    817 static void	wm_linkintr(struct wm_softc *, uint32_t);
    818 static int	wm_intr_legacy(void *);
    819 static inline void	wm_txrxintr_disable(struct wm_queue *);
    820 static inline void	wm_txrxintr_enable(struct wm_queue *);
    821 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    822 static int	wm_txrxintr_msix(void *);
    823 static int	wm_linkintr_msix(void *);
    824 
    825 /*
    826  * Media related.
    827  * GMII, SGMII, TBI, SERDES and SFP.
    828  */
    829 /* Common */
    830 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    831 /* GMII related */
    832 static void	wm_gmii_reset(struct wm_softc *);
    833 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    834 static int	wm_get_phy_id_82575(struct wm_softc *);
    835 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    836 static int	wm_gmii_mediachange(struct ifnet *);
    837 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    838 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    839 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    840 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    841 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    842 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    843 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    844 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    845 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    846 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    847 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    848 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    849 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    850 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    851 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    852 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    853 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    854 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    855 	bool);
    856 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    857 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    858 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    859 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    860 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    861 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    862 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    863 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    864 static void	wm_gmii_statchg(struct ifnet *);
    865 /*
     866  * Kumeran related (80003, ICH* and PCH*).
     867  * These functions are not for accessing MII registers but for accessing
     868  * Kumeran-specific registers.
    869  */
    870 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    871 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    872 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    873 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    874 /* EMI register related */
    875 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    876 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    877 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    878 /* SGMII */
    879 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    880 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    881 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    882 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    883 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    884 /* TBI related */
    885 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    886 static void	wm_tbi_mediainit(struct wm_softc *);
    887 static int	wm_tbi_mediachange(struct ifnet *);
    888 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    889 static int	wm_check_for_link(struct wm_softc *);
    890 static void	wm_tbi_tick(struct wm_softc *);
    891 /* SERDES related */
    892 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    893 static int	wm_serdes_mediachange(struct ifnet *);
    894 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    895 static void	wm_serdes_tick(struct wm_softc *);
    896 /* SFP related */
    897 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    898 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    899 
    900 /*
    901  * NVM related.
    902  * Microwire, SPI (w/wo EERD) and Flash.
    903  */
    904 /* Misc functions */
    905 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    906 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    907 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    908 /* Microwire */
    909 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    910 /* SPI */
    911 static int	wm_nvm_ready_spi(struct wm_softc *);
    912 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     913 /* Reading with EERD */
    914 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    915 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    916 /* Flash */
    917 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    918     unsigned int *);
    919 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    920 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    921 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    922     uint32_t *);
    923 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    924 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    925 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    926 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    927 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    928 /* iNVM */
    929 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    930 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
     931 /* Locking, NVM type detection, checksum validation and read */
    932 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    933 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    934 static int	wm_nvm_validate_checksum(struct wm_softc *);
    935 static void	wm_nvm_version_invm(struct wm_softc *);
    936 static void	wm_nvm_version(struct wm_softc *);
    937 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    938 
    939 /*
    940  * Hardware semaphores.
     941  * Very complex...
    942  */
    943 static int	wm_get_null(struct wm_softc *);
    944 static void	wm_put_null(struct wm_softc *);
    945 static int	wm_get_eecd(struct wm_softc *);
    946 static void	wm_put_eecd(struct wm_softc *);
    947 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    948 static void	wm_put_swsm_semaphore(struct wm_softc *);
    949 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    950 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    951 static int	wm_get_nvm_80003(struct wm_softc *);
    952 static void	wm_put_nvm_80003(struct wm_softc *);
    953 static int	wm_get_nvm_82571(struct wm_softc *);
    954 static void	wm_put_nvm_82571(struct wm_softc *);
    955 static int	wm_get_phy_82575(struct wm_softc *);
    956 static void	wm_put_phy_82575(struct wm_softc *);
    957 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    958 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    959 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    960 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    961 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    962 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    963 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    964 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    965 
    966 /*
    967  * Management mode and power management related subroutines.
    968  * BMC, AMT, suspend/resume and EEE.
    969  */
    970 #if 0
    971 static int	wm_check_mng_mode(struct wm_softc *);
    972 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    973 static int	wm_check_mng_mode_82574(struct wm_softc *);
    974 static int	wm_check_mng_mode_generic(struct wm_softc *);
    975 #endif
    976 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    977 static bool	wm_phy_resetisblocked(struct wm_softc *);
    978 static void	wm_get_hw_control(struct wm_softc *);
    979 static void	wm_release_hw_control(struct wm_softc *);
    980 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    981 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
    982 static void	wm_init_manageability(struct wm_softc *);
    983 static void	wm_release_manageability(struct wm_softc *);
    984 static void	wm_get_wakeup(struct wm_softc *);
    985 static int	wm_ulp_disable(struct wm_softc *);
    986 static int	wm_enable_phy_wakeup(struct wm_softc *);
    987 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    988 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
    989 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
    990 static void	wm_enable_wakeup(struct wm_softc *);
    991 static void	wm_disable_aspm(struct wm_softc *);
    992 /* LPLU (Low Power Link Up) */
    993 static void	wm_lplu_d0_disable(struct wm_softc *);
    994 /* EEE */
    995 static int	wm_set_eee_i350(struct wm_softc *);
    996 static int	wm_set_eee_pchlan(struct wm_softc *);
    997 static int	wm_set_eee(struct wm_softc *);
    998 
    999 /*
   1000  * Workarounds (mainly PHY related).
    1001  * Basically, PHY workarounds belong in the PHY drivers.
   1002  */
   1003 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1004 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1005 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1006 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1007 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1008 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1009 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1010 static int	wm_k1_workaround_lv(struct wm_softc *);
   1011 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1012 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1013 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1014 static void	wm_reset_init_script_82575(struct wm_softc *);
   1015 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1016 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1017 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1018 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1019 static int	wm_pll_workaround_i210(struct wm_softc *);
   1020 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1021 
   1022 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1023     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1024 
   1025 /*
   1026  * Devices supported by this driver.
   1027  */
   1028 static const struct wm_product {
   1029 	pci_vendor_id_t		wmp_vendor;
   1030 	pci_product_id_t	wmp_product;
   1031 	const char		*wmp_name;
   1032 	wm_chip_type		wmp_type;
   1033 	uint32_t		wmp_flags;
   1034 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1035 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1036 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1037 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1038 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1039 } wm_products[] = {
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1041 	  "Intel i82542 1000BASE-X Ethernet",
   1042 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1045 	  "Intel i82543GC 1000BASE-X Ethernet",
   1046 	  WM_T_82543,		WMP_F_FIBER },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1049 	  "Intel i82543GC 1000BASE-T Ethernet",
   1050 	  WM_T_82543,		WMP_F_COPPER },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1053 	  "Intel i82544EI 1000BASE-T Ethernet",
   1054 	  WM_T_82544,		WMP_F_COPPER },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1057 	  "Intel i82544EI 1000BASE-X Ethernet",
   1058 	  WM_T_82544,		WMP_F_FIBER },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1061 	  "Intel i82544GC 1000BASE-T Ethernet",
   1062 	  WM_T_82544,		WMP_F_COPPER },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1065 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1066 	  WM_T_82544,		WMP_F_COPPER },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1069 	  "Intel i82540EM 1000BASE-T Ethernet",
   1070 	  WM_T_82540,		WMP_F_COPPER },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1073 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1074 	  WM_T_82540,		WMP_F_COPPER },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1077 	  "Intel i82540EP 1000BASE-T Ethernet",
   1078 	  WM_T_82540,		WMP_F_COPPER },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1081 	  "Intel i82540EP 1000BASE-T Ethernet",
   1082 	  WM_T_82540,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1085 	  "Intel i82540EP 1000BASE-T Ethernet",
   1086 	  WM_T_82540,		WMP_F_COPPER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1089 	  "Intel i82545EM 1000BASE-T Ethernet",
   1090 	  WM_T_82545,		WMP_F_COPPER },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1093 	  "Intel i82545GM 1000BASE-T Ethernet",
   1094 	  WM_T_82545_3,		WMP_F_COPPER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1097 	  "Intel i82545GM 1000BASE-X Ethernet",
   1098 	  WM_T_82545_3,		WMP_F_FIBER },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1101 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1102 	  WM_T_82545_3,		WMP_F_SERDES },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1105 	  "Intel i82546EB 1000BASE-T Ethernet",
   1106 	  WM_T_82546,		WMP_F_COPPER },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1109 	  "Intel i82546EB 1000BASE-T Ethernet",
   1110 	  WM_T_82546,		WMP_F_COPPER },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1113 	  "Intel i82545EM 1000BASE-X Ethernet",
   1114 	  WM_T_82545,		WMP_F_FIBER },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1117 	  "Intel i82546EB 1000BASE-X Ethernet",
   1118 	  WM_T_82546,		WMP_F_FIBER },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1121 	  "Intel i82546GB 1000BASE-T Ethernet",
   1122 	  WM_T_82546_3,		WMP_F_COPPER },
   1123 
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1125 	  "Intel i82546GB 1000BASE-X Ethernet",
   1126 	  WM_T_82546_3,		WMP_F_FIBER },
   1127 
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1129 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1130 	  WM_T_82546_3,		WMP_F_SERDES },
   1131 
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1133 	  "i82546GB quad-port Gigabit Ethernet",
   1134 	  WM_T_82546_3,		WMP_F_COPPER },
   1135 
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1137 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1138 	  WM_T_82546_3,		WMP_F_COPPER },
   1139 
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1141 	  "Intel PRO/1000MT (82546GB)",
   1142 	  WM_T_82546_3,		WMP_F_COPPER },
   1143 
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1145 	  "Intel i82541EI 1000BASE-T Ethernet",
   1146 	  WM_T_82541,		WMP_F_COPPER },
   1147 
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1149 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1150 	  WM_T_82541,		WMP_F_COPPER },
   1151 
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1153 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1154 	  WM_T_82541,		WMP_F_COPPER },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1157 	  "Intel i82541ER 1000BASE-T Ethernet",
   1158 	  WM_T_82541_2,		WMP_F_COPPER },
   1159 
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1161 	  "Intel i82541GI 1000BASE-T Ethernet",
   1162 	  WM_T_82541_2,		WMP_F_COPPER },
   1163 
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1165 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1166 	  WM_T_82541_2,		WMP_F_COPPER },
   1167 
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1169 	  "Intel i82541PI 1000BASE-T Ethernet",
   1170 	  WM_T_82541_2,		WMP_F_COPPER },
   1171 
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1173 	  "Intel i82547EI 1000BASE-T Ethernet",
   1174 	  WM_T_82547,		WMP_F_COPPER },
   1175 
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1177 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1178 	  WM_T_82547,		WMP_F_COPPER },
   1179 
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1181 	  "Intel i82547GI 1000BASE-T Ethernet",
   1182 	  WM_T_82547_2,		WMP_F_COPPER },
   1183 
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1185 	  "Intel PRO/1000 PT (82571EB)",
   1186 	  WM_T_82571,		WMP_F_COPPER },
   1187 
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1189 	  "Intel PRO/1000 PF (82571EB)",
   1190 	  WM_T_82571,		WMP_F_FIBER },
   1191 
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1193 	  "Intel PRO/1000 PB (82571EB)",
   1194 	  WM_T_82571,		WMP_F_SERDES },
   1195 
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1197 	  "Intel PRO/1000 QT (82571EB)",
   1198 	  WM_T_82571,		WMP_F_COPPER },
   1199 
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1201 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1202 	  WM_T_82571,		WMP_F_COPPER },
   1203 
   1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1205 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1206 	  WM_T_82571,		WMP_F_COPPER },
   1207 
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1209 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1210 	  WM_T_82571,		WMP_F_SERDES },
   1211 
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1213 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1214 	  WM_T_82571,		WMP_F_SERDES },
   1215 
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1217 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1218 	  WM_T_82571,		WMP_F_FIBER },
   1219 
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1221 	  "Intel i82572EI 1000baseT Ethernet",
   1222 	  WM_T_82572,		WMP_F_COPPER },
   1223 
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1225 	  "Intel i82572EI 1000baseX Ethernet",
   1226 	  WM_T_82572,		WMP_F_FIBER },
   1227 
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1229 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1230 	  WM_T_82572,		WMP_F_SERDES },
   1231 
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1233 	  "Intel i82572EI 1000baseT Ethernet",
   1234 	  WM_T_82572,		WMP_F_COPPER },
   1235 
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1237 	  "Intel i82573E",
   1238 	  WM_T_82573,		WMP_F_COPPER },
   1239 
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1241 	  "Intel i82573E IAMT",
   1242 	  WM_T_82573,		WMP_F_COPPER },
   1243 
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1245 	  "Intel i82573L Gigabit Ethernet",
   1246 	  WM_T_82573,		WMP_F_COPPER },
   1247 
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1249 	  "Intel i82574L",
   1250 	  WM_T_82574,		WMP_F_COPPER },
   1251 
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1253 	  "Intel i82574L",
   1254 	  WM_T_82574,		WMP_F_COPPER },
   1255 
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1257 	  "Intel i82583V",
   1258 	  WM_T_82583,		WMP_F_COPPER },
   1259 
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1261 	  "i80003 dual 1000baseT Ethernet",
   1262 	  WM_T_80003,		WMP_F_COPPER },
   1263 
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1265 	  "i80003 dual 1000baseX Ethernet",
   1266 	  WM_T_80003,		WMP_F_COPPER },
   1267 
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1269 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1270 	  WM_T_80003,		WMP_F_SERDES },
   1271 
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1273 	  "Intel i80003 1000baseT Ethernet",
   1274 	  WM_T_80003,		WMP_F_COPPER },
   1275 
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1277 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1278 	  WM_T_80003,		WMP_F_SERDES },
   1279 
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1281 	  "Intel i82801H (M_AMT) LAN Controller",
   1282 	  WM_T_ICH8,		WMP_F_COPPER },
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1284 	  "Intel i82801H (AMT) LAN Controller",
   1285 	  WM_T_ICH8,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1287 	  "Intel i82801H LAN Controller",
   1288 	  WM_T_ICH8,		WMP_F_COPPER },
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1290 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1291 	  WM_T_ICH8,		WMP_F_COPPER },
   1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1293 	  "Intel i82801H (M) LAN Controller",
   1294 	  WM_T_ICH8,		WMP_F_COPPER },
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1296 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1297 	  WM_T_ICH8,		WMP_F_COPPER },
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1299 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1300 	  WM_T_ICH8,		WMP_F_COPPER },
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1302 	  "82567V-3 LAN Controller",
   1303 	  WM_T_ICH8,		WMP_F_COPPER },
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1305 	  "82801I (AMT) LAN Controller",
   1306 	  WM_T_ICH9,		WMP_F_COPPER },
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1308 	  "82801I 10/100 LAN Controller",
   1309 	  WM_T_ICH9,		WMP_F_COPPER },
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1311 	  "82801I (G) 10/100 LAN Controller",
   1312 	  WM_T_ICH9,		WMP_F_COPPER },
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1314 	  "82801I (GT) 10/100 LAN Controller",
   1315 	  WM_T_ICH9,		WMP_F_COPPER },
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1317 	  "82801I (C) LAN Controller",
   1318 	  WM_T_ICH9,		WMP_F_COPPER },
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1320 	  "82801I mobile LAN Controller",
   1321 	  WM_T_ICH9,		WMP_F_COPPER },
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1323 	  "82801I mobile (V) LAN Controller",
   1324 	  WM_T_ICH9,		WMP_F_COPPER },
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1326 	  "82801I mobile (AMT) LAN Controller",
   1327 	  WM_T_ICH9,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1329 	  "82567LM-4 LAN Controller",
   1330 	  WM_T_ICH9,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1332 	  "82567LM-2 LAN Controller",
   1333 	  WM_T_ICH10,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1335 	  "82567LF-2 LAN Controller",
   1336 	  WM_T_ICH10,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1338 	  "82567LM-3 LAN Controller",
   1339 	  WM_T_ICH10,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1341 	  "82567LF-3 LAN Controller",
   1342 	  WM_T_ICH10,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1344 	  "82567V-2 LAN Controller",
   1345 	  WM_T_ICH10,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1347 	  "82567V-3? LAN Controller",
   1348 	  WM_T_ICH10,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1350 	  "HANKSVILLE LAN Controller",
   1351 	  WM_T_ICH10,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1353 	  "PCH LAN (82577LM) Controller",
   1354 	  WM_T_PCH,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1356 	  "PCH LAN (82577LC) Controller",
   1357 	  WM_T_PCH,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1359 	  "PCH LAN (82578DM) Controller",
   1360 	  WM_T_PCH,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1362 	  "PCH LAN (82578DC) Controller",
   1363 	  WM_T_PCH,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1365 	  "PCH2 LAN (82579LM) Controller",
   1366 	  WM_T_PCH2,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1368 	  "PCH2 LAN (82579V) Controller",
   1369 	  WM_T_PCH2,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1371 	  "82575EB dual-1000baseT Ethernet",
   1372 	  WM_T_82575,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1374 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1375 	  WM_T_82575,		WMP_F_SERDES },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1377 	  "82575GB quad-1000baseT Ethernet",
   1378 	  WM_T_82575,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1380 	  "82575GB quad-1000baseT Ethernet (PM)",
   1381 	  WM_T_82575,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1383 	  "82576 1000BaseT Ethernet",
   1384 	  WM_T_82576,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1386 	  "82576 1000BaseX Ethernet",
   1387 	  WM_T_82576,		WMP_F_FIBER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1390 	  "82576 gigabit Ethernet (SERDES)",
   1391 	  WM_T_82576,		WMP_F_SERDES },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1394 	  "82576 quad-1000BaseT Ethernet",
   1395 	  WM_T_82576,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1398 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1399 	  WM_T_82576,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1402 	  "82576 gigabit Ethernet",
   1403 	  WM_T_82576,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1406 	  "82576 gigabit Ethernet (SERDES)",
   1407 	  WM_T_82576,		WMP_F_SERDES },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1409 	  "82576 quad-gigabit Ethernet (SERDES)",
   1410 	  WM_T_82576,		WMP_F_SERDES },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1413 	  "82580 1000BaseT Ethernet",
   1414 	  WM_T_82580,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1416 	  "82580 1000BaseX Ethernet",
   1417 	  WM_T_82580,		WMP_F_FIBER },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1420 	  "82580 1000BaseT Ethernet (SERDES)",
   1421 	  WM_T_82580,		WMP_F_SERDES },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1424 	  "82580 gigabit Ethernet (SGMII)",
   1425 	  WM_T_82580,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1427 	  "82580 dual-1000BaseT Ethernet",
   1428 	  WM_T_82580,		WMP_F_COPPER },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1431 	  "82580 quad-1000BaseX Ethernet",
   1432 	  WM_T_82580,		WMP_F_FIBER },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1435 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1436 	  WM_T_82580,		WMP_F_COPPER },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1439 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1440 	  WM_T_82580,		WMP_F_SERDES },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1443 	  "DH89XXCC 1000BASE-KX Ethernet",
   1444 	  WM_T_82580,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1447 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1448 	  WM_T_82580,		WMP_F_SERDES },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1451 	  "I350 Gigabit Network Connection",
   1452 	  WM_T_I350,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1455 	  "I350 Gigabit Fiber Network Connection",
   1456 	  WM_T_I350,		WMP_F_FIBER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1459 	  "I350 Gigabit Backplane Connection",
   1460 	  WM_T_I350,		WMP_F_SERDES },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1463 	  "I350 Quad Port Gigabit Ethernet",
   1464 	  WM_T_I350,		WMP_F_SERDES },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1467 	  "I350 Gigabit Connection",
   1468 	  WM_T_I350,		WMP_F_COPPER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1471 	  "I354 Gigabit Ethernet (KX)",
   1472 	  WM_T_I354,		WMP_F_SERDES },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1475 	  "I354 Gigabit Ethernet (SGMII)",
   1476 	  WM_T_I354,		WMP_F_COPPER },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1479 	  "I354 Gigabit Ethernet (2.5G)",
   1480 	  WM_T_I354,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1483 	  "I210-T1 Ethernet Server Adapter",
   1484 	  WM_T_I210,		WMP_F_COPPER },
   1485 
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1487 	  "I210 Ethernet (Copper OEM)",
   1488 	  WM_T_I210,		WMP_F_COPPER },
   1489 
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1491 	  "I210 Ethernet (Copper IT)",
   1492 	  WM_T_I210,		WMP_F_COPPER },
   1493 
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1495 	  "I210 Ethernet (Copper, FLASH less)",
   1496 	  WM_T_I210,		WMP_F_COPPER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1499 	  "I210 Gigabit Ethernet (Fiber)",
   1500 	  WM_T_I210,		WMP_F_FIBER },
   1501 
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1503 	  "I210 Gigabit Ethernet (SERDES)",
   1504 	  WM_T_I210,		WMP_F_SERDES },
   1505 
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1507 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1508 	  WM_T_I210,		WMP_F_SERDES },
   1509 
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1511 	  "I210 Gigabit Ethernet (SGMII)",
   1512 	  WM_T_I210,		WMP_F_COPPER },
   1513 
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1515 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1516 	  WM_T_I210,		WMP_F_COPPER },
   1517 
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1519 	  "I211 Ethernet (COPPER)",
   1520 	  WM_T_I211,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1522 	  "I217 V Ethernet Connection",
   1523 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1525 	  "I217 LM Ethernet Connection",
   1526 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1528 	  "I218 V Ethernet Connection",
   1529 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1531 	  "I218 V Ethernet Connection",
   1532 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1534 	  "I218 V Ethernet Connection",
   1535 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1537 	  "I218 LM Ethernet Connection",
   1538 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1540 	  "I218 LM Ethernet Connection",
   1541 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1543 	  "I218 LM Ethernet Connection",
   1544 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1546 	  "I219 LM Ethernet Connection",
   1547 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1549 	  "I219 LM Ethernet Connection",
   1550 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1552 	  "I219 LM Ethernet Connection",
   1553 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1555 	  "I219 LM Ethernet Connection",
   1556 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1558 	  "I219 LM Ethernet Connection",
   1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1561 	  "I219 LM Ethernet Connection",
   1562 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1564 	  "I219 LM Ethernet Connection",
   1565 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1567 	  "I219 LM Ethernet Connection",
   1568 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1570 	  "I219 LM Ethernet Connection",
   1571 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1573 	  "I219 LM Ethernet Connection",
   1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1576 	  "I219 LM Ethernet Connection",
   1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1579 	  "I219 LM Ethernet Connection",
   1580 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1581 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1582 	  "I219 LM Ethernet Connection",
   1583 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1585 	  "I219 LM Ethernet Connection",
   1586 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1588 	  "I219 LM Ethernet Connection",
   1589 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1591 	  "I219 V Ethernet Connection",
   1592 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1593 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1594 	  "I219 V Ethernet Connection",
   1595 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1597 	  "I219 V Ethernet Connection",
   1598 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1600 	  "I219 V Ethernet Connection",
   1601 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1603 	  "I219 V Ethernet Connection",
   1604 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1606 	  "I219 V Ethernet Connection",
   1607 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1609 	  "I219 V Ethernet Connection",
   1610 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1612 	  "I219 V Ethernet Connection",
   1613 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1615 	  "I219 V Ethernet Connection",
   1616 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1618 	  "I219 V Ethernet Connection",
   1619 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1621 	  "I219 V Ethernet Connection",
   1622 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1624 	  "I219 V Ethernet Connection",
   1625 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1627 	  "I219 V Ethernet Connection",
   1628 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1629 	{ 0,			0,
   1630 	  NULL,
   1631 	  0,			0 },
   1632 };
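
         /*
          * Each entry in wm_products pairs a PCI vendor/product ID with a
          * human-readable name, the chip type (WM_T_*) used to select
          * per-family behaviour, and the default media flag (WMP_F_*).
          * The table ends with a NULL-name sentinel; wm_lookup() scans it
          * linearly.
          */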
   1633 
   1634 /*
    1635  * Register read/write functions, other than CSR_{READ|WRITE}().
   1637  */
   1638 
   1639 #if 0 /* Not currently used */
   1640 static inline uint32_t
   1641 wm_io_read(struct wm_softc *sc, int reg)
   1642 {
   1643 
   1644 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1645 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1646 }
   1647 #endif
   1648 
   1649 static inline void
   1650 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1651 {
   1652 
   1653 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1654 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1655 }
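
         /*
          * Both helpers use the same two-cycle indirection: the target
          * register offset is written to the I/O window at offset 0, and
          * the data is then transferred through the window at offset 4.
          * wm_io_read() (in the #if 0 block above) reads back through the
          * same pair of windows.
          */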
   1656 
   1657 static inline void
   1658 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1659     uint32_t data)
   1660 {
   1661 	uint32_t regval;
   1662 	int i;
   1663 
   1664 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1665 
   1666 	CSR_WRITE(sc, reg, regval);
   1667 
   1668 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1669 		delay(5);
   1670 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1671 			break;
   1672 	}
   1673 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1674 		aprint_error("%s: WARNING:"
   1675 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1676 		    device_xname(sc->sc_dev), reg);
   1677 	}
   1678 }
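
         /*
          * The helper above packs an 8-bit payload and a sub-register
          * offset into one CSR write, then polls in 5us steps (up to
          * SCTL_CTL_POLL_TIMEOUT iterations) for the READY bit that
          * indicates the hardware has latched the write.
          */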
   1679 
   1680 static inline void
   1681 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1682 {
   1683 	wa->wa_low = htole32(v & 0xffffffffU);
   1684 	if (sizeof(bus_addr_t) == 8)
   1685 		wa->wa_high = htole32((uint64_t) v >> 32);
   1686 	else
   1687 		wa->wa_high = 0;
   1688 }
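
         /*
          * For example, on a 64-bit platform the bus address
          * 0x0000000123456789 is stored as wa_low = htole32(0x23456789)
          * and wa_high = htole32(0x00000001); with 32-bit bus addresses
          * the high word is simply zero.
          */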
   1689 
   1690 /*
   1691  * Descriptor sync/init functions.
   1692  */
   1693 static inline void
   1694 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1695 {
   1696 	struct wm_softc *sc = txq->txq_sc;
   1697 
   1698 	/* If it will wrap around, sync to the end of the ring. */
   1699 	if ((start + num) > WM_NTXDESC(txq)) {
   1700 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1701 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1702 		    (WM_NTXDESC(txq) - start), ops);
   1703 		num -= (WM_NTXDESC(txq) - start);
   1704 		start = 0;
   1705 	}
   1706 
   1707 	/* Now sync whatever is left. */
   1708 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1709 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1710 }
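
         /*
          * For example, with a 256-descriptor ring, wm_cdtxsync(txq, 250,
          * 10, ops) issues two syncs: one covering descriptors 250-255
          * and a second covering descriptors 0-3.
          */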
   1711 
   1712 static inline void
   1713 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1714 {
   1715 	struct wm_softc *sc = rxq->rxq_sc;
   1716 
   1717 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1718 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1719 }
   1720 
   1721 static inline void
   1722 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1723 {
   1724 	struct wm_softc *sc = rxq->rxq_sc;
   1725 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1726 	struct mbuf *m = rxs->rxs_mbuf;
   1727 
    1728 	/*
    1729 	 * Note: We scoot the packet forward 2 bytes in the buffer so
    1730 	 * that the payload after the 14-byte Ethernet header is aligned
    1731 	 * to a 4-byte boundary (the IP header lands at offset 16).
    1732 	 *
    1733 	 * XXX BRAINDAMAGE ALERT!
    1734 	 * The stupid chip uses the same size for every buffer, which
    1735 	 * is set in the Receive Control register.  We are using the 2K
    1736 	 * size option, but what we REALLY want is (2K - 2)!  For this
    1737 	 * reason, we can't "scoot" packets longer than the standard
    1738 	 * Ethernet MTU.  On strict-alignment platforms, if the total
    1739 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
    1740 	 * the upper layer copy the headers.
    1741 	 */
   1742 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1743 
   1744 	if (sc->sc_type == WM_T_82574) {
   1745 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1746 		rxd->erx_data.erxd_addr =
   1747 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1748 		rxd->erx_data.erxd_dd = 0;
   1749 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1750 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1751 
   1752 		rxd->nqrx_data.nrxd_paddr =
   1753 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1754 		/* Currently, split header is not supported. */
   1755 		rxd->nqrx_data.nrxd_haddr = 0;
   1756 	} else {
   1757 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1758 
   1759 		wm_set_dma_addr(&rxd->wrx_addr,
   1760 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1761 		rxd->wrx_len = 0;
   1762 		rxd->wrx_cksum = 0;
   1763 		rxd->wrx_status = 0;
   1764 		rxd->wrx_errors = 0;
   1765 		rxd->wrx_special = 0;
   1766 	}
   1767 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1768 
   1769 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1770 }
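
         /*
          * wm_init_rxdesc() handles three descriptor layouts: the 82574's
          * extended descriptors, the "new queue" descriptors used by the
          * 82575 and later, and the legacy wiseman format.  In each case
          * only the packet buffer address is programmed and the remaining
          * fields are cleared; the ring tail register (RDT) is then
          * written with this index to hand the slot back to the hardware.
          */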
   1771 
   1772 /*
   1773  * Device driver interface functions and commonly used functions.
   1774  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1775  */
   1776 
   1777 /* Lookup supported device table */
   1778 static const struct wm_product *
   1779 wm_lookup(const struct pci_attach_args *pa)
   1780 {
   1781 	const struct wm_product *wmp;
   1782 
   1783 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1784 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1785 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1786 			return wmp;
   1787 	}
   1788 	return NULL;
   1789 }
   1790 
   1791 /* The match function (ca_match) */
   1792 static int
   1793 wm_match(device_t parent, cfdata_t cf, void *aux)
   1794 {
   1795 	struct pci_attach_args *pa = aux;
   1796 
   1797 	if (wm_lookup(pa) != NULL)
   1798 		return 1;
   1799 
   1800 	return 0;
   1801 }
   1802 
   1803 /* The attach function (ca_attach) */
   1804 static void
   1805 wm_attach(device_t parent, device_t self, void *aux)
   1806 {
   1807 	struct wm_softc *sc = device_private(self);
   1808 	struct pci_attach_args *pa = aux;
   1809 	prop_dictionary_t dict;
   1810 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1811 	pci_chipset_tag_t pc = pa->pa_pc;
   1812 	int counts[PCI_INTR_TYPE_SIZE];
   1813 	pci_intr_type_t max_type;
   1814 	const char *eetype, *xname;
   1815 	bus_space_tag_t memt;
   1816 	bus_space_handle_t memh;
   1817 	bus_size_t memsize;
   1818 	int memh_valid;
   1819 	int i, error;
   1820 	const struct wm_product *wmp;
   1821 	prop_data_t ea;
   1822 	prop_number_t pn;
   1823 	uint8_t enaddr[ETHER_ADDR_LEN];
   1824 	char buf[256];
   1825 	char wqname[MAXCOMLEN];
   1826 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1827 	pcireg_t preg, memtype;
   1828 	uint16_t eeprom_data, apme_mask;
   1829 	bool force_clear_smbi;
   1830 	uint32_t link_mode;
   1831 	uint32_t reg;
   1832 
   1833 	sc->sc_dev = self;
   1834 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1835 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1836 	sc->sc_core_stopping = false;
   1837 
   1838 	wmp = wm_lookup(pa);
   1839 #ifdef DIAGNOSTIC
   1840 	if (wmp == NULL) {
   1841 		printf("\n");
   1842 		panic("wm_attach: impossible");
   1843 	}
   1844 #endif
   1845 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1846 
   1847 	sc->sc_pc = pa->pa_pc;
   1848 	sc->sc_pcitag = pa->pa_tag;
   1849 
   1850 	if (pci_dma64_available(pa))
   1851 		sc->sc_dmat = pa->pa_dmat64;
   1852 	else
   1853 		sc->sc_dmat = pa->pa_dmat;
   1854 
   1855 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1856 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1857 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1858 
   1859 	sc->sc_type = wmp->wmp_type;
   1860 
   1861 	/* Set default function pointers */
   1862 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1863 	sc->phy.release = sc->nvm.release = wm_put_null;
   1864 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1865 
   1866 	if (sc->sc_type < WM_T_82543) {
   1867 		if (sc->sc_rev < 2) {
   1868 			aprint_error_dev(sc->sc_dev,
   1869 			    "i82542 must be at least rev. 2\n");
   1870 			return;
   1871 		}
   1872 		if (sc->sc_rev < 3)
   1873 			sc->sc_type = WM_T_82542_2_0;
   1874 	}
   1875 
   1876 	/*
   1877 	 * Disable MSI for Errata:
   1878 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1879 	 *
   1880 	 *  82544: Errata 25
   1881 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1882 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1883 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1884 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1885 	 *
   1886 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1887 	 *
   1888 	 *  82571 & 82572: Errata 63
   1889 	 */
   1890 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1891 	    || (sc->sc_type == WM_T_82572))
   1892 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1893 
   1894 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1895 	    || (sc->sc_type == WM_T_82580)
   1896 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1897 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1898 		sc->sc_flags |= WM_F_NEWQUEUE;
   1899 
   1900 	/* Set device properties (mactype) */
   1901 	dict = device_properties(sc->sc_dev);
   1902 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1903 
   1904 	/*
    1905 	 * Map the device.  All devices support memory-mapped access,
   1906 	 * and it is really required for normal operation.
   1907 	 */
   1908 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1909 	switch (memtype) {
   1910 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1911 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1912 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1913 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1914 		break;
   1915 	default:
   1916 		memh_valid = 0;
   1917 		break;
   1918 	}
   1919 
   1920 	if (memh_valid) {
   1921 		sc->sc_st = memt;
   1922 		sc->sc_sh = memh;
   1923 		sc->sc_ss = memsize;
   1924 	} else {
   1925 		aprint_error_dev(sc->sc_dev,
   1926 		    "unable to map device registers\n");
   1927 		return;
   1928 	}
   1929 
   1930 	/*
   1931 	 * In addition, i82544 and later support I/O mapped indirect
   1932 	 * register access.  It is not desirable (nor supported in
   1933 	 * this driver) to use it for normal operation, though it is
   1934 	 * required to work around bugs in some chip versions.
   1935 	 */
   1936 	if (sc->sc_type >= WM_T_82544) {
   1937 		/* First we have to find the I/O BAR. */
   1938 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1939 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1940 			if (memtype == PCI_MAPREG_TYPE_IO)
   1941 				break;
   1942 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1943 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1944 				i += 4;	/* skip high bits, too */
   1945 		}
   1946 		if (i < PCI_MAPREG_END) {
   1947 			/*
    1948 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1949 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO
    1950 			 * BAR.  That's no problem, because those newer
    1951 			 * chips don't have this bug.
    1952 			 *
    1953 			 * The i8254x apparently doesn't respond when the
    1954 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1955 			 * been configured.
   1956 			 */
   1957 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1958 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1959 				aprint_error_dev(sc->sc_dev,
   1960 				    "WARNING: I/O BAR at zero.\n");
   1961 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1962 					0, &sc->sc_iot, &sc->sc_ioh,
   1963 					NULL, &sc->sc_ios) == 0) {
   1964 				sc->sc_flags |= WM_F_IOH_VALID;
   1965 			} else
   1966 				aprint_error_dev(sc->sc_dev,
   1967 				    "WARNING: unable to map I/O space\n");
   1968 		}
   1969 
   1970 	}
   1971 
   1972 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1973 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1974 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1975 	if (sc->sc_type < WM_T_82542_2_1)
   1976 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1977 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1978 
   1979 	/* Power up chip */
   1980 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1981 	    && error != EOPNOTSUPP) {
   1982 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1983 		return;
   1984 	}
   1985 
   1986 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1987 	/*
    1988 	 * If we can use only one queue, don't use MSI-X; this saves
    1989 	 * interrupt resources.
   1990 	 */
   1991 	if (sc->sc_nqueues > 1) {
   1992 		max_type = PCI_INTR_TYPE_MSIX;
   1993 		/*
    1994 		 * The 82583 has an MSI-X capability in its PCI configuration
    1995 		 * space, but the chip doesn't actually support it.  At least
    1996 		 * the documentation says nothing about MSI-X.
   1997 		 */
   1998 		counts[PCI_INTR_TYPE_MSIX]
   1999 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2000 	} else {
   2001 		max_type = PCI_INTR_TYPE_MSI;
   2002 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2003 	}
   2004 
   2005 	/* Allocation settings */
   2006 	counts[PCI_INTR_TYPE_MSI] = 1;
   2007 	counts[PCI_INTR_TYPE_INTX] = 1;
   2008 	/* overridden by disable flags */
   2009 	if (wm_disable_msi != 0) {
   2010 		counts[PCI_INTR_TYPE_MSI] = 0;
   2011 		if (wm_disable_msix != 0) {
   2012 			max_type = PCI_INTR_TYPE_INTX;
   2013 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2014 		}
   2015 	} else if (wm_disable_msix != 0) {
   2016 		max_type = PCI_INTR_TYPE_MSI;
   2017 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2018 	}
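
         	/*
         	 * pci_intr_alloc() tries the interrupt types from max_type
         	 * downward, honouring the counts[] limits.  If setting up the
         	 * preferred type fails below, its vectors are released and we
         	 * retry here with the next weaker type: MSI-X -> MSI -> INTx.
         	 */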
   2019 
   2020 alloc_retry:
   2021 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2022 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2023 		return;
   2024 	}
   2025 
   2026 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2027 		error = wm_setup_msix(sc);
   2028 		if (error) {
   2029 			pci_intr_release(pc, sc->sc_intrs,
   2030 			    counts[PCI_INTR_TYPE_MSIX]);
   2031 
   2032 			/* Setup for MSI: Disable MSI-X */
   2033 			max_type = PCI_INTR_TYPE_MSI;
   2034 			counts[PCI_INTR_TYPE_MSI] = 1;
   2035 			counts[PCI_INTR_TYPE_INTX] = 1;
   2036 			goto alloc_retry;
   2037 		}
   2038 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2039 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2040 		error = wm_setup_legacy(sc);
   2041 		if (error) {
   2042 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2043 			    counts[PCI_INTR_TYPE_MSI]);
   2044 
   2045 			/* The next try is for INTx: Disable MSI */
   2046 			max_type = PCI_INTR_TYPE_INTX;
   2047 			counts[PCI_INTR_TYPE_INTX] = 1;
   2048 			goto alloc_retry;
   2049 		}
   2050 	} else {
   2051 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2052 		error = wm_setup_legacy(sc);
   2053 		if (error) {
   2054 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2055 			    counts[PCI_INTR_TYPE_INTX]);
   2056 			return;
   2057 		}
   2058 	}
   2059 
   2060 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2061 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2062 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2063 	    WM_WORKQUEUE_FLAGS);
   2064 	if (error) {
   2065 		aprint_error_dev(sc->sc_dev,
   2066 		    "unable to create workqueue\n");
   2067 		goto out;
   2068 	}
   2069 
   2070 	/*
   2071 	 * Check the function ID (unit number of the chip).
   2072 	 */
   2073 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2074 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2075 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2076 	    || (sc->sc_type == WM_T_82580)
   2077 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2078 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2079 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2080 	else
   2081 		sc->sc_funcid = 0;
   2082 
   2083 	/*
   2084 	 * Determine a few things about the bus we're connected to.
   2085 	 */
   2086 	if (sc->sc_type < WM_T_82543) {
   2087 		/* We don't really know the bus characteristics here. */
   2088 		sc->sc_bus_speed = 33;
   2089 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2090 		/*
    2091 		 * CSA (Communication Streaming Architecture) is about as fast
    2092 		 * as a 32-bit 66MHz PCI bus.
   2093 		 */
   2094 		sc->sc_flags |= WM_F_CSA;
   2095 		sc->sc_bus_speed = 66;
   2096 		aprint_verbose_dev(sc->sc_dev,
   2097 		    "Communication Streaming Architecture\n");
   2098 		if (sc->sc_type == WM_T_82547) {
   2099 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2100 			callout_setfunc(&sc->sc_txfifo_ch,
   2101 			    wm_82547_txfifo_stall, sc);
   2102 			aprint_verbose_dev(sc->sc_dev,
   2103 			    "using 82547 Tx FIFO stall work-around\n");
   2104 		}
   2105 	} else if (sc->sc_type >= WM_T_82571) {
   2106 		sc->sc_flags |= WM_F_PCIE;
   2107 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2108 		    && (sc->sc_type != WM_T_ICH10)
   2109 		    && (sc->sc_type != WM_T_PCH)
   2110 		    && (sc->sc_type != WM_T_PCH2)
   2111 		    && (sc->sc_type != WM_T_PCH_LPT)
   2112 		    && (sc->sc_type != WM_T_PCH_SPT)
   2113 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2114 			/* ICH* and PCH* have no PCIe capability registers */
   2115 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2116 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2117 				NULL) == 0)
   2118 				aprint_error_dev(sc->sc_dev,
   2119 				    "unable to find PCIe capability\n");
   2120 		}
   2121 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2122 	} else {
   2123 		reg = CSR_READ(sc, WMREG_STATUS);
   2124 		if (reg & STATUS_BUS64)
   2125 			sc->sc_flags |= WM_F_BUS64;
   2126 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2127 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2128 
   2129 			sc->sc_flags |= WM_F_PCIX;
   2130 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2131 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2132 				aprint_error_dev(sc->sc_dev,
   2133 				    "unable to find PCIX capability\n");
   2134 			else if (sc->sc_type != WM_T_82545_3 &&
   2135 				 sc->sc_type != WM_T_82546_3) {
   2136 				/*
   2137 				 * Work around a problem caused by the BIOS
   2138 				 * setting the max memory read byte count
   2139 				 * incorrectly.
   2140 				 */
   2141 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2142 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2143 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2144 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2145 
   2146 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2147 				    PCIX_CMD_BYTECNT_SHIFT;
   2148 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2149 				    PCIX_STATUS_MAXB_SHIFT;
   2150 				if (bytecnt > maxb) {
   2151 					aprint_verbose_dev(sc->sc_dev,
   2152 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2153 					    512 << bytecnt, 512 << maxb);
   2154 					pcix_cmd = (pcix_cmd &
   2155 					    ~PCIX_CMD_BYTECNT_MASK) |
   2156 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2157 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2158 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2159 					    pcix_cmd);
   2160 				}
   2161 			}
   2162 		}
   2163 		/*
   2164 		 * The quad port adapter is special; it has a PCIX-PCIX
   2165 		 * bridge on the board, and can run the secondary bus at
   2166 		 * a higher speed.
   2167 		 */
   2168 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2169 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2170 								      : 66;
   2171 		} else if (sc->sc_flags & WM_F_PCIX) {
   2172 			switch (reg & STATUS_PCIXSPD_MASK) {
   2173 			case STATUS_PCIXSPD_50_66:
   2174 				sc->sc_bus_speed = 66;
   2175 				break;
   2176 			case STATUS_PCIXSPD_66_100:
   2177 				sc->sc_bus_speed = 100;
   2178 				break;
   2179 			case STATUS_PCIXSPD_100_133:
   2180 				sc->sc_bus_speed = 133;
   2181 				break;
   2182 			default:
   2183 				aprint_error_dev(sc->sc_dev,
   2184 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2185 				    reg & STATUS_PCIXSPD_MASK);
   2186 				sc->sc_bus_speed = 66;
   2187 				break;
   2188 			}
   2189 		} else
   2190 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2191 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2192 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2193 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2194 	}
   2195 
   2196 	/* clear interesting stat counters */
   2197 	CSR_READ(sc, WMREG_COLC);
   2198 	CSR_READ(sc, WMREG_RXERRC);
   2199 
   2200 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2201 	    || (sc->sc_type >= WM_T_ICH8))
   2202 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2203 	if (sc->sc_type >= WM_T_ICH8)
   2204 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2205 
   2206 	/* Set PHY, NVM mutex related stuff */
   2207 	switch (sc->sc_type) {
   2208 	case WM_T_82542_2_0:
   2209 	case WM_T_82542_2_1:
   2210 	case WM_T_82543:
   2211 	case WM_T_82544:
   2212 		/* Microwire */
   2213 		sc->nvm.read = wm_nvm_read_uwire;
   2214 		sc->sc_nvm_wordsize = 64;
   2215 		sc->sc_nvm_addrbits = 6;
   2216 		break;
   2217 	case WM_T_82540:
   2218 	case WM_T_82545:
   2219 	case WM_T_82545_3:
   2220 	case WM_T_82546:
   2221 	case WM_T_82546_3:
   2222 		/* Microwire */
   2223 		sc->nvm.read = wm_nvm_read_uwire;
   2224 		reg = CSR_READ(sc, WMREG_EECD);
   2225 		if (reg & EECD_EE_SIZE) {
   2226 			sc->sc_nvm_wordsize = 256;
   2227 			sc->sc_nvm_addrbits = 8;
   2228 		} else {
   2229 			sc->sc_nvm_wordsize = 64;
   2230 			sc->sc_nvm_addrbits = 6;
   2231 		}
   2232 		sc->sc_flags |= WM_F_LOCK_EECD;
   2233 		sc->nvm.acquire = wm_get_eecd;
   2234 		sc->nvm.release = wm_put_eecd;
   2235 		break;
   2236 	case WM_T_82541:
   2237 	case WM_T_82541_2:
   2238 	case WM_T_82547:
   2239 	case WM_T_82547_2:
   2240 		reg = CSR_READ(sc, WMREG_EECD);
   2241 		/*
    2242 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2243 		 * 8254[17], so set the flags and functions before calling it.
   2244 		 */
   2245 		sc->sc_flags |= WM_F_LOCK_EECD;
   2246 		sc->nvm.acquire = wm_get_eecd;
   2247 		sc->nvm.release = wm_put_eecd;
   2248 		if (reg & EECD_EE_TYPE) {
   2249 			/* SPI */
   2250 			sc->nvm.read = wm_nvm_read_spi;
   2251 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2252 			wm_nvm_set_addrbits_size_eecd(sc);
   2253 		} else {
   2254 			/* Microwire */
   2255 			sc->nvm.read = wm_nvm_read_uwire;
   2256 			if ((reg & EECD_EE_ABITS) != 0) {
   2257 				sc->sc_nvm_wordsize = 256;
   2258 				sc->sc_nvm_addrbits = 8;
   2259 			} else {
   2260 				sc->sc_nvm_wordsize = 64;
   2261 				sc->sc_nvm_addrbits = 6;
   2262 			}
   2263 		}
   2264 		break;
   2265 	case WM_T_82571:
   2266 	case WM_T_82572:
   2267 		/* SPI */
   2268 		sc->nvm.read = wm_nvm_read_eerd;
    2269 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2270 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2271 		wm_nvm_set_addrbits_size_eecd(sc);
   2272 		sc->phy.acquire = wm_get_swsm_semaphore;
   2273 		sc->phy.release = wm_put_swsm_semaphore;
   2274 		sc->nvm.acquire = wm_get_nvm_82571;
   2275 		sc->nvm.release = wm_put_nvm_82571;
   2276 		break;
   2277 	case WM_T_82573:
   2278 	case WM_T_82574:
   2279 	case WM_T_82583:
   2280 		sc->nvm.read = wm_nvm_read_eerd;
    2281 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2282 		if (sc->sc_type == WM_T_82573) {
   2283 			sc->phy.acquire = wm_get_swsm_semaphore;
   2284 			sc->phy.release = wm_put_swsm_semaphore;
   2285 			sc->nvm.acquire = wm_get_nvm_82571;
   2286 			sc->nvm.release = wm_put_nvm_82571;
   2287 		} else {
   2288 			/* Both PHY and NVM use the same semaphore. */
   2289 			sc->phy.acquire = sc->nvm.acquire
   2290 			    = wm_get_swfwhw_semaphore;
   2291 			sc->phy.release = sc->nvm.release
   2292 			    = wm_put_swfwhw_semaphore;
   2293 		}
   2294 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2295 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2296 			sc->sc_nvm_wordsize = 2048;
   2297 		} else {
   2298 			/* SPI */
   2299 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2300 			wm_nvm_set_addrbits_size_eecd(sc);
   2301 		}
   2302 		break;
   2303 	case WM_T_82575:
   2304 	case WM_T_82576:
   2305 	case WM_T_82580:
   2306 	case WM_T_I350:
   2307 	case WM_T_I354:
   2308 	case WM_T_80003:
   2309 		/* SPI */
   2310 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2311 		wm_nvm_set_addrbits_size_eecd(sc);
   2312 		if ((sc->sc_type == WM_T_80003)
   2313 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2314 			sc->nvm.read = wm_nvm_read_eerd;
   2315 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2316 		} else {
   2317 			sc->nvm.read = wm_nvm_read_spi;
   2318 			sc->sc_flags |= WM_F_LOCK_EECD;
   2319 		}
   2320 		sc->phy.acquire = wm_get_phy_82575;
   2321 		sc->phy.release = wm_put_phy_82575;
   2322 		sc->nvm.acquire = wm_get_nvm_80003;
   2323 		sc->nvm.release = wm_put_nvm_80003;
   2324 		break;
   2325 	case WM_T_ICH8:
   2326 	case WM_T_ICH9:
   2327 	case WM_T_ICH10:
   2328 	case WM_T_PCH:
   2329 	case WM_T_PCH2:
   2330 	case WM_T_PCH_LPT:
   2331 		sc->nvm.read = wm_nvm_read_ich8;
   2332 		/* FLASH */
   2333 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2334 		sc->sc_nvm_wordsize = 2048;
   2335 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2336 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2337 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2338 			aprint_error_dev(sc->sc_dev,
   2339 			    "can't map FLASH registers\n");
   2340 			goto out;
   2341 		}
   2342 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2343 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2344 		    ICH_FLASH_SECTOR_SIZE;
   2345 		sc->sc_ich8_flash_bank_size =
   2346 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2347 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2348 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2349 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
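         		/*
         		 * GFPREG gives the flash region as sector numbers: the
         		 * masked low half is the first sector and the masked
         		 * high half the last.  The computation above converts
         		 * that span to bytes, then divides by four to split the
         		 * region across its two NVM banks and to convert bytes
         		 * into 16-bit words.
         		 */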
   2350 		sc->sc_flashreg_offset = 0;
   2351 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2352 		sc->phy.release = wm_put_swflag_ich8lan;
   2353 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2354 		sc->nvm.release = wm_put_nvm_ich8lan;
   2355 		break;
   2356 	case WM_T_PCH_SPT:
   2357 	case WM_T_PCH_CNP:
   2358 		sc->nvm.read = wm_nvm_read_spt;
   2359 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2360 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2361 		sc->sc_flasht = sc->sc_st;
   2362 		sc->sc_flashh = sc->sc_sh;
   2363 		sc->sc_ich8_flash_base = 0;
   2364 		sc->sc_nvm_wordsize =
   2365 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2366 		    * NVM_SIZE_MULTIPLIER;
    2367 		/* That is the size in bytes; we want words */
   2368 		sc->sc_nvm_wordsize /= 2;
   2369 		/* Assume 2 banks */
   2370 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
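         		/*
         		 * For example, a strap field of 7 yields
         		 * 8 * NVM_SIZE_MULTIPLIER bytes, i.e. half that many
         		 * 16-bit words, split evenly across the two banks.
         		 */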
   2371 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2372 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2373 		sc->phy.release = wm_put_swflag_ich8lan;
   2374 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2375 		sc->nvm.release = wm_put_nvm_ich8lan;
   2376 		break;
   2377 	case WM_T_I210:
   2378 	case WM_T_I211:
    2379 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2380 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2381 		if (wm_nvm_flash_presence_i210(sc)) {
   2382 			sc->nvm.read = wm_nvm_read_eerd;
   2383 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2384 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2385 			wm_nvm_set_addrbits_size_eecd(sc);
   2386 		} else {
   2387 			sc->nvm.read = wm_nvm_read_invm;
   2388 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2389 			sc->sc_nvm_wordsize = INVM_SIZE;
   2390 		}
   2391 		sc->phy.acquire = wm_get_phy_82575;
   2392 		sc->phy.release = wm_put_phy_82575;
   2393 		sc->nvm.acquire = wm_get_nvm_80003;
   2394 		sc->nvm.release = wm_put_nvm_80003;
   2395 		break;
   2396 	default:
   2397 		break;
   2398 	}
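
         	/*
         	 * From here on, NVM and PHY accesses go through the
         	 * sc->nvm.{read,acquire,release} and sc->phy.{acquire,release}
         	 * hooks chosen above; families that need no locking keep the
         	 * wm_get_null/wm_put_null defaults.
         	 */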
   2399 
   2400 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2401 	switch (sc->sc_type) {
   2402 	case WM_T_82571:
   2403 	case WM_T_82572:
   2404 		reg = CSR_READ(sc, WMREG_SWSM2);
   2405 		if ((reg & SWSM2_LOCK) == 0) {
   2406 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2407 			force_clear_smbi = true;
   2408 		} else
   2409 			force_clear_smbi = false;
   2410 		break;
   2411 	case WM_T_82573:
   2412 	case WM_T_82574:
   2413 	case WM_T_82583:
   2414 		force_clear_smbi = true;
   2415 		break;
   2416 	default:
   2417 		force_clear_smbi = false;
   2418 		break;
   2419 	}
   2420 	if (force_clear_smbi) {
   2421 		reg = CSR_READ(sc, WMREG_SWSM);
   2422 		if ((reg & SWSM_SMBI) != 0)
   2423 			aprint_error_dev(sc->sc_dev,
   2424 			    "Please update the Bootagent\n");
   2425 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2426 	}
   2427 
   2428 	/*
    2429 	 * Defer printing the EEPROM type until after verifying the checksum.
   2430 	 * This allows the EEPROM type to be printed correctly in the case
   2431 	 * that no EEPROM is attached.
   2432 	 */
   2433 	/*
   2434 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2435 	 * this for later, so we can fail future reads from the EEPROM.
   2436 	 */
   2437 	if (wm_nvm_validate_checksum(sc)) {
   2438 		/*
    2439 		 * Check again, because some PCI-e parts fail the
    2440 		 * first check due to the link being in a sleep state.
   2441 		 */
   2442 		if (wm_nvm_validate_checksum(sc))
   2443 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2444 	}
   2445 
   2446 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2447 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2448 	else {
   2449 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2450 		    sc->sc_nvm_wordsize);
   2451 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2452 			aprint_verbose("iNVM");
   2453 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2454 			aprint_verbose("FLASH(HW)");
   2455 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2456 			aprint_verbose("FLASH");
   2457 		else {
   2458 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2459 				eetype = "SPI";
   2460 			else
   2461 				eetype = "MicroWire";
   2462 			aprint_verbose("(%d address bits) %s EEPROM",
   2463 			    sc->sc_nvm_addrbits, eetype);
   2464 		}
   2465 	}
   2466 	wm_nvm_version(sc);
   2467 	aprint_verbose("\n");
   2468 
   2469 	/*
    2470 	 * XXX This is the first call of wm_gmii_setup_phytype; the
    2471 	 * result might be incorrect.
   2472 	 */
   2473 	wm_gmii_setup_phytype(sc, 0, 0);
   2474 
   2475 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2476 	switch (sc->sc_type) {
   2477 	case WM_T_ICH8:
   2478 	case WM_T_ICH9:
   2479 	case WM_T_ICH10:
   2480 	case WM_T_PCH:
   2481 	case WM_T_PCH2:
   2482 	case WM_T_PCH_LPT:
   2483 	case WM_T_PCH_SPT:
   2484 	case WM_T_PCH_CNP:
   2485 		apme_mask = WUC_APME;
   2486 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2487 		if ((eeprom_data & apme_mask) != 0)
   2488 			sc->sc_flags |= WM_F_WOL;
   2489 		break;
   2490 	default:
   2491 		break;
   2492 	}
   2493 
   2494 	/* Reset the chip to a known state. */
   2495 	wm_reset(sc);
   2496 
   2497 	/*
   2498 	 * Check for I21[01] PLL workaround.
   2499 	 *
   2500 	 * Three cases:
   2501 	 * a) Chip is I211.
   2502 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2503 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2504 	 */
   2505 	if (sc->sc_type == WM_T_I211)
   2506 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2507 	if (sc->sc_type == WM_T_I210) {
   2508 		if (!wm_nvm_flash_presence_i210(sc))
   2509 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2510 		else if ((sc->sc_nvm_ver_major < 3)
   2511 		    || ((sc->sc_nvm_ver_major == 3)
   2512 			&& (sc->sc_nvm_ver_minor < 25))) {
   2513 			aprint_verbose_dev(sc->sc_dev,
   2514 			    "ROM image version %d.%d is older than 3.25\n",
   2515 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2516 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2517 		}
   2518 	}
   2519 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2520 		wm_pll_workaround_i210(sc);
   2521 
   2522 	wm_get_wakeup(sc);
   2523 
   2524 	/* Non-AMT based hardware can now take control from firmware */
   2525 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2526 		wm_get_hw_control(sc);
   2527 
   2528 	/*
    2529 	 * Read the Ethernet address from the EEPROM, unless it was
    2530 	 * already found in the device properties.
   2531 	 */
   2532 	ea = prop_dictionary_get(dict, "mac-address");
   2533 	if (ea != NULL) {
   2534 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2535 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2536 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2537 	} else {
   2538 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2539 			aprint_error_dev(sc->sc_dev,
   2540 			    "unable to read Ethernet address\n");
   2541 			goto out;
   2542 		}
   2543 	}
   2544 
   2545 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2546 	    ether_sprintf(enaddr));
   2547 
   2548 	/*
   2549 	 * Read the config info from the EEPROM, and set up various
   2550 	 * bits in the control registers based on their contents.
   2551 	 */
   2552 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2553 	if (pn != NULL) {
   2554 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2555 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2556 	} else {
   2557 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2558 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2559 			goto out;
   2560 		}
   2561 	}
   2562 
   2563 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2564 	if (pn != NULL) {
   2565 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2566 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2567 	} else {
   2568 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2569 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2570 			goto out;
   2571 		}
   2572 	}
   2573 
   2574 	/* check for WM_F_WOL */
   2575 	switch (sc->sc_type) {
   2576 	case WM_T_82542_2_0:
   2577 	case WM_T_82542_2_1:
   2578 	case WM_T_82543:
   2579 		/* dummy? */
   2580 		eeprom_data = 0;
   2581 		apme_mask = NVM_CFG3_APME;
   2582 		break;
   2583 	case WM_T_82544:
   2584 		apme_mask = NVM_CFG2_82544_APM_EN;
   2585 		eeprom_data = cfg2;
   2586 		break;
   2587 	case WM_T_82546:
   2588 	case WM_T_82546_3:
   2589 	case WM_T_82571:
   2590 	case WM_T_82572:
   2591 	case WM_T_82573:
   2592 	case WM_T_82574:
   2593 	case WM_T_82583:
   2594 	case WM_T_80003:
   2595 	case WM_T_82575:
   2596 	case WM_T_82576:
   2597 		apme_mask = NVM_CFG3_APME;
   2598 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2599 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2600 		break;
   2601 	case WM_T_82580:
   2602 	case WM_T_I350:
   2603 	case WM_T_I354:
   2604 	case WM_T_I210:
   2605 	case WM_T_I211:
   2606 		apme_mask = NVM_CFG3_APME;
   2607 		wm_nvm_read(sc,
   2608 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2609 		    1, &eeprom_data);
   2610 		break;
   2611 	case WM_T_ICH8:
   2612 	case WM_T_ICH9:
   2613 	case WM_T_ICH10:
   2614 	case WM_T_PCH:
   2615 	case WM_T_PCH2:
   2616 	case WM_T_PCH_LPT:
   2617 	case WM_T_PCH_SPT:
   2618 	case WM_T_PCH_CNP:
    2619 		/* Already checked before wm_reset() */
   2620 		apme_mask = eeprom_data = 0;
   2621 		break;
   2622 	default: /* XXX 82540 */
   2623 		apme_mask = NVM_CFG3_APME;
   2624 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2625 		break;
   2626 	}
    2627 	/* Now that the EEPROM data has been read, check for the WM_F_WOL flag */
   2628 	if ((eeprom_data & apme_mask) != 0)
   2629 		sc->sc_flags |= WM_F_WOL;
   2630 
   2631 	/*
    2632 	 * We have the EEPROM settings; now apply the special cases
    2633 	 * where the EEPROM may be wrong or the board won't support
    2634 	 * wake-on-LAN on a particular port.
   2635 	 */
   2636 	switch (sc->sc_pcidevid) {
   2637 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2638 		sc->sc_flags &= ~WM_F_WOL;
   2639 		break;
   2640 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2641 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2642 		/* Wake events only supported on port A for dual fiber
   2643 		 * regardless of eeprom setting */
   2644 		if (sc->sc_funcid == 1)
   2645 			sc->sc_flags &= ~WM_F_WOL;
   2646 		break;
   2647 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2648 		/* If quad port adapter, disable WoL on all but port A */
   2649 		if (sc->sc_funcid != 0)
   2650 			sc->sc_flags &= ~WM_F_WOL;
   2651 		break;
   2652 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2653 		/* Wake events only supported on port A for dual fiber
   2654 		 * regardless of eeprom setting */
   2655 		if (sc->sc_funcid == 1)
   2656 			sc->sc_flags &= ~WM_F_WOL;
   2657 		break;
   2658 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2659 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2660 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2661 		/* If quad port adapter, disable WoL on all but port A */
   2662 		if (sc->sc_funcid != 0)
   2663 			sc->sc_flags &= ~WM_F_WOL;
   2664 		break;
   2665 	}
   2666 
   2667 	if (sc->sc_type >= WM_T_82575) {
   2668 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2669 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2670 			    nvmword);
   2671 			if ((sc->sc_type == WM_T_82575) ||
   2672 			    (sc->sc_type == WM_T_82576)) {
   2673 				/* Check NVM for autonegotiation */
   2674 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2675 				    != 0)
   2676 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2677 			}
   2678 			if ((sc->sc_type == WM_T_82575) ||
   2679 			    (sc->sc_type == WM_T_I350)) {
   2680 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2681 					sc->sc_flags |= WM_F_MAS;
   2682 			}
   2683 		}
   2684 	}
   2685 
   2686 	/*
    2687 	 * XXX Need special handling for some multi-port cards
    2688 	 * to disable a particular port.
   2689 	 */
   2690 
   2691 	if (sc->sc_type >= WM_T_82544) {
   2692 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2693 		if (pn != NULL) {
   2694 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2695 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2696 		} else {
   2697 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2698 				aprint_error_dev(sc->sc_dev,
   2699 				    "unable to read SWDPIN\n");
   2700 				goto out;
   2701 			}
   2702 		}
   2703 	}
   2704 
   2705 	if (cfg1 & NVM_CFG1_ILOS)
   2706 		sc->sc_ctrl |= CTRL_ILOS;
   2707 
   2708 	/*
   2709 	 * XXX
    2710 	 * This code isn't correct, because pins 2 and 3 are located
    2711 	 * at different positions on newer chips.  Check all the datasheets.
    2712 	 *
    2713 	 * Until this is resolved, apply it only to chips <= 82580.
   2714 	 */
   2715 	if (sc->sc_type <= WM_T_82580) {
   2716 		if (sc->sc_type >= WM_T_82544) {
   2717 			sc->sc_ctrl |=
   2718 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2719 			    CTRL_SWDPIO_SHIFT;
   2720 			sc->sc_ctrl |=
   2721 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2722 			    CTRL_SWDPINS_SHIFT;
   2723 		} else {
   2724 			sc->sc_ctrl |=
   2725 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2726 			    CTRL_SWDPIO_SHIFT;
   2727 		}
   2728 	}
   2729 
   2730 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2731 		wm_nvm_read(sc,
   2732 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2733 		    1, &nvmword);
   2734 		if (nvmword & NVM_CFG3_ILOS)
   2735 			sc->sc_ctrl |= CTRL_ILOS;
   2736 	}
   2737 
   2738 #if 0
   2739 	if (sc->sc_type >= WM_T_82544) {
   2740 		if (cfg1 & NVM_CFG1_IPS0)
   2741 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2742 		if (cfg1 & NVM_CFG1_IPS1)
   2743 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2744 		sc->sc_ctrl_ext |=
   2745 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2746 		    CTRL_EXT_SWDPIO_SHIFT;
   2747 		sc->sc_ctrl_ext |=
   2748 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2749 		    CTRL_EXT_SWDPINS_SHIFT;
   2750 	} else {
   2751 		sc->sc_ctrl_ext |=
   2752 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2753 		    CTRL_EXT_SWDPIO_SHIFT;
   2754 	}
   2755 #endif
   2756 
   2757 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2758 #if 0
   2759 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2760 #endif
   2761 
   2762 	if (sc->sc_type == WM_T_PCH) {
   2763 		uint16_t val;
   2764 
   2765 		/* Save the NVM K1 bit setting */
   2766 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2767 
   2768 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2769 			sc->sc_nvm_k1_enabled = 1;
   2770 		else
   2771 			sc->sc_nvm_k1_enabled = 0;
   2772 	}
   2773 
   2774 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2775 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2776 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2777 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2778 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2779 	    || sc->sc_type == WM_T_82573
   2780 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2781 		/* Copper only */
   2782 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2783 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2784 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2785 	    || (sc->sc_type == WM_T_I211)) {
   2786 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2787 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2788 		switch (link_mode) {
   2789 		case CTRL_EXT_LINK_MODE_1000KX:
   2790 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2791 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2792 			break;
   2793 		case CTRL_EXT_LINK_MODE_SGMII:
   2794 			if (wm_sgmii_uses_mdio(sc)) {
   2795 				aprint_normal_dev(sc->sc_dev,
   2796 				    "SGMII(MDIO)\n");
   2797 				sc->sc_flags |= WM_F_SGMII;
   2798 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2799 				break;
   2800 			}
   2801 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2802 			/*FALLTHROUGH*/
   2803 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2804 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2805 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2806 				if (link_mode
   2807 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2808 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2809 					sc->sc_flags |= WM_F_SGMII;
   2810 					aprint_verbose_dev(sc->sc_dev,
   2811 					    "SGMII\n");
   2812 				} else {
   2813 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2814 					aprint_verbose_dev(sc->sc_dev,
   2815 					    "SERDES\n");
   2816 				}
   2817 				break;
   2818 			}
   2819 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2820 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2821 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2822 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2823 				sc->sc_flags |= WM_F_SGMII;
   2824 			}
   2825 			/* Do not change link mode for 100BaseFX */
   2826 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2827 				break;
   2828 
   2829 			/* Change current link mode setting */
   2830 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2831 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2832 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2833 			else
   2834 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2835 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2836 			break;
   2837 		case CTRL_EXT_LINK_MODE_GMII:
   2838 		default:
   2839 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2840 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2841 			break;
   2842 		}
   2843 
    2845 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2846 			reg |= CTRL_EXT_I2C_ENA;
    2847 		else
    2848 			reg &= ~CTRL_EXT_I2C_ENA;
   2849 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2850 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2851 			wm_gmii_setup_phytype(sc, 0, 0);
   2852 			wm_reset_mdicnfg_82580(sc);
   2853 		}
   2854 	} else if (sc->sc_type < WM_T_82543 ||
   2855 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2856 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2857 			aprint_error_dev(sc->sc_dev,
   2858 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2859 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2860 		}
   2861 	} else {
   2862 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2863 			aprint_error_dev(sc->sc_dev,
   2864 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2865 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2866 		}
   2867 	}
   2868 
   2869 	if (sc->sc_type >= WM_T_PCH2)
   2870 		sc->sc_flags |= WM_F_EEE;
   2871 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2872 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2873 		/* XXX: Need special handling for I354. (not yet) */
   2874 		if (sc->sc_type != WM_T_I354)
   2875 			sc->sc_flags |= WM_F_EEE;
   2876 	}
   2877 
   2878 	/* Set device properties (macflags) */
   2879 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2880 
   2881 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2882 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2883 
   2884 #ifdef WM_MPSAFE
   2885 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2886 #else
   2887 	sc->sc_core_lock = NULL;
   2888 #endif
   2889 
   2890 	/* Initialize the media structures accordingly. */
   2891 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2892 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2893 	else
   2894 		wm_tbi_mediainit(sc); /* All others */
   2895 
   2896 	ifp = &sc->sc_ethercom.ec_if;
   2897 	xname = device_xname(sc->sc_dev);
   2898 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2899 	ifp->if_softc = sc;
   2900 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2901 #ifdef WM_MPSAFE
   2902 	ifp->if_extflags = IFEF_MPSAFE;
   2903 #endif
   2904 	ifp->if_ioctl = wm_ioctl;
   2905 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2906 		ifp->if_start = wm_nq_start;
   2907 		/*
    2908 		 * When the number of CPUs is one and the controller can use
    2909 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2910 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2911 		 * other for link status changes.
    2912 		 * In this situation, wm_nq_transmit() is disadvantageous
    2913 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2914 		 */
   2915 		if (wm_is_using_multiqueue(sc))
   2916 			ifp->if_transmit = wm_nq_transmit;
   2917 	} else {
   2918 		ifp->if_start = wm_start;
   2919 		/*
    2920 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2921 		 */
   2922 		if (wm_is_using_multiqueue(sc))
   2923 			ifp->if_transmit = wm_transmit;
   2924 	}
    2925 	/* wm(4) does not use ifp->if_watchdog; wm_tick() serves as the watchdog. */
   2926 	ifp->if_init = wm_init;
   2927 	ifp->if_stop = wm_stop;
   2928 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2929 	IFQ_SET_READY(&ifp->if_snd);
   2930 
   2931 	/* Check for jumbo frame */
   2932 	switch (sc->sc_type) {
   2933 	case WM_T_82573:
   2934 		/* XXX limited to 9234 if ASPM is disabled */
   2935 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2936 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2937 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2938 		break;
   2939 	case WM_T_82571:
   2940 	case WM_T_82572:
   2941 	case WM_T_82574:
   2942 	case WM_T_82583:
   2943 	case WM_T_82575:
   2944 	case WM_T_82576:
   2945 	case WM_T_82580:
   2946 	case WM_T_I350:
   2947 	case WM_T_I354:
   2948 	case WM_T_I210:
   2949 	case WM_T_I211:
   2950 	case WM_T_80003:
   2951 	case WM_T_ICH9:
   2952 	case WM_T_ICH10:
   2953 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2954 	case WM_T_PCH_LPT:
   2955 	case WM_T_PCH_SPT:
   2956 	case WM_T_PCH_CNP:
   2957 		/* XXX limited to 9234 */
   2958 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2959 		break;
   2960 	case WM_T_PCH:
   2961 		/* XXX limited to 4096 */
   2962 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2963 		break;
   2964 	case WM_T_82542_2_0:
   2965 	case WM_T_82542_2_1:
   2966 	case WM_T_ICH8:
   2967 		/* No support for jumbo frame */
   2968 		break;
   2969 	default:
   2970 		/* ETHER_MAX_LEN_JUMBO */
   2971 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2972 		break;
   2973 	}
   2974 
    2975 	/* If we're an i82543 or greater, we can support VLANs. */
   2976 	if (sc->sc_type >= WM_T_82543) {
   2977 		sc->sc_ethercom.ec_capabilities |=
   2978 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2979 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2980 	}
   2981 
   2982 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2983 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2984 
   2985 	/*
    2986 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2987 	 * on i82543 and later.
   2988 	 */
   2989 	if (sc->sc_type >= WM_T_82543) {
   2990 		ifp->if_capabilities |=
   2991 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2992 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2993 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2994 		    IFCAP_CSUM_TCPv6_Tx |
   2995 		    IFCAP_CSUM_UDPv6_Tx;
   2996 	}
   2997 
   2998 	/*
   2999 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3000 	 *
   3001 	 *	82541GI (8086:1076) ... no
   3002 	 *	82572EI (8086:10b9) ... yes
   3003 	 */
   3004 	if (sc->sc_type >= WM_T_82571) {
   3005 		ifp->if_capabilities |=
   3006 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3007 	}
   3008 
   3009 	/*
    3010 	 * If we're an i82544 or greater (except i82547), we can do
   3011 	 * TCP segmentation offload.
   3012 	 */
   3013 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3014 		ifp->if_capabilities |= IFCAP_TSOv4;
   3015 	}
   3016 
   3017 	if (sc->sc_type >= WM_T_82571) {
   3018 		ifp->if_capabilities |= IFCAP_TSOv6;
   3019 	}
   3020 
   3021 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3022 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3023 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3024 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3025 
   3026 	/* Attach the interface. */
   3027 	error = if_initialize(ifp);
   3028 	if (error != 0) {
   3029 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3030 		    error);
   3031 		return; /* Error */
   3032 	}
   3033 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3034 	ether_ifattach(ifp, enaddr);
   3035 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3036 	if_register(ifp);
   3037 
   3038 #ifdef WM_EVENT_COUNTERS
   3039 	/* Attach event counters. */
   3040 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3041 	    NULL, xname, "linkintr");
   3042 
   3043 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3044 	    NULL, xname, "tx_xoff");
   3045 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3046 	    NULL, xname, "tx_xon");
   3047 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3048 	    NULL, xname, "rx_xoff");
   3049 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3050 	    NULL, xname, "rx_xon");
   3051 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3052 	    NULL, xname, "rx_macctl");
   3053 #endif /* WM_EVENT_COUNTERS */
   3054 
   3055 	sc->sc_txrx_use_workqueue = false;
   3056 
   3057 	wm_init_sysctls(sc);
   3058 
   3059 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3060 		pmf_class_network_register(self, ifp);
   3061 	else
   3062 		aprint_error_dev(self, "couldn't establish power handler\n");
   3063 
   3064 	sc->sc_flags |= WM_F_ATTACHED;
   3065 out:
   3066 	return;
   3067 }
   3068 
   3069 /* The detach function (ca_detach) */
   3070 static int
   3071 wm_detach(device_t self, int flags __unused)
   3072 {
   3073 	struct wm_softc *sc = device_private(self);
   3074 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3075 	int i;
   3076 
   3077 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3078 		return 0;
   3079 
    3080 	/* Stop the interface; the callouts are stopped in wm_stop(), too. */
   3081 	wm_stop(ifp, 1);
   3082 
   3083 	pmf_device_deregister(self);
   3084 
   3085 	sysctl_teardown(&sc->sc_sysctllog);
   3086 
   3087 #ifdef WM_EVENT_COUNTERS
   3088 	evcnt_detach(&sc->sc_ev_linkintr);
   3089 
   3090 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3091 	evcnt_detach(&sc->sc_ev_tx_xon);
   3092 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3093 	evcnt_detach(&sc->sc_ev_rx_xon);
   3094 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3095 #endif /* WM_EVENT_COUNTERS */
   3096 
   3097 	/* Tell the firmware about the release */
   3098 	WM_CORE_LOCK(sc);
   3099 	wm_release_manageability(sc);
   3100 	wm_release_hw_control(sc);
   3101 	wm_enable_wakeup(sc);
   3102 	WM_CORE_UNLOCK(sc);
   3103 
   3104 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3105 
   3106 	ether_ifdetach(ifp);
   3107 	if_detach(ifp);
   3108 	if_percpuq_destroy(sc->sc_ipq);
   3109 
   3110 	/* Delete all remaining media. */
   3111 	ifmedia_fini(&sc->sc_mii.mii_media);
   3112 
   3113 	/* Unload RX dmamaps and free mbufs */
   3114 	for (i = 0; i < sc->sc_nqueues; i++) {
   3115 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3116 		mutex_enter(rxq->rxq_lock);
   3117 		wm_rxdrain(rxq);
   3118 		mutex_exit(rxq->rxq_lock);
   3119 	}
   3120 	/* Must unlock here */
   3121 
   3122 	/* Disestablish the interrupt handler */
   3123 	for (i = 0; i < sc->sc_nintrs; i++) {
   3124 		if (sc->sc_ihs[i] != NULL) {
   3125 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3126 			sc->sc_ihs[i] = NULL;
   3127 		}
   3128 	}
   3129 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3130 
    3131 	/* wm_stop() ensures the workqueue is stopped. */
   3132 	workqueue_destroy(sc->sc_queue_wq);
   3133 
   3134 	for (i = 0; i < sc->sc_nqueues; i++)
   3135 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3136 
   3137 	wm_free_txrx_queues(sc);
   3138 
   3139 	/* Unmap the registers */
   3140 	if (sc->sc_ss) {
   3141 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3142 		sc->sc_ss = 0;
   3143 	}
   3144 	if (sc->sc_ios) {
   3145 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3146 		sc->sc_ios = 0;
   3147 	}
   3148 	if (sc->sc_flashs) {
   3149 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3150 		sc->sc_flashs = 0;
   3151 	}
   3152 
   3153 	if (sc->sc_core_lock)
   3154 		mutex_obj_free(sc->sc_core_lock);
   3155 	if (sc->sc_ich_phymtx)
   3156 		mutex_obj_free(sc->sc_ich_phymtx);
   3157 	if (sc->sc_ich_nvmmtx)
   3158 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3159 
   3160 	return 0;
   3161 }
   3162 
   3163 static bool
   3164 wm_suspend(device_t self, const pmf_qual_t *qual)
   3165 {
   3166 	struct wm_softc *sc = device_private(self);
   3167 
   3168 	wm_release_manageability(sc);
   3169 	wm_release_hw_control(sc);
   3170 	wm_enable_wakeup(sc);
   3171 
   3172 	return true;
   3173 }
   3174 
   3175 static bool
   3176 wm_resume(device_t self, const pmf_qual_t *qual)
   3177 {
   3178 	struct wm_softc *sc = device_private(self);
   3179 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3180 	pcireg_t reg;
   3181 	char buf[256];
   3182 
   3183 	reg = CSR_READ(sc, WMREG_WUS);
   3184 	if (reg != 0) {
   3185 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3186 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3187 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3188 	}
   3189 
   3190 	if (sc->sc_type >= WM_T_PCH2)
   3191 		wm_resume_workarounds_pchlan(sc);
   3192 	if ((ifp->if_flags & IFF_UP) == 0) {
   3193 		wm_reset(sc);
   3194 		/* Non-AMT based hardware can now take control from firmware */
   3195 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3196 			wm_get_hw_control(sc);
   3197 		wm_init_manageability(sc);
   3198 	} else {
   3199 		/*
   3200 		 * We called pmf_class_network_register(), so if_init() is
    3201 		 * automatically called when IFF_UP is set. wm_reset(),
   3202 		 * wm_get_hw_control() and wm_init_manageability() are called
   3203 		 * via wm_init().
   3204 		 */
   3205 	}
   3206 
   3207 	return true;
   3208 }
   3209 
   3210 /*
   3211  * wm_watchdog:		[ifnet interface function]
   3212  *
   3213  *	Watchdog timer handler.
   3214  */
   3215 static void
   3216 wm_watchdog(struct ifnet *ifp)
   3217 {
   3218 	int qid;
   3219 	struct wm_softc *sc = ifp->if_softc;
    3220 	uint16_t hang_queue = 0; /* One bit per queue; the max number of queues is the 82576's 16. */
   3221 
   3222 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3223 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3224 
   3225 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3226 	}
   3227 
    3228 	/* If any of the queues hung up, reset the interface. */
   3229 	if (hang_queue != 0) {
   3230 		(void)wm_init(ifp);
   3231 
   3232 		/*
    3233 		 * There is still some upper-layer processing that calls
    3234 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3235 		 */
   3236 		/* Try to get more packets going. */
   3237 		ifp->if_start(ifp);
   3238 	}
   3239 }
   3240 
   3241 
   3242 static void
   3243 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3244 {
   3245 
   3246 	mutex_enter(txq->txq_lock);
   3247 	if (txq->txq_sending &&
   3248 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3249 		wm_watchdog_txq_locked(ifp, txq, hang);
   3250 
   3251 	mutex_exit(txq->txq_lock);
   3252 }
   3253 
   3254 static void
   3255 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3256     uint16_t *hang)
   3257 {
   3258 	struct wm_softc *sc = ifp->if_softc;
   3259 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3260 
   3261 	KASSERT(mutex_owned(txq->txq_lock));
   3262 
   3263 	/*
   3264 	 * Since we're using delayed interrupts, sweep up
   3265 	 * before we report an error.
   3266 	 */
   3267 	wm_txeof(txq, UINT_MAX);
   3268 
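         	/* Still pending after the sweep: mark this queue as hung. */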
   3269 	if (txq->txq_sending)
   3270 		*hang |= __BIT(wmq->wmq_id);
   3271 
   3272 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3273 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3274 		    device_xname(sc->sc_dev));
   3275 	} else {
   3276 #ifdef WM_DEBUG
   3277 		int i, j;
   3278 		struct wm_txsoft *txs;
   3279 #endif
   3280 		log(LOG_ERR,
   3281 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3282 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3283 		    txq->txq_next);
   3284 		if_statinc(ifp, if_oerrors);
   3285 #ifdef WM_DEBUG
   3286 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3287 		    i = WM_NEXTTXS(txq, i)) {
   3288 			txs = &txq->txq_soft[i];
   3289 			printf("txs %d tx %d -> %d\n",
   3290 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3291 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3292 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3293 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3294 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3295 					printf("\t %#08x%08x\n",
   3296 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3297 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3298 				} else {
   3299 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3300 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3301 					    txq->txq_descs[j].wtx_addr.wa_low);
   3302 					printf("\t %#04x%02x%02x%08x\n",
   3303 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3304 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3305 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3306 					    txq->txq_descs[j].wtx_cmdlen);
   3307 				}
   3308 				if (j == txs->txs_lastdesc)
   3309 					break;
   3310 			}
   3311 		}
   3312 #endif
   3313 	}
   3314 }
   3315 
   3316 /*
   3317  * wm_tick:
   3318  *
   3319  *	One second timer, used to check link status, sweep up
   3320  *	completed transmit jobs, etc.
   3321  */
   3322 static void
   3323 wm_tick(void *arg)
   3324 {
   3325 	struct wm_softc *sc = arg;
   3326 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3327 #ifndef WM_MPSAFE
   3328 	int s = splnet();
   3329 #endif
   3330 
   3331 	WM_CORE_LOCK(sc);
   3332 
   3333 	if (sc->sc_core_stopping) {
   3334 		WM_CORE_UNLOCK(sc);
   3335 #ifndef WM_MPSAFE
   3336 		splx(s);
   3337 #endif
   3338 		return;
   3339 	}
   3340 
   3341 	if (sc->sc_type >= WM_T_82542_2_1) {
   3342 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3343 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3344 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3345 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3346 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3347 	}
   3348 
   3349 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3350 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3351 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3352 	    + CSR_READ(sc, WMREG_CRCERRS)
   3353 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3354 	    + CSR_READ(sc, WMREG_SYMERRC)
   3355 	    + CSR_READ(sc, WMREG_RXERRC)
   3356 	    + CSR_READ(sc, WMREG_SEC)
   3357 	    + CSR_READ(sc, WMREG_CEXTERR)
   3358 	    + CSR_READ(sc, WMREG_RLEC));
   3359 	/*
    3360 	 * WMREG_RNBC is incremented when there are no available buffers
    3361 	 * in host memory. It does not mean the number of dropped packets,
    3362 	 * because the ethernet controller can still receive packets in
    3363 	 * that case if there is space in the PHY's FIFO.
    3364 	 *
    3365 	 * If you want to know the number of WMREG_RNBC events, you should
    3366 	 * use your own EVCNT instead of if_iqdrops.
   3367 	 */
   3368 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3369 	IF_STAT_PUTREF(ifp);
   3370 
   3371 	if (sc->sc_flags & WM_F_HAS_MII)
   3372 		mii_tick(&sc->sc_mii);
   3373 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3374 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3375 		wm_serdes_tick(sc);
   3376 	else
   3377 		wm_tbi_tick(sc);
   3378 
   3379 	WM_CORE_UNLOCK(sc);
   3380 
   3381 	wm_watchdog(ifp);
   3382 
   3383 	callout_schedule(&sc->sc_tick_ch, hz);
   3384 }
   3385 
   3386 static int
   3387 wm_ifflags_cb(struct ethercom *ec)
   3388 {
   3389 	struct ifnet *ifp = &ec->ec_if;
   3390 	struct wm_softc *sc = ifp->if_softc;
   3391 	u_short iffchange;
   3392 	int ecchange;
   3393 	bool needreset = false;
   3394 	int rc = 0;
   3395 
   3396 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3397 		device_xname(sc->sc_dev), __func__));
   3398 
   3399 	WM_CORE_LOCK(sc);
   3400 
   3401 	/*
   3402 	 * Check for if_flags.
   3403 	 * Main usage is to prevent linkdown when opening bpf.
   3404 	 */
   3405 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3406 	sc->sc_if_flags = ifp->if_flags;
   3407 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3408 		needreset = true;
   3409 		goto ec;
   3410 	}
   3411 
   3412 	/* iff related updates */
   3413 	if ((iffchange & IFF_PROMISC) != 0)
   3414 		wm_set_filter(sc);
   3415 
   3416 	wm_set_vlan(sc);
   3417 
   3418 ec:
   3419 	/* Check for ec_capenable. */
   3420 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3421 	sc->sc_ec_capenable = ec->ec_capenable;
   3422 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3423 		needreset = true;
   3424 		goto out;
   3425 	}
   3426 
   3427 	/* ec related updates */
   3428 	wm_set_eee(sc);
   3429 
   3430 out:
   3431 	if (needreset)
   3432 		rc = ENETRESET;
   3433 	WM_CORE_UNLOCK(sc);
   3434 
   3435 	return rc;
   3436 }
   3437 
   3438 /*
   3439  * wm_ioctl:		[ifnet interface function]
   3440  *
   3441  *	Handle control requests from the operator.
   3442  */
   3443 static int
   3444 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3445 {
   3446 	struct wm_softc *sc = ifp->if_softc;
   3447 	struct ifreq *ifr = (struct ifreq *)data;
   3448 	struct ifaddr *ifa = (struct ifaddr *)data;
   3449 	struct sockaddr_dl *sdl;
   3450 	int s, error;
   3451 
   3452 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3453 		device_xname(sc->sc_dev), __func__));
   3454 
   3455 #ifndef WM_MPSAFE
   3456 	s = splnet();
   3457 #endif
   3458 	switch (cmd) {
   3459 	case SIOCSIFMEDIA:
   3460 		WM_CORE_LOCK(sc);
   3461 		/* Flow control requires full-duplex mode. */
   3462 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3463 		    (ifr->ifr_media & IFM_FDX) == 0)
   3464 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3465 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3466 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3467 				/* We can do both TXPAUSE and RXPAUSE. */
   3468 				ifr->ifr_media |=
   3469 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3470 			}
   3471 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3472 		}
   3473 		WM_CORE_UNLOCK(sc);
   3474 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3475 		break;
   3476 	case SIOCINITIFADDR:
   3477 		WM_CORE_LOCK(sc);
   3478 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3479 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3480 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3481 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3482 			/* Unicast address is the first multicast entry */
   3483 			wm_set_filter(sc);
   3484 			error = 0;
   3485 			WM_CORE_UNLOCK(sc);
   3486 			break;
   3487 		}
   3488 		WM_CORE_UNLOCK(sc);
   3489 		/*FALLTHROUGH*/
   3490 	default:
   3491 #ifdef WM_MPSAFE
   3492 		s = splnet();
   3493 #endif
   3494 		/* It may call wm_start, so unlock here */
   3495 		error = ether_ioctl(ifp, cmd, data);
   3496 #ifdef WM_MPSAFE
   3497 		splx(s);
   3498 #endif
   3499 		if (error != ENETRESET)
   3500 			break;
   3501 
   3502 		error = 0;
   3503 
   3504 		if (cmd == SIOCSIFCAP)
   3505 			error = (*ifp->if_init)(ifp);
   3506 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3507 			;
   3508 		else if (ifp->if_flags & IFF_RUNNING) {
   3509 			/*
   3510 			 * Multicast list has changed; set the hardware filter
   3511 			 * accordingly.
   3512 			 */
   3513 			WM_CORE_LOCK(sc);
   3514 			wm_set_filter(sc);
   3515 			WM_CORE_UNLOCK(sc);
   3516 		}
   3517 		break;
   3518 	}
   3519 
   3520 #ifndef WM_MPSAFE
   3521 	splx(s);
   3522 #endif
   3523 	return error;
   3524 }
   3525 
   3526 /* MAC address related */
   3527 
   3528 /*
    3529  * Get the offset of the MAC address and return it.
    3530  * If an error occurs, offset 0 is used.
   3531  */
   3532 static uint16_t
   3533 wm_check_alt_mac_addr(struct wm_softc *sc)
   3534 {
   3535 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3536 	uint16_t offset = NVM_OFF_MACADDR;
   3537 
   3538 	/* Try to read alternative MAC address pointer */
   3539 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3540 		return 0;
   3541 
    3542 	/* Check whether the pointer is valid. */
   3543 	if ((offset == 0x0000) || (offset == 0xffff))
   3544 		return 0;
   3545 
   3546 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3547 	/*
    3548 	 * Check whether the alternative MAC address is valid.
    3549 	 * Some cards have a non-0xffff pointer but don't actually use
    3550 	 * an alternative MAC address.
   3551 	 *
    3552 	 * Check whether the multicast (group) bit is set.
   3553 	 */
   3554 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3555 		if (((myea[0] & 0xff) & 0x01) == 0)
   3556 			return offset; /* Found */
   3557 
   3558 	/* Not found */
   3559 	return 0;
   3560 }
   3561 
   3562 static int
   3563 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3564 {
   3565 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3566 	uint16_t offset = NVM_OFF_MACADDR;
   3567 	int do_invert = 0;
   3568 
   3569 	switch (sc->sc_type) {
   3570 	case WM_T_82580:
   3571 	case WM_T_I350:
   3572 	case WM_T_I354:
   3573 		/* EEPROM Top Level Partitioning */
   3574 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3575 		break;
   3576 	case WM_T_82571:
   3577 	case WM_T_82575:
   3578 	case WM_T_82576:
   3579 	case WM_T_80003:
   3580 	case WM_T_I210:
   3581 	case WM_T_I211:
   3582 		offset = wm_check_alt_mac_addr(sc);
   3583 		if (offset == 0)
   3584 			if ((sc->sc_funcid & 0x01) == 1)
   3585 				do_invert = 1;
   3586 		break;
   3587 	default:
   3588 		if ((sc->sc_funcid & 0x01) == 1)
   3589 			do_invert = 1;
   3590 		break;
   3591 	}
   3592 
   3593 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3594 		goto bad;
   3595 
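         	/* Each 16-bit NVM word holds two address bytes, low byte first. */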
   3596 	enaddr[0] = myea[0] & 0xff;
   3597 	enaddr[1] = myea[0] >> 8;
   3598 	enaddr[2] = myea[1] & 0xff;
   3599 	enaddr[3] = myea[1] >> 8;
   3600 	enaddr[4] = myea[2] & 0xff;
   3601 	enaddr[5] = myea[2] >> 8;
   3602 
   3603 	/*
   3604 	 * Toggle the LSB of the MAC address on the second port
   3605 	 * of some dual port cards.
   3606 	 */
   3607 	if (do_invert != 0)
   3608 		enaddr[5] ^= 1;
   3609 
   3610 	return 0;
   3611 
   3612  bad:
   3613 	return -1;
   3614 }
   3615 
   3616 /*
   3617  * wm_set_ral:
   3618  *
    3619  *	Set an entry in the receive address list.
   3620  */
   3621 static void
   3622 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3623 {
   3624 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3625 	uint32_t wlock_mac;
   3626 	int rv;
   3627 
   3628 	if (enaddr != NULL) {
   3629 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3630 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3631 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3632 		ral_hi |= RAL_AV;
   3633 	} else {
   3634 		ral_lo = 0;
   3635 		ral_hi = 0;
   3636 	}
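         	/*
         	 * A NULL enaddr writes an all-zero RAL/RAH pair; with RAL_AV
         	 * (the Address Valid bit) clear, the slot is disabled.
         	 */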
   3637 
   3638 	switch (sc->sc_type) {
   3639 	case WM_T_82542_2_0:
   3640 	case WM_T_82542_2_1:
   3641 	case WM_T_82543:
   3642 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3643 		CSR_WRITE_FLUSH(sc);
   3644 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3645 		CSR_WRITE_FLUSH(sc);
   3646 		break;
   3647 	case WM_T_PCH2:
   3648 	case WM_T_PCH_LPT:
   3649 	case WM_T_PCH_SPT:
   3650 	case WM_T_PCH_CNP:
   3651 		if (idx == 0) {
   3652 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3653 			CSR_WRITE_FLUSH(sc);
   3654 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3655 			CSR_WRITE_FLUSH(sc);
   3656 			return;
   3657 		}
   3658 		if (sc->sc_type != WM_T_PCH2) {
   3659 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3660 			    FWSM_WLOCK_MAC);
   3661 			addrl = WMREG_SHRAL(idx - 1);
   3662 			addrh = WMREG_SHRAH(idx - 1);
   3663 		} else {
   3664 			wlock_mac = 0;
   3665 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3666 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3667 		}
   3668 
   3669 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3670 			rv = wm_get_swflag_ich8lan(sc);
   3671 			if (rv != 0)
   3672 				return;
   3673 			CSR_WRITE(sc, addrl, ral_lo);
   3674 			CSR_WRITE_FLUSH(sc);
   3675 			CSR_WRITE(sc, addrh, ral_hi);
   3676 			CSR_WRITE_FLUSH(sc);
   3677 			wm_put_swflag_ich8lan(sc);
   3678 		}
   3679 
   3680 		break;
   3681 	default:
   3682 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3683 		CSR_WRITE_FLUSH(sc);
   3684 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3685 		CSR_WRITE_FLUSH(sc);
   3686 		break;
   3687 	}
   3688 }
   3689 
   3690 /*
   3691  * wm_mchash:
   3692  *
    3693  *	Compute the hash of the multicast address for the 4096-bit
    3694  *	(1024-bit on ICH/PCH variants) multicast filter.
   3695  */
   3696 static uint32_t
   3697 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3698 {
   3699 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3700 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3701 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3702 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3703 	uint32_t hash;
   3704 
   3705 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3706 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3707 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3708 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3709 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3710 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3711 		return (hash & 0x3ff);
   3712 	}
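         	/*
         	 * Example: with sc_mchash_type 0, lo_shift is 4 and hi_shift
         	 * is 4, so the 12-bit hash is enaddr[5] concatenated with the
         	 * upper nibble of enaddr[4], i.e. the most significant twelve
         	 * bits of the station address.
         	 */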
   3713 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3714 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3715 
   3716 	return (hash & 0xfff);
   3717 }
   3718 
   3719 /*
    3720  * wm_rar_count:
    3721  *	Return the number of entries in the receive address list.
   3722  */
   3723 static int
   3724 wm_rar_count(struct wm_softc *sc)
   3725 {
   3726 	int size;
   3727 
   3728 	switch (sc->sc_type) {
   3729 	case WM_T_ICH8:
    3730 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3731 		break;
   3732 	case WM_T_ICH9:
   3733 	case WM_T_ICH10:
   3734 	case WM_T_PCH:
   3735 		size = WM_RAL_TABSIZE_ICH8;
   3736 		break;
   3737 	case WM_T_PCH2:
   3738 		size = WM_RAL_TABSIZE_PCH2;
   3739 		break;
   3740 	case WM_T_PCH_LPT:
   3741 	case WM_T_PCH_SPT:
   3742 	case WM_T_PCH_CNP:
   3743 		size = WM_RAL_TABSIZE_PCH_LPT;
   3744 		break;
   3745 	case WM_T_82575:
   3746 	case WM_T_I210:
   3747 	case WM_T_I211:
   3748 		size = WM_RAL_TABSIZE_82575;
   3749 		break;
   3750 	case WM_T_82576:
   3751 	case WM_T_82580:
   3752 		size = WM_RAL_TABSIZE_82576;
   3753 		break;
   3754 	case WM_T_I350:
   3755 	case WM_T_I354:
   3756 		size = WM_RAL_TABSIZE_I350;
   3757 		break;
   3758 	default:
   3759 		size = WM_RAL_TABSIZE;
   3760 	}
   3761 
   3762 	return size;
   3763 }
   3764 
   3765 /*
   3766  * wm_set_filter:
   3767  *
   3768  *	Set up the receive filter.
   3769  */
   3770 static void
   3771 wm_set_filter(struct wm_softc *sc)
   3772 {
   3773 	struct ethercom *ec = &sc->sc_ethercom;
   3774 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3775 	struct ether_multi *enm;
   3776 	struct ether_multistep step;
   3777 	bus_addr_t mta_reg;
   3778 	uint32_t hash, reg, bit;
   3779 	int i, size, ralmax;
   3780 
   3781 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3782 		device_xname(sc->sc_dev), __func__));
   3783 
   3784 	if (sc->sc_type >= WM_T_82544)
   3785 		mta_reg = WMREG_CORDOVA_MTA;
   3786 	else
   3787 		mta_reg = WMREG_MTA;
   3788 
   3789 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3790 
   3791 	if (ifp->if_flags & IFF_BROADCAST)
   3792 		sc->sc_rctl |= RCTL_BAM;
   3793 	if (ifp->if_flags & IFF_PROMISC) {
   3794 		sc->sc_rctl |= RCTL_UPE;
   3795 		ETHER_LOCK(ec);
   3796 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3797 		ETHER_UNLOCK(ec);
   3798 		goto allmulti;
   3799 	}
   3800 
   3801 	/*
   3802 	 * Set the station address in the first RAL slot, and
   3803 	 * clear the remaining slots.
   3804 	 */
   3805 	size = wm_rar_count(sc);
   3806 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3807 
   3808 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3809 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3810 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3811 		switch (i) {
   3812 		case 0:
   3813 			/* We can use all entries */
   3814 			ralmax = size;
   3815 			break;
   3816 		case 1:
   3817 			/* Only RAR[0] */
   3818 			ralmax = 1;
   3819 			break;
   3820 		default:
   3821 			/* Available SHRA + RAR[0] */
   3822 			ralmax = i + 1;
   3823 		}
   3824 	} else
   3825 		ralmax = size;
   3826 	for (i = 1; i < size; i++) {
   3827 		if (i < ralmax)
   3828 			wm_set_ral(sc, NULL, i);
   3829 	}
   3830 
   3831 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3832 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3833 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3834 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3835 		size = WM_ICH8_MC_TABSIZE;
   3836 	else
   3837 		size = WM_MC_TABSIZE;
   3838 	/* Clear out the multicast table. */
   3839 	for (i = 0; i < size; i++) {
   3840 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3841 		CSR_WRITE_FLUSH(sc);
   3842 	}
   3843 
   3844 	ETHER_LOCK(ec);
   3845 	ETHER_FIRST_MULTI(step, ec, enm);
   3846 	while (enm != NULL) {
   3847 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3848 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3849 			ETHER_UNLOCK(ec);
   3850 			/*
   3851 			 * We must listen to a range of multicast addresses.
   3852 			 * For now, just accept all multicasts, rather than
   3853 			 * trying to set only those filter bits needed to match
   3854 			 * the range.  (At this time, the only use of address
   3855 			 * ranges is for IP multicast routing, for which the
   3856 			 * range is big enough to require all bits set.)
   3857 			 */
   3858 			goto allmulti;
   3859 		}
   3860 
   3861 		hash = wm_mchash(sc, enm->enm_addrlo);
   3862 
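         		/*
         		 * The multicast table is an array of 32-bit registers;
         		 * the upper hash bits select the register and the low
         		 * five bits select the bit within it.
         		 */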
   3863 		reg = (hash >> 5);
   3864 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3865 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3866 		    || (sc->sc_type == WM_T_PCH2)
   3867 		    || (sc->sc_type == WM_T_PCH_LPT)
   3868 		    || (sc->sc_type == WM_T_PCH_SPT)
   3869 		    || (sc->sc_type == WM_T_PCH_CNP))
   3870 			reg &= 0x1f;
   3871 		else
   3872 			reg &= 0x7f;
   3873 		bit = hash & 0x1f;
   3874 
   3875 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3876 		hash |= 1U << bit;
   3877 
   3878 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3879 			/*
   3880 			 * 82544 Errata 9: Certain register cannot be written
   3881 			 * with particular alignments in PCI-X bus operation
   3882 			 * (FCAH, MTA and VFTA).
   3883 			 */
   3884 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3885 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3886 			CSR_WRITE_FLUSH(sc);
   3887 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3888 			CSR_WRITE_FLUSH(sc);
   3889 		} else {
   3890 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3891 			CSR_WRITE_FLUSH(sc);
   3892 		}
   3893 
   3894 		ETHER_NEXT_MULTI(step, enm);
   3895 	}
   3896 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3897 	ETHER_UNLOCK(ec);
   3898 
   3899 	goto setit;
   3900 
   3901  allmulti:
   3902 	sc->sc_rctl |= RCTL_MPE;
   3903 
   3904  setit:
   3905 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3906 }
   3907 
   3908 /* Reset and init related */
   3909 
   3910 static void
   3911 wm_set_vlan(struct wm_softc *sc)
   3912 {
   3913 
   3914 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3915 		device_xname(sc->sc_dev), __func__));
   3916 
   3917 	/* Deal with VLAN enables. */
   3918 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3919 		sc->sc_ctrl |= CTRL_VME;
   3920 	else
   3921 		sc->sc_ctrl &= ~CTRL_VME;
   3922 
   3923 	/* Write the control registers. */
   3924 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3925 }
   3926 
   3927 static void
   3928 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3929 {
   3930 	uint32_t gcr;
   3931 	pcireg_t ctrl2;
   3932 
   3933 	gcr = CSR_READ(sc, WMREG_GCR);
   3934 
   3935 	/* Only take action if timeout value is defaulted to 0 */
   3936 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3937 		goto out;
   3938 
   3939 	if ((gcr & GCR_CAP_VER2) == 0) {
   3940 		gcr |= GCR_CMPL_TMOUT_10MS;
   3941 		goto out;
   3942 	}
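         	/*
         	 * Capability-version-2 devices take the completion timeout
         	 * from the PCIe Device Control 2 configuration register
         	 * instead.
         	 */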
   3943 
   3944 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3945 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3946 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3947 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3948 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3949 
   3950 out:
   3951 	/* Disable completion timeout resend */
   3952 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3953 
   3954 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3955 }
   3956 
   3957 void
   3958 wm_get_auto_rd_done(struct wm_softc *sc)
   3959 {
   3960 	int i;
   3961 
    3962 	/* Wait for eeprom to reload */
   3963 	switch (sc->sc_type) {
   3964 	case WM_T_82571:
   3965 	case WM_T_82572:
   3966 	case WM_T_82573:
   3967 	case WM_T_82574:
   3968 	case WM_T_82583:
   3969 	case WM_T_82575:
   3970 	case WM_T_82576:
   3971 	case WM_T_82580:
   3972 	case WM_T_I350:
   3973 	case WM_T_I354:
   3974 	case WM_T_I210:
   3975 	case WM_T_I211:
   3976 	case WM_T_80003:
   3977 	case WM_T_ICH8:
   3978 	case WM_T_ICH9:
   3979 		for (i = 0; i < 10; i++) {
   3980 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3981 				break;
   3982 			delay(1000);
   3983 		}
   3984 		if (i == 10) {
   3985 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3986 			    "complete\n", device_xname(sc->sc_dev));
   3987 		}
   3988 		break;
   3989 	default:
   3990 		break;
   3991 	}
   3992 }
   3993 
   3994 void
   3995 wm_lan_init_done(struct wm_softc *sc)
   3996 {
   3997 	uint32_t reg = 0;
   3998 	int i;
   3999 
   4000 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4001 		device_xname(sc->sc_dev), __func__));
   4002 
   4003 	/* Wait for eeprom to reload */
   4004 	switch (sc->sc_type) {
   4005 	case WM_T_ICH10:
   4006 	case WM_T_PCH:
   4007 	case WM_T_PCH2:
   4008 	case WM_T_PCH_LPT:
   4009 	case WM_T_PCH_SPT:
   4010 	case WM_T_PCH_CNP:
   4011 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4012 			reg = CSR_READ(sc, WMREG_STATUS);
   4013 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4014 				break;
   4015 			delay(100);
   4016 		}
   4017 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4018 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4019 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4020 		}
   4021 		break;
   4022 	default:
   4023 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4024 		    __func__);
   4025 		break;
   4026 	}
   4027 
   4028 	reg &= ~STATUS_LAN_INIT_DONE;
   4029 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4030 }
   4031 
   4032 void
   4033 wm_get_cfg_done(struct wm_softc *sc)
   4034 {
   4035 	int mask;
   4036 	uint32_t reg;
   4037 	int i;
   4038 
   4039 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4040 		device_xname(sc->sc_dev), __func__));
   4041 
   4042 	/* Wait for eeprom to reload */
   4043 	switch (sc->sc_type) {
   4044 	case WM_T_82542_2_0:
   4045 	case WM_T_82542_2_1:
   4046 		/* null */
   4047 		break;
   4048 	case WM_T_82543:
   4049 	case WM_T_82544:
   4050 	case WM_T_82540:
   4051 	case WM_T_82545:
   4052 	case WM_T_82545_3:
   4053 	case WM_T_82546:
   4054 	case WM_T_82546_3:
   4055 	case WM_T_82541:
   4056 	case WM_T_82541_2:
   4057 	case WM_T_82547:
   4058 	case WM_T_82547_2:
   4059 	case WM_T_82573:
   4060 	case WM_T_82574:
   4061 	case WM_T_82583:
   4062 		/* generic */
   4063 		delay(10*1000);
   4064 		break;
   4065 	case WM_T_80003:
   4066 	case WM_T_82571:
   4067 	case WM_T_82572:
   4068 	case WM_T_82575:
   4069 	case WM_T_82576:
   4070 	case WM_T_82580:
   4071 	case WM_T_I350:
   4072 	case WM_T_I354:
   4073 	case WM_T_I210:
   4074 	case WM_T_I211:
   4075 		if (sc->sc_type == WM_T_82571) {
    4076 			/* On the 82571, all functions share the port 0 bit */
   4077 			mask = EEMNGCTL_CFGDONE_0;
   4078 		} else
   4079 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4080 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4081 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4082 				break;
   4083 			delay(1000);
   4084 		}
   4085 		if (i >= WM_PHY_CFG_TIMEOUT)
   4086 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4087 				device_xname(sc->sc_dev), __func__));
   4088 		break;
   4089 	case WM_T_ICH8:
   4090 	case WM_T_ICH9:
   4091 	case WM_T_ICH10:
   4092 	case WM_T_PCH:
   4093 	case WM_T_PCH2:
   4094 	case WM_T_PCH_LPT:
   4095 	case WM_T_PCH_SPT:
   4096 	case WM_T_PCH_CNP:
   4097 		delay(10*1000);
   4098 		if (sc->sc_type >= WM_T_ICH10)
   4099 			wm_lan_init_done(sc);
   4100 		else
   4101 			wm_get_auto_rd_done(sc);
   4102 
   4103 		/* Clear PHY Reset Asserted bit */
   4104 		reg = CSR_READ(sc, WMREG_STATUS);
   4105 		if ((reg & STATUS_PHYRA) != 0)
   4106 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4107 		break;
   4108 	default:
   4109 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4110 		    __func__);
   4111 		break;
   4112 	}
   4113 }
   4114 
   4115 int
   4116 wm_phy_post_reset(struct wm_softc *sc)
   4117 {
   4118 	device_t dev = sc->sc_dev;
   4119 	uint16_t reg;
   4120 	int rv = 0;
   4121 
   4122 	/* This function is only for ICH8 and newer. */
   4123 	if (sc->sc_type < WM_T_ICH8)
   4124 		return 0;
   4125 
   4126 	if (wm_phy_resetisblocked(sc)) {
   4127 		/* XXX */
   4128 		device_printf(dev, "PHY is blocked\n");
   4129 		return -1;
   4130 	}
   4131 
   4132 	/* Allow time for h/w to get to quiescent state after reset */
   4133 	delay(10*1000);
   4134 
   4135 	/* Perform any necessary post-reset workarounds */
   4136 	if (sc->sc_type == WM_T_PCH)
   4137 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4138 	else if (sc->sc_type == WM_T_PCH2)
   4139 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4140 	if (rv != 0)
   4141 		return rv;
   4142 
   4143 	/* Clear the host wakeup bit after lcd reset */
   4144 	if (sc->sc_type >= WM_T_PCH) {
   4145 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4146 		reg &= ~BM_WUC_HOST_WU_BIT;
   4147 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4148 	}
   4149 
   4150 	/* Configure the LCD with the extended configuration region in NVM */
   4151 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4152 		return rv;
   4153 
   4154 	/* Configure the LCD with the OEM bits in NVM */
   4155 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4156 
   4157 	if (sc->sc_type == WM_T_PCH2) {
   4158 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4159 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4160 			delay(10 * 1000);
   4161 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4162 		}
   4163 		/* Set EEE LPI Update Timer to 200usec */
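         		/* (0x1387 = 4999; presumably in 40 ns units, ~200 us.) */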
   4164 		rv = sc->phy.acquire(sc);
   4165 		if (rv)
   4166 			return rv;
   4167 		rv = wm_write_emi_reg_locked(dev,
   4168 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4169 		sc->phy.release(sc);
   4170 	}
   4171 
   4172 	return rv;
   4173 }
   4174 
   4175 /* Only for PCH and newer */
   4176 static int
   4177 wm_write_smbus_addr(struct wm_softc *sc)
   4178 {
   4179 	uint32_t strap, freq;
   4180 	uint16_t phy_data;
   4181 	int rv;
   4182 
   4183 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4184 		device_xname(sc->sc_dev), __func__));
   4185 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4186 
   4187 	strap = CSR_READ(sc, WMREG_STRAP);
   4188 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4189 
   4190 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4191 	if (rv != 0)
   4192 		return -1;
   4193 
   4194 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4195 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4196 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4197 
   4198 	if (sc->sc_phytype == WMPHY_I217) {
   4199 		/* Restore SMBus frequency */
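         		/*
         		 * A strap value of zero means no valid frequency;
         		 * otherwise the value appears to be biased by one and
         		 * its low two bits select the frequency.
         		 */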
    4200 		if (freq--) {
   4201 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4202 			    | HV_SMB_ADDR_FREQ_HIGH);
   4203 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4204 			    HV_SMB_ADDR_FREQ_LOW);
   4205 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4206 			    HV_SMB_ADDR_FREQ_HIGH);
   4207 		} else
   4208 			DPRINTF(WM_DEBUG_INIT,
   4209 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4210 				device_xname(sc->sc_dev), __func__));
   4211 	}
   4212 
   4213 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4214 	    phy_data);
   4215 }
   4216 
   4217 static int
   4218 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4219 {
   4220 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4221 	uint16_t phy_page = 0;
   4222 	int rv = 0;
   4223 
   4224 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4225 		device_xname(sc->sc_dev), __func__));
   4226 
   4227 	switch (sc->sc_type) {
   4228 	case WM_T_ICH8:
   4229 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4230 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4231 			return 0;
   4232 
   4233 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4234 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4235 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4236 			break;
   4237 		}
   4238 		/* FALLTHROUGH */
   4239 	case WM_T_PCH:
   4240 	case WM_T_PCH2:
   4241 	case WM_T_PCH_LPT:
   4242 	case WM_T_PCH_SPT:
   4243 	case WM_T_PCH_CNP:
   4244 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4245 		break;
   4246 	default:
   4247 		return 0;
   4248 	}
   4249 
   4250 	if ((rv = sc->phy.acquire(sc)) != 0)
   4251 		return rv;
   4252 
   4253 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4254 	if ((reg & sw_cfg_mask) == 0)
   4255 		goto release;
   4256 
   4257 	/*
   4258 	 * Make sure HW does not configure LCD from PHY extended configuration
   4259 	 * before SW configuration
   4260 	 */
   4261 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4262 	if ((sc->sc_type < WM_T_PCH2)
   4263 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4264 		goto release;
   4265 
   4266 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4267 		device_xname(sc->sc_dev), __func__));
   4268 	/* word_addr is in DWORD */
   4269 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4270 
   4271 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4272 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4273 	if (cnf_size == 0)
   4274 		goto release;
   4275 
   4276 	if (((sc->sc_type == WM_T_PCH)
   4277 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4278 	    || (sc->sc_type > WM_T_PCH)) {
   4279 		/*
   4280 		 * HW configures the SMBus address and LEDs when the OEM and
   4281 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4282 		 * are cleared, SW will configure them instead.
   4283 		 */
   4284 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4285 			device_xname(sc->sc_dev), __func__));
   4286 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4287 			goto release;
   4288 
   4289 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4290 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4291 		    (uint16_t)reg);
   4292 		if (rv != 0)
   4293 			goto release;
   4294 	}
   4295 
   4296 	/* Configure LCD from extended configuration region. */
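         	/*
         	 * Each entry in the region is a pair of NVM words: the data
         	 * word first, followed by the PHY register address.
         	 */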
   4297 	for (i = 0; i < cnf_size; i++) {
   4298 		uint16_t reg_data, reg_addr;
   4299 
   4300 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4301 			goto release;
   4302 
   4303 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4304 			goto release;
   4305 
   4306 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4307 			phy_page = reg_data;
   4308 
   4309 		reg_addr &= IGPHY_MAXREGADDR;
   4310 		reg_addr |= phy_page;
   4311 
   4312 		KASSERT(sc->phy.writereg_locked != NULL);
   4313 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4314 		    reg_data);
   4315 	}
   4316 
   4317 release:
   4318 	sc->phy.release(sc);
   4319 	return rv;
   4320 }
   4321 
   4322 /*
   4323  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4324  *  @sc:       pointer to the HW structure
   4325  *  @d0_state: boolean if entering d0 or d3 device state
   4326  *
   4327  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4328  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4329  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4330  */
   4331 int
   4332 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4333 {
   4334 	uint32_t mac_reg;
   4335 	uint16_t oem_reg;
   4336 	int rv;
   4337 
   4338 	if (sc->sc_type < WM_T_PCH)
   4339 		return 0;
   4340 
   4341 	rv = sc->phy.acquire(sc);
   4342 	if (rv != 0)
   4343 		return rv;
   4344 
   4345 	if (sc->sc_type == WM_T_PCH) {
   4346 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4347 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4348 			goto release;
   4349 	}
   4350 
   4351 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4352 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4353 		goto release;
   4354 
   4355 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4356 
   4357 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4358 	if (rv != 0)
   4359 		goto release;
   4360 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4361 
   4362 	if (d0_state) {
   4363 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4364 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4365 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4366 			oem_reg |= HV_OEM_BITS_LPLU;
   4367 	} else {
   4368 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4369 		    != 0)
   4370 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4371 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4372 		    != 0)
   4373 			oem_reg |= HV_OEM_BITS_LPLU;
   4374 	}
   4375 
   4376 	/* Set Restart auto-neg to activate the bits */
   4377 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4378 	    && (wm_phy_resetisblocked(sc) == false))
   4379 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4380 
   4381 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4382 
   4383 release:
   4384 	sc->phy.release(sc);
   4385 
   4386 	return rv;
   4387 }
   4388 
   4389 /* Init hardware bits */
   4390 void
   4391 wm_initialize_hardware_bits(struct wm_softc *sc)
   4392 {
   4393 	uint32_t tarc0, tarc1, reg;
   4394 
   4395 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4396 		device_xname(sc->sc_dev), __func__));
   4397 
   4398 	/* For 82571 variant, 80003 and ICHs */
   4399 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4400 	    || (sc->sc_type >= WM_T_80003)) {
   4401 
   4402 		/* Transmit Descriptor Control 0 */
   4403 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4404 		reg |= TXDCTL_COUNT_DESC;
   4405 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4406 
   4407 		/* Transmit Descriptor Control 1 */
   4408 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4409 		reg |= TXDCTL_COUNT_DESC;
   4410 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4411 
   4412 		/* TARC0 */
   4413 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4414 		switch (sc->sc_type) {
   4415 		case WM_T_82571:
   4416 		case WM_T_82572:
   4417 		case WM_T_82573:
   4418 		case WM_T_82574:
   4419 		case WM_T_82583:
   4420 		case WM_T_80003:
   4421 			/* Clear bits 30..27 */
   4422 			tarc0 &= ~__BITS(30, 27);
   4423 			break;
   4424 		default:
   4425 			break;
   4426 		}
   4427 
   4428 		switch (sc->sc_type) {
   4429 		case WM_T_82571:
   4430 		case WM_T_82572:
   4431 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4432 
   4433 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4434 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4435 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4436 			/* 8257[12] Errata No.7 */
    4437 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4438 
   4439 			/* TARC1 bit 28 */
   4440 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4441 				tarc1 &= ~__BIT(28);
   4442 			else
   4443 				tarc1 |= __BIT(28);
   4444 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4445 
   4446 			/*
   4447 			 * 8257[12] Errata No.13
    4448 			 * Disable Dynamic Clock Gating.
   4449 			 */
   4450 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4451 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4452 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4453 			break;
   4454 		case WM_T_82573:
   4455 		case WM_T_82574:
   4456 		case WM_T_82583:
   4457 			if ((sc->sc_type == WM_T_82574)
   4458 			    || (sc->sc_type == WM_T_82583))
   4459 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4460 
   4461 			/* Extended Device Control */
   4462 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4463 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4464 			reg |= __BIT(22);	/* Set bit 22 */
   4465 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4466 
   4467 			/* Device Control */
   4468 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4469 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4470 
   4471 			/* PCIe Control Register */
   4472 			/*
   4473 			 * 82573 Errata (unknown).
   4474 			 *
   4475 			 * 82574 Errata 25 and 82583 Errata 12
   4476 			 * "Dropped Rx Packets":
    4477 			 *   NVM image version 2.1.4 and newer do not have this bug.
   4478 			 */
   4479 			reg = CSR_READ(sc, WMREG_GCR);
   4480 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4481 			CSR_WRITE(sc, WMREG_GCR, reg);
   4482 
   4483 			if ((sc->sc_type == WM_T_82574)
   4484 			    || (sc->sc_type == WM_T_82583)) {
   4485 				/*
    4486 				 * The documentation says this bit must be
    4487 				 * set for proper operation.
   4488 				 */
   4489 				reg = CSR_READ(sc, WMREG_GCR);
   4490 				reg |= __BIT(22);
   4491 				CSR_WRITE(sc, WMREG_GCR, reg);
   4492 
   4493 				/*
    4494 				 * Apply a workaround for a hardware erratum
    4495 				 * documented in the errata docs: it fixes an
    4496 				 * issue where error-prone or unreliable PCIe
    4497 				 * completions occur, particularly with ASPM
    4498 				 * enabled. Without the fix, the issue can
    4499 				 * cause Tx timeouts.
   4500 				 */
   4501 				reg = CSR_READ(sc, WMREG_GCR2);
   4502 				reg |= __BIT(0);
   4503 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4504 			}
   4505 			break;
   4506 		case WM_T_80003:
   4507 			/* TARC0 */
   4508 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4509 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4510 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4511 
   4512 			/* TARC1 bit 28 */
   4513 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4514 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4515 				tarc1 &= ~__BIT(28);
   4516 			else
   4517 				tarc1 |= __BIT(28);
   4518 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4519 			break;
   4520 		case WM_T_ICH8:
   4521 		case WM_T_ICH9:
   4522 		case WM_T_ICH10:
   4523 		case WM_T_PCH:
   4524 		case WM_T_PCH2:
   4525 		case WM_T_PCH_LPT:
   4526 		case WM_T_PCH_SPT:
   4527 		case WM_T_PCH_CNP:
   4528 			/* TARC0 */
   4529 			if (sc->sc_type == WM_T_ICH8) {
   4530 				/* Set TARC0 bits 29 and 28 */
   4531 				tarc0 |= __BITS(29, 28);
   4532 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4533 				tarc0 |= __BIT(29);
   4534 				/*
    4535 				 * Clear bit 28 (from Linux).
    4536 				 * See the I218/I219 spec update,
   4537 				 * "5. Buffer Overrun While the I219 is
   4538 				 * Processing DMA Transactions"
   4539 				 */
   4540 				tarc0 &= ~__BIT(28);
   4541 			}
   4542 			/* Set TARC0 bits 23,24,26,27 */
   4543 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4544 
   4545 			/* CTRL_EXT */
   4546 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4547 			reg |= __BIT(22);	/* Set bit 22 */
   4548 			/*
   4549 			 * Enable PHY low-power state when MAC is at D3
   4550 			 * w/o WoL
   4551 			 */
   4552 			if (sc->sc_type >= WM_T_PCH)
   4553 				reg |= CTRL_EXT_PHYPDEN;
   4554 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4555 
   4556 			/* TARC1 */
   4557 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4558 			/* bit 28 */
   4559 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4560 				tarc1 &= ~__BIT(28);
   4561 			else
   4562 				tarc1 |= __BIT(28);
   4563 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4564 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4565 
   4566 			/* Device Status */
   4567 			if (sc->sc_type == WM_T_ICH8) {
   4568 				reg = CSR_READ(sc, WMREG_STATUS);
   4569 				reg &= ~__BIT(31);
   4570 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4571 
   4572 			}
   4573 
   4574 			/* IOSFPC */
   4575 			if (sc->sc_type == WM_T_PCH_SPT) {
   4576 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4577 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4578 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4579 			}
   4580 			/*
    4581 			 * Work around a descriptor data corruption issue
    4582 			 * during NFS v2 UDP traffic: just disable the NFS
    4583 			 * filtering capability.
   4584 			 */
   4585 			reg = CSR_READ(sc, WMREG_RFCTL);
   4586 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4587 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4588 			break;
   4589 		default:
   4590 			break;
   4591 		}
   4592 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4593 
   4594 		switch (sc->sc_type) {
   4595 		/*
   4596 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4597 		 * Avoid RSS Hash Value bug.
   4598 		 */
   4599 		case WM_T_82571:
   4600 		case WM_T_82572:
   4601 		case WM_T_82573:
   4602 		case WM_T_80003:
   4603 		case WM_T_ICH8:
   4604 			reg = CSR_READ(sc, WMREG_RFCTL);
   4605 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4606 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4607 			break;
   4608 		case WM_T_82574:
    4609 			/* Use the extended Rx descriptor. */
   4610 			reg = CSR_READ(sc, WMREG_RFCTL);
   4611 			reg |= WMREG_RFCTL_EXSTEN;
   4612 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4613 			break;
   4614 		default:
   4615 			break;
   4616 		}
   4617 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4618 		/*
   4619 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4620 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4621 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4622 		 * Correctly by the Device"
   4623 		 *
   4624 		 * I354(C2000) Errata AVR53:
   4625 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4626 		 * Hang"
   4627 		 */
   4628 		reg = CSR_READ(sc, WMREG_RFCTL);
   4629 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4630 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4631 	}
   4632 }
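         
         /*
          * Illustrative sketch (not driver code): the __BIT/__BITS/__SHIFTIN
          * macros used above build register bit masks, e.g.:
          *
          *	__BIT(28)                    == 0x10000000
          *	__BITS(30, 27)               == 0x78000000 (bits 27..30 incl.)
          *	__SHIFTIN(0x3, __BITS(9, 8)) == 0x00000300
          */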
   4633 
   4634 static uint32_t
   4635 wm_rxpbs_adjust_82580(uint32_t val)
   4636 {
   4637 	uint32_t rv = 0;
   4638 
   4639 	if (val < __arraycount(wm_82580_rxpbs_table))
   4640 		rv = wm_82580_rxpbs_table[val];
   4641 
   4642 	return rv;
   4643 }
   4644 
   4645 /*
   4646  * wm_reset_phy:
   4647  *
    4648  *	Generic PHY reset function.
    4649  *	Same as e1000_phy_hw_reset_generic().
   4650  */
   4651 static int
   4652 wm_reset_phy(struct wm_softc *sc)
   4653 {
   4654 	uint32_t reg;
   4655 
   4656 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4657 		device_xname(sc->sc_dev), __func__));
   4658 	if (wm_phy_resetisblocked(sc))
   4659 		return -1;
   4660 
   4661 	sc->phy.acquire(sc);
   4662 
   4663 	reg = CSR_READ(sc, WMREG_CTRL);
   4664 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4665 	CSR_WRITE_FLUSH(sc);
   4666 
   4667 	delay(sc->phy.reset_delay_us);
   4668 
   4669 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4670 	CSR_WRITE_FLUSH(sc);
   4671 
   4672 	delay(150);
   4673 
   4674 	sc->phy.release(sc);
   4675 
   4676 	wm_get_cfg_done(sc);
   4677 	wm_phy_post_reset(sc);
   4678 
   4679 	return 0;
   4680 }
   4681 
   4682 /*
   4683  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4684  * so it is enough to check sc->sc_queue[0] only.
   4685  */
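         /*
          * Flush sequence sketch (a summary of the code below): queue a single
          * dummy 512-byte descriptor with TCTL.EN forced on so the DMA engine
          * can drain the TX ring; then, if the flush request is still pending,
          * briefly toggle RCTL.EN with tightened RXDCTL thresholds to flush
          * the RX ring as well.
          */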
   4686 static void
   4687 wm_flush_desc_rings(struct wm_softc *sc)
   4688 {
   4689 	pcireg_t preg;
   4690 	uint32_t reg;
   4691 	struct wm_txqueue *txq;
   4692 	wiseman_txdesc_t *txd;
   4693 	int nexttx;
   4694 	uint32_t rctl;
   4695 
   4696 	/* First, disable MULR fix in FEXTNVM11 */
   4697 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4698 	reg |= FEXTNVM11_DIS_MULRFIX;
   4699 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4700 
   4701 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4702 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4703 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4704 		return;
   4705 
   4706 	/* TX */
   4707 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4708 	    preg, reg);
   4709 	reg = CSR_READ(sc, WMREG_TCTL);
   4710 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4711 
   4712 	txq = &sc->sc_queue[0].wmq_txq;
   4713 	nexttx = txq->txq_next;
   4714 	txd = &txq->txq_descs[nexttx];
   4715 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4716 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4717 	txd->wtx_fields.wtxu_status = 0;
   4718 	txd->wtx_fields.wtxu_options = 0;
   4719 	txd->wtx_fields.wtxu_vlan = 0;
   4720 
   4721 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4722 	    BUS_SPACE_BARRIER_WRITE);
   4723 
   4724 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4725 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4726 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4727 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4728 	delay(250);
   4729 
   4730 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4731 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4732 		return;
   4733 
   4734 	/* RX */
   4735 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4736 	rctl = CSR_READ(sc, WMREG_RCTL);
   4737 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4738 	CSR_WRITE_FLUSH(sc);
   4739 	delay(150);
   4740 
   4741 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4742 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4743 	reg &= 0xffffc000;
   4744 	/*
   4745 	 * Update thresholds: prefetch threshold to 31, host threshold
   4746 	 * to 1 and make sure the granularity is "descriptors" and not
   4747 	 * "cache lines"
   4748 	 */
   4749 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4750 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4751 
   4752 	/* Momentarily enable the RX ring for the changes to take effect */
   4753 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4754 	CSR_WRITE_FLUSH(sc);
   4755 	delay(150);
   4756 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4757 }
   4758 
   4759 /*
   4760  * wm_reset:
   4761  *
   4762  *	Reset the i82542 chip.
   4763  */
   4764 static void
   4765 wm_reset(struct wm_softc *sc)
   4766 {
   4767 	int phy_reset = 0;
   4768 	int i, error = 0;
   4769 	uint32_t reg;
   4770 	uint16_t kmreg;
   4771 	int rv;
   4772 
   4773 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4774 		device_xname(sc->sc_dev), __func__));
   4775 	KASSERT(sc->sc_type != 0);
   4776 
   4777 	/*
   4778 	 * Allocate on-chip memory according to the MTU size.
   4779 	 * The Packet Buffer Allocation register must be written
   4780 	 * before the chip is reset.
   4781 	 */
   4782 	switch (sc->sc_type) {
   4783 	case WM_T_82547:
   4784 	case WM_T_82547_2:
   4785 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4786 		    PBA_22K : PBA_30K;
   4787 		for (i = 0; i < sc->sc_nqueues; i++) {
   4788 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4789 			txq->txq_fifo_head = 0;
   4790 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4791 			txq->txq_fifo_size =
   4792 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4793 			txq->txq_fifo_stall = 0;
   4794 		}
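         		/*
         		 * Worked example (illustrative, assuming the PBA_*
         		 * values are in KB units): with a normal MTU,
         		 * sc_pba = PBA_30K gives the RX FIFO 30KB and leaves
         		 * 40KB - 30KB = 10KB of the packet buffer for the
         		 * TX FIFO computed above.
         		 */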
   4795 		break;
   4796 	case WM_T_82571:
   4797 	case WM_T_82572:
    4798 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4799 	case WM_T_80003:
   4800 		sc->sc_pba = PBA_32K;
   4801 		break;
   4802 	case WM_T_82573:
   4803 		sc->sc_pba = PBA_12K;
   4804 		break;
   4805 	case WM_T_82574:
   4806 	case WM_T_82583:
   4807 		sc->sc_pba = PBA_20K;
   4808 		break;
   4809 	case WM_T_82576:
   4810 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4811 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4812 		break;
   4813 	case WM_T_82580:
   4814 	case WM_T_I350:
   4815 	case WM_T_I354:
   4816 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4817 		break;
   4818 	case WM_T_I210:
   4819 	case WM_T_I211:
   4820 		sc->sc_pba = PBA_34K;
   4821 		break;
   4822 	case WM_T_ICH8:
   4823 		/* Workaround for a bit corruption issue in FIFO memory */
   4824 		sc->sc_pba = PBA_8K;
   4825 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4826 		break;
   4827 	case WM_T_ICH9:
   4828 	case WM_T_ICH10:
   4829 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4830 		    PBA_14K : PBA_10K;
   4831 		break;
   4832 	case WM_T_PCH:
   4833 	case WM_T_PCH2:	/* XXX 14K? */
   4834 	case WM_T_PCH_LPT:
   4835 	case WM_T_PCH_SPT:
   4836 	case WM_T_PCH_CNP:
   4837 		sc->sc_pba = PBA_26K;
   4838 		break;
   4839 	default:
   4840 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4841 		    PBA_40K : PBA_48K;
   4842 		break;
   4843 	}
   4844 	/*
   4845 	 * Only old or non-multiqueue devices have the PBA register
   4846 	 * XXX Need special handling for 82575.
   4847 	 */
   4848 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4849 	    || (sc->sc_type == WM_T_82575))
   4850 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4851 
   4852 	/* Prevent the PCI-E bus from sticking */
   4853 	if (sc->sc_flags & WM_F_PCIE) {
   4854 		int timeout = 800;
   4855 
   4856 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4857 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4858 
   4859 		while (timeout--) {
   4860 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4861 			    == 0)
   4862 				break;
   4863 			delay(100);
   4864 		}
    4865 		if (timeout < 0)
   4866 			device_printf(sc->sc_dev,
   4867 			    "failed to disable busmastering\n");
   4868 	}
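         	/*
         	 * Illustrative timing for the loop above: 800 polls x 100us
         	 * bounds the wait for master-disable at roughly 80ms.
         	 */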
   4869 
   4870 	/* Set the completion timeout for interface */
   4871 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4872 	    || (sc->sc_type == WM_T_82580)
   4873 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4874 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4875 		wm_set_pcie_completion_timeout(sc);
   4876 
   4877 	/* Clear interrupt */
   4878 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4879 	if (wm_is_using_msix(sc)) {
   4880 		if (sc->sc_type != WM_T_82574) {
   4881 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4882 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4883 		} else
   4884 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4885 	}
   4886 
   4887 	/* Stop the transmit and receive processes. */
   4888 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4889 	sc->sc_rctl &= ~RCTL_EN;
   4890 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4891 	CSR_WRITE_FLUSH(sc);
   4892 
   4893 	/* XXX set_tbi_sbp_82543() */
   4894 
   4895 	delay(10*1000);
   4896 
   4897 	/* Must acquire the MDIO ownership before MAC reset */
   4898 	switch (sc->sc_type) {
   4899 	case WM_T_82573:
   4900 	case WM_T_82574:
   4901 	case WM_T_82583:
   4902 		error = wm_get_hw_semaphore_82573(sc);
   4903 		break;
   4904 	default:
   4905 		break;
   4906 	}
   4907 
   4908 	/*
   4909 	 * 82541 Errata 29? & 82547 Errata 28?
   4910 	 * See also the description about PHY_RST bit in CTRL register
   4911 	 * in 8254x_GBe_SDM.pdf.
   4912 	 */
   4913 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4914 		CSR_WRITE(sc, WMREG_CTRL,
   4915 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4916 		CSR_WRITE_FLUSH(sc);
   4917 		delay(5000);
   4918 	}
   4919 
   4920 	switch (sc->sc_type) {
   4921 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4922 	case WM_T_82541:
   4923 	case WM_T_82541_2:
   4924 	case WM_T_82547:
   4925 	case WM_T_82547_2:
   4926 		/*
   4927 		 * On some chipsets, a reset through a memory-mapped write
   4928 		 * cycle can cause the chip to reset before completing the
    4929 		 * write cycle. This causes a major headache that can be avoided
   4930 		 * by issuing the reset via indirect register writes through
   4931 		 * I/O space.
   4932 		 *
   4933 		 * So, if we successfully mapped the I/O BAR at attach time,
   4934 		 * use that. Otherwise, try our luck with a memory-mapped
   4935 		 * reset.
   4936 		 */
   4937 		if (sc->sc_flags & WM_F_IOH_VALID)
   4938 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4939 		else
   4940 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4941 		break;
   4942 	case WM_T_82545_3:
   4943 	case WM_T_82546_3:
   4944 		/* Use the shadow control register on these chips. */
   4945 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4946 		break;
   4947 	case WM_T_80003:
   4948 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4949 		sc->phy.acquire(sc);
   4950 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4951 		sc->phy.release(sc);
   4952 		break;
   4953 	case WM_T_ICH8:
   4954 	case WM_T_ICH9:
   4955 	case WM_T_ICH10:
   4956 	case WM_T_PCH:
   4957 	case WM_T_PCH2:
   4958 	case WM_T_PCH_LPT:
   4959 	case WM_T_PCH_SPT:
   4960 	case WM_T_PCH_CNP:
   4961 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4962 		if (wm_phy_resetisblocked(sc) == false) {
   4963 			/*
   4964 			 * Gate automatic PHY configuration by hardware on
   4965 			 * non-managed 82579
   4966 			 */
   4967 			if ((sc->sc_type == WM_T_PCH2)
   4968 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4969 				== 0))
   4970 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4971 
   4972 			reg |= CTRL_PHY_RESET;
   4973 			phy_reset = 1;
   4974 		} else
   4975 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4976 		sc->phy.acquire(sc);
   4977 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4978 		/* Don't insert a completion barrier during reset */
   4979 		delay(20*1000);
   4980 		mutex_exit(sc->sc_ich_phymtx);
   4981 		break;
   4982 	case WM_T_82580:
   4983 	case WM_T_I350:
   4984 	case WM_T_I354:
   4985 	case WM_T_I210:
   4986 	case WM_T_I211:
   4987 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4988 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4989 			CSR_WRITE_FLUSH(sc);
   4990 		delay(5000);
   4991 		break;
   4992 	case WM_T_82542_2_0:
   4993 	case WM_T_82542_2_1:
   4994 	case WM_T_82543:
   4995 	case WM_T_82540:
   4996 	case WM_T_82545:
   4997 	case WM_T_82546:
   4998 	case WM_T_82571:
   4999 	case WM_T_82572:
   5000 	case WM_T_82573:
   5001 	case WM_T_82574:
   5002 	case WM_T_82575:
   5003 	case WM_T_82576:
   5004 	case WM_T_82583:
   5005 	default:
   5006 		/* Everything else can safely use the documented method. */
   5007 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5008 		break;
   5009 	}
   5010 
   5011 	/* Must release the MDIO ownership after MAC reset */
   5012 	switch (sc->sc_type) {
   5013 	case WM_T_82573:
   5014 	case WM_T_82574:
   5015 	case WM_T_82583:
   5016 		if (error == 0)
   5017 			wm_put_hw_semaphore_82573(sc);
   5018 		break;
   5019 	default:
   5020 		break;
   5021 	}
   5022 
   5023 	/* Set Phy Config Counter to 50msec */
   5024 	if (sc->sc_type == WM_T_PCH2) {
   5025 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5026 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5027 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5028 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5029 	}
   5030 
   5031 	if (phy_reset != 0)
   5032 		wm_get_cfg_done(sc);
   5033 
   5034 	/* Reload EEPROM */
   5035 	switch (sc->sc_type) {
   5036 	case WM_T_82542_2_0:
   5037 	case WM_T_82542_2_1:
   5038 	case WM_T_82543:
   5039 	case WM_T_82544:
   5040 		delay(10);
   5041 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5042 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5043 		CSR_WRITE_FLUSH(sc);
   5044 		delay(2000);
   5045 		break;
   5046 	case WM_T_82540:
   5047 	case WM_T_82545:
   5048 	case WM_T_82545_3:
   5049 	case WM_T_82546:
   5050 	case WM_T_82546_3:
   5051 		delay(5*1000);
   5052 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5053 		break;
   5054 	case WM_T_82541:
   5055 	case WM_T_82541_2:
   5056 	case WM_T_82547:
   5057 	case WM_T_82547_2:
   5058 		delay(20000);
   5059 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5060 		break;
   5061 	case WM_T_82571:
   5062 	case WM_T_82572:
   5063 	case WM_T_82573:
   5064 	case WM_T_82574:
   5065 	case WM_T_82583:
   5066 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5067 			delay(10);
   5068 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5069 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5070 			CSR_WRITE_FLUSH(sc);
   5071 		}
   5072 		/* check EECD_EE_AUTORD */
   5073 		wm_get_auto_rd_done(sc);
   5074 		/*
    5075 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   5076 		 * is set.
   5077 		 */
   5078 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5079 		    || (sc->sc_type == WM_T_82583))
   5080 			delay(25*1000);
   5081 		break;
   5082 	case WM_T_82575:
   5083 	case WM_T_82576:
   5084 	case WM_T_82580:
   5085 	case WM_T_I350:
   5086 	case WM_T_I354:
   5087 	case WM_T_I210:
   5088 	case WM_T_I211:
   5089 	case WM_T_80003:
   5090 		/* check EECD_EE_AUTORD */
   5091 		wm_get_auto_rd_done(sc);
   5092 		break;
   5093 	case WM_T_ICH8:
   5094 	case WM_T_ICH9:
   5095 	case WM_T_ICH10:
   5096 	case WM_T_PCH:
   5097 	case WM_T_PCH2:
   5098 	case WM_T_PCH_LPT:
   5099 	case WM_T_PCH_SPT:
   5100 	case WM_T_PCH_CNP:
   5101 		break;
   5102 	default:
   5103 		panic("%s: unknown type\n", __func__);
   5104 	}
   5105 
   5106 	/* Check whether EEPROM is present or not */
   5107 	switch (sc->sc_type) {
   5108 	case WM_T_82575:
   5109 	case WM_T_82576:
   5110 	case WM_T_82580:
   5111 	case WM_T_I350:
   5112 	case WM_T_I354:
   5113 	case WM_T_ICH8:
   5114 	case WM_T_ICH9:
   5115 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5116 			/* Not found */
   5117 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5118 			if (sc->sc_type == WM_T_82575)
   5119 				wm_reset_init_script_82575(sc);
   5120 		}
   5121 		break;
   5122 	default:
   5123 		break;
   5124 	}
   5125 
   5126 	if (phy_reset != 0)
   5127 		wm_phy_post_reset(sc);
   5128 
   5129 	if ((sc->sc_type == WM_T_82580)
   5130 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5131 		/* Clear global device reset status bit */
   5132 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5133 	}
   5134 
   5135 	/* Clear any pending interrupt events. */
   5136 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5137 	reg = CSR_READ(sc, WMREG_ICR);
   5138 	if (wm_is_using_msix(sc)) {
   5139 		if (sc->sc_type != WM_T_82574) {
   5140 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5141 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5142 		} else
   5143 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5144 	}
   5145 
   5146 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5147 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5148 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5149 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5150 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5151 		reg |= KABGTXD_BGSQLBIAS;
   5152 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5153 	}
   5154 
   5155 	/* Reload sc_ctrl */
   5156 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5157 
   5158 	wm_set_eee(sc);
   5159 
   5160 	/*
   5161 	 * For PCH, this write will make sure that any noise will be detected
   5162 	 * as a CRC error and be dropped rather than show up as a bad packet
   5163 	 * to the DMA engine
   5164 	 */
   5165 	if (sc->sc_type == WM_T_PCH)
   5166 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5167 
   5168 	if (sc->sc_type >= WM_T_82544)
   5169 		CSR_WRITE(sc, WMREG_WUC, 0);
   5170 
   5171 	if (sc->sc_type < WM_T_82575)
   5172 		wm_disable_aspm(sc); /* Workaround for some chips */
   5173 
   5174 	wm_reset_mdicnfg_82580(sc);
   5175 
   5176 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5177 		wm_pll_workaround_i210(sc);
   5178 
   5179 	if (sc->sc_type == WM_T_80003) {
   5180 		/* Default to TRUE to enable the MDIC W/A */
   5181 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5182 
   5183 		rv = wm_kmrn_readreg(sc,
   5184 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5185 		if (rv == 0) {
   5186 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5187 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5188 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5189 			else
   5190 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5191 		}
   5192 	}
   5193 }
   5194 
   5195 /*
   5196  * wm_add_rxbuf:
   5197  *
    5198  *	Add a receive buffer to the indicated descriptor.
   5199  */
   5200 static int
   5201 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5202 {
   5203 	struct wm_softc *sc = rxq->rxq_sc;
   5204 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5205 	struct mbuf *m;
   5206 	int error;
   5207 
   5208 	KASSERT(mutex_owned(rxq->rxq_lock));
   5209 
   5210 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5211 	if (m == NULL)
   5212 		return ENOBUFS;
   5213 
   5214 	MCLGET(m, M_DONTWAIT);
   5215 	if ((m->m_flags & M_EXT) == 0) {
   5216 		m_freem(m);
   5217 		return ENOBUFS;
   5218 	}
   5219 
   5220 	if (rxs->rxs_mbuf != NULL)
   5221 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5222 
   5223 	rxs->rxs_mbuf = m;
   5224 
   5225 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5226 	/*
   5227 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5228 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5229 	 */
   5230 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5231 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5232 	if (error) {
   5233 		/* XXX XXX XXX */
   5234 		aprint_error_dev(sc->sc_dev,
   5235 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5236 		panic("wm_add_rxbuf");
   5237 	}
   5238 
   5239 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5240 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5241 
   5242 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5243 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5244 			wm_init_rxdesc(rxq, idx);
   5245 	} else
   5246 		wm_init_rxdesc(rxq, idx);
   5247 
   5248 	return 0;
   5249 }
   5250 
   5251 /*
   5252  * wm_rxdrain:
   5253  *
   5254  *	Drain the receive queue.
   5255  */
   5256 static void
   5257 wm_rxdrain(struct wm_rxqueue *rxq)
   5258 {
   5259 	struct wm_softc *sc = rxq->rxq_sc;
   5260 	struct wm_rxsoft *rxs;
   5261 	int i;
   5262 
   5263 	KASSERT(mutex_owned(rxq->rxq_lock));
   5264 
   5265 	for (i = 0; i < WM_NRXDESC; i++) {
   5266 		rxs = &rxq->rxq_soft[i];
   5267 		if (rxs->rxs_mbuf != NULL) {
   5268 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5269 			m_freem(rxs->rxs_mbuf);
   5270 			rxs->rxs_mbuf = NULL;
   5271 		}
   5272 	}
   5273 }
   5274 
   5275 /*
   5276  * Setup registers for RSS.
   5277  *
   5278  * XXX not yet VMDq support
   5279  */
   5280 static void
   5281 wm_init_rss(struct wm_softc *sc)
   5282 {
   5283 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5284 	int i;
   5285 
   5286 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5287 
   5288 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5289 		unsigned int qid, reta_ent;
   5290 
   5291 		qid  = i % sc->sc_nqueues;
   5292 		switch (sc->sc_type) {
   5293 		case WM_T_82574:
   5294 			reta_ent = __SHIFTIN(qid,
   5295 			    RETA_ENT_QINDEX_MASK_82574);
   5296 			break;
   5297 		case WM_T_82575:
   5298 			reta_ent = __SHIFTIN(qid,
   5299 			    RETA_ENT_QINDEX1_MASK_82575);
   5300 			break;
   5301 		default:
   5302 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5303 			break;
   5304 		}
   5305 
   5306 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5307 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5308 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5309 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5310 	}
   5311 
   5312 	rss_getkey((uint8_t *)rss_key);
   5313 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5314 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5315 
   5316 	if (sc->sc_type == WM_T_82574)
   5317 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5318 	else
   5319 		mrqc = MRQC_ENABLE_RSS_MQ;
   5320 
   5321 	/*
    5322 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5323 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5324 	 */
   5325 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5326 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5327 #if 0
   5328 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5329 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5330 #endif
   5331 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5332 
   5333 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5334 }
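         
         /*
          * Conceptual sketch (not driver code): the hardware picks the
          * destination queue by indexing the redirection table filled above
          * with the low bits of the computed Toeplitz hash, roughly:
          *
          *	queue = reta[rss_hash & (RETA_NUM_ENTRIES - 1)];
          *
          * which is why the loop above spreads queue ids round-robin
          * (i % sc_nqueues) across all RETA entries.
          */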
   5335 
   5336 /*
    5337  * Adjust the numbers of TX and RX queues which the system actually uses.
    5338  *
    5339  * The numbers are affected by the parameters below:
    5340  *     - The number of hardware queues
   5341  *     - The number of MSI-X vectors (= "nvectors" argument)
   5342  *     - ncpu
   5343  */
   5344 static void
   5345 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5346 {
   5347 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5348 
   5349 	if (nvectors < 2) {
   5350 		sc->sc_nqueues = 1;
   5351 		return;
   5352 	}
   5353 
   5354 	switch (sc->sc_type) {
   5355 	case WM_T_82572:
   5356 		hw_ntxqueues = 2;
   5357 		hw_nrxqueues = 2;
   5358 		break;
   5359 	case WM_T_82574:
   5360 		hw_ntxqueues = 2;
   5361 		hw_nrxqueues = 2;
   5362 		break;
   5363 	case WM_T_82575:
   5364 		hw_ntxqueues = 4;
   5365 		hw_nrxqueues = 4;
   5366 		break;
   5367 	case WM_T_82576:
   5368 		hw_ntxqueues = 16;
   5369 		hw_nrxqueues = 16;
   5370 		break;
   5371 	case WM_T_82580:
   5372 	case WM_T_I350:
   5373 	case WM_T_I354:
   5374 		hw_ntxqueues = 8;
   5375 		hw_nrxqueues = 8;
   5376 		break;
   5377 	case WM_T_I210:
   5378 		hw_ntxqueues = 4;
   5379 		hw_nrxqueues = 4;
   5380 		break;
   5381 	case WM_T_I211:
   5382 		hw_ntxqueues = 2;
   5383 		hw_nrxqueues = 2;
   5384 		break;
   5385 		/*
    5386 		 * As the ethernet controllers below do not support MSI-X,
    5387 		 * this driver does not use multiqueue on them:
   5388 		 *     - WM_T_80003
   5389 		 *     - WM_T_ICH8
   5390 		 *     - WM_T_ICH9
   5391 		 *     - WM_T_ICH10
   5392 		 *     - WM_T_PCH
   5393 		 *     - WM_T_PCH2
   5394 		 *     - WM_T_PCH_LPT
   5395 		 */
   5396 	default:
   5397 		hw_ntxqueues = 1;
   5398 		hw_nrxqueues = 1;
   5399 		break;
   5400 	}
   5401 
   5402 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5403 
   5404 	/*
    5405 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    5406 	 * the number of queues actually used.
   5407 	 */
   5408 	if (nvectors < hw_nqueues + 1)
   5409 		sc->sc_nqueues = nvectors - 1;
   5410 	else
   5411 		sc->sc_nqueues = hw_nqueues;
   5412 
   5413 	/*
    5414 	 * As more queues than CPUs cannot improve scaling, we limit
    5415 	 * the number of queues actually used.
   5416 	 */
   5417 	if (ncpu < sc->sc_nqueues)
   5418 		sc->sc_nqueues = ncpu;
   5419 }
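         
         /*
          * Worked example (illustrative): an 82576 advertises 16 TX and 16 RX
          * hardware queues, so hw_nqueues = 16. With nvectors == 5 and
          * ncpu == 8, nvectors < hw_nqueues + 1, so sc_nqueues = 5 - 1 = 4
          * (one MSI-X vector is reserved for the link interrupt).
          */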
   5420 
   5421 static inline bool
   5422 wm_is_using_msix(struct wm_softc *sc)
   5423 {
   5424 
   5425 	return (sc->sc_nintrs > 1);
   5426 }
   5427 
   5428 static inline bool
   5429 wm_is_using_multiqueue(struct wm_softc *sc)
   5430 {
   5431 
   5432 	return (sc->sc_nqueues > 1);
   5433 }
   5434 
   5435 static int
   5436 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5437 {
   5438 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5439 
   5440 	wmq->wmq_id = qidx;
   5441 	wmq->wmq_intr_idx = intr_idx;
   5442 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5443 #ifdef WM_MPSAFE
   5444 	    | SOFTINT_MPSAFE
   5445 #endif
   5446 	    , wm_handle_queue, wmq);
   5447 	if (wmq->wmq_si != NULL)
   5448 		return 0;
   5449 
   5450 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5451 	    wmq->wmq_id);
   5452 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5453 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5454 	return ENOMEM;
   5455 }
   5456 
   5457 /*
    5458  * Both single-interrupt MSI and INTx use this function.
   5459  */
   5460 static int
   5461 wm_setup_legacy(struct wm_softc *sc)
   5462 {
   5463 	pci_chipset_tag_t pc = sc->sc_pc;
   5464 	const char *intrstr = NULL;
   5465 	char intrbuf[PCI_INTRSTR_LEN];
   5466 	int error;
   5467 
   5468 	error = wm_alloc_txrx_queues(sc);
   5469 	if (error) {
   5470 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5471 		    error);
   5472 		return ENOMEM;
   5473 	}
   5474 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5475 	    sizeof(intrbuf));
   5476 #ifdef WM_MPSAFE
   5477 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5478 #endif
   5479 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5480 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5481 	if (sc->sc_ihs[0] == NULL) {
    5482 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   5483 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5484 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5485 		return ENOMEM;
   5486 	}
   5487 
   5488 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5489 	sc->sc_nintrs = 1;
   5490 
   5491 	return wm_softint_establish(sc, 0, 0);
   5492 }
   5493 
   5494 static int
   5495 wm_setup_msix(struct wm_softc *sc)
   5496 {
   5497 	void *vih;
   5498 	kcpuset_t *affinity;
   5499 	int qidx, error, intr_idx, txrx_established;
   5500 	pci_chipset_tag_t pc = sc->sc_pc;
   5501 	const char *intrstr = NULL;
   5502 	char intrbuf[PCI_INTRSTR_LEN];
   5503 	char intr_xname[INTRDEVNAMEBUF];
   5504 
   5505 	if (sc->sc_nqueues < ncpu) {
   5506 		/*
   5507 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5508 		 * interrupts starts from CPU#1.
   5509 		 */
   5510 		sc->sc_affinity_offset = 1;
   5511 	} else {
   5512 		/*
    5513 		 * In this case, this device uses all CPUs, so we match the
    5514 		 * affinity cpu_index to the MSI-X vector for readability.
   5515 		 */
   5516 		sc->sc_affinity_offset = 0;
   5517 	}
   5518 
   5519 	error = wm_alloc_txrx_queues(sc);
   5520 	if (error) {
   5521 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5522 		    error);
   5523 		return ENOMEM;
   5524 	}
   5525 
   5526 	kcpuset_create(&affinity, false);
   5527 	intr_idx = 0;
   5528 
   5529 	/*
   5530 	 * TX and RX
   5531 	 */
   5532 	txrx_established = 0;
   5533 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5534 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5535 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5536 
   5537 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5538 		    sizeof(intrbuf));
   5539 #ifdef WM_MPSAFE
   5540 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5541 		    PCI_INTR_MPSAFE, true);
   5542 #endif
   5543 		memset(intr_xname, 0, sizeof(intr_xname));
   5544 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5545 		    device_xname(sc->sc_dev), qidx);
   5546 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5547 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5548 		if (vih == NULL) {
   5549 			aprint_error_dev(sc->sc_dev,
   5550 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5551 			    intrstr ? " at " : "",
   5552 			    intrstr ? intrstr : "");
   5553 
   5554 			goto fail;
   5555 		}
   5556 		kcpuset_zero(affinity);
   5557 		/* Round-robin affinity */
   5558 		kcpuset_set(affinity, affinity_to);
   5559 		error = interrupt_distribute(vih, affinity, NULL);
   5560 		if (error == 0) {
   5561 			aprint_normal_dev(sc->sc_dev,
   5562 			    "for TX and RX interrupting at %s affinity to %u\n",
   5563 			    intrstr, affinity_to);
   5564 		} else {
   5565 			aprint_normal_dev(sc->sc_dev,
   5566 			    "for TX and RX interrupting at %s\n", intrstr);
   5567 		}
   5568 		sc->sc_ihs[intr_idx] = vih;
   5569 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5570 			goto fail;
   5571 		txrx_established++;
   5572 		intr_idx++;
   5573 	}
   5574 
   5575 	/* LINK */
   5576 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5577 	    sizeof(intrbuf));
   5578 #ifdef WM_MPSAFE
   5579 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5580 #endif
   5581 	memset(intr_xname, 0, sizeof(intr_xname));
   5582 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5583 	    device_xname(sc->sc_dev));
   5584 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5585 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5586 	if (vih == NULL) {
   5587 		aprint_error_dev(sc->sc_dev,
   5588 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5589 		    intrstr ? " at " : "",
   5590 		    intrstr ? intrstr : "");
   5591 
   5592 		goto fail;
   5593 	}
    5594 	/* Keep the default affinity for the LINK interrupt */
   5595 	aprint_normal_dev(sc->sc_dev,
   5596 	    "for LINK interrupting at %s\n", intrstr);
   5597 	sc->sc_ihs[intr_idx] = vih;
   5598 	sc->sc_link_intr_idx = intr_idx;
   5599 
   5600 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5601 	kcpuset_destroy(affinity);
   5602 	return 0;
   5603 
   5604  fail:
   5605 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5606 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5607 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5608 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5609 	}
   5610 
   5611 	kcpuset_destroy(affinity);
   5612 	return ENOMEM;
   5613 }
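         
         /*
          * Affinity example (illustrative): with 4 queues on an 8-CPU system,
          * sc_affinity_offset is 1, so the TX/RX vectors above are distributed
          * round-robin to CPU1..CPU4, while the LINK vector keeps the system
          * default affinity.
          */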
   5614 
   5615 static void
   5616 wm_unset_stopping_flags(struct wm_softc *sc)
   5617 {
   5618 	int i;
   5619 
   5620 	KASSERT(WM_CORE_LOCKED(sc));
   5621 
   5622 	/* Must unset stopping flags in ascending order. */
   5623 	for (i = 0; i < sc->sc_nqueues; i++) {
   5624 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5625 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5626 
   5627 		mutex_enter(txq->txq_lock);
   5628 		txq->txq_stopping = false;
   5629 		mutex_exit(txq->txq_lock);
   5630 
   5631 		mutex_enter(rxq->rxq_lock);
   5632 		rxq->rxq_stopping = false;
   5633 		mutex_exit(rxq->rxq_lock);
   5634 	}
   5635 
   5636 	sc->sc_core_stopping = false;
   5637 }
   5638 
   5639 static void
   5640 wm_set_stopping_flags(struct wm_softc *sc)
   5641 {
   5642 	int i;
   5643 
   5644 	KASSERT(WM_CORE_LOCKED(sc));
   5645 
   5646 	sc->sc_core_stopping = true;
   5647 
   5648 	/* Must set stopping flags in ascending order. */
   5649 	for (i = 0; i < sc->sc_nqueues; i++) {
   5650 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5651 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5652 
   5653 		mutex_enter(rxq->rxq_lock);
   5654 		rxq->rxq_stopping = true;
   5655 		mutex_exit(rxq->rxq_lock);
   5656 
   5657 		mutex_enter(txq->txq_lock);
   5658 		txq->txq_stopping = true;
   5659 		mutex_exit(txq->txq_lock);
   5660 	}
   5661 }
   5662 
   5663 /*
   5664  * Write interrupt interval value to ITR or EITR
   5665  */
   5666 static void
   5667 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5668 {
   5669 
   5670 	if (!wmq->wmq_set_itr)
   5671 		return;
   5672 
   5673 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5674 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5675 
   5676 		/*
    5677 		 * The 82575 doesn't have the CNT_INGR field,
    5678 		 * so overwrite the counter field in software.
   5679 		 */
   5680 		if (sc->sc_type == WM_T_82575)
   5681 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5682 		else
   5683 			eitr |= EITR_CNT_INGR;
   5684 
   5685 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5686 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5687 		/*
    5688 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5689 		 * the multiqueue function with MSI-X.
   5690 		 */
   5691 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5692 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5693 	} else {
   5694 		KASSERT(wmq->wmq_id == 0);
   5695 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5696 	}
   5697 
   5698 	wmq->wmq_set_itr = false;
   5699 }
   5700 
   5701 /*
   5702  * TODO
    5703  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5704  * but it does not fit wm(4) well, so AIM stays disabled until we find
    5705  * an appropriate ITR calculation.
   5706  */
   5707 /*
    5708  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5709  * write to the register; this function does not write ITR/EITR itself.
   5710  */
   5711 static void
   5712 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5713 {
   5714 #ifdef NOTYET
   5715 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5716 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5717 	uint32_t avg_size = 0;
   5718 	uint32_t new_itr;
   5719 
   5720 	if (rxq->rxq_packets)
    5721 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   5722 	if (txq->txq_packets)
   5723 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5724 
   5725 	if (avg_size == 0) {
   5726 		new_itr = 450; /* restore default value */
   5727 		goto out;
   5728 	}
   5729 
   5730 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5731 	avg_size += 24;
   5732 
   5733 	/* Don't starve jumbo frames */
   5734 	avg_size = uimin(avg_size, 3000);
   5735 
   5736 	/* Give a little boost to mid-size frames */
   5737 	if ((avg_size > 300) && (avg_size < 1200))
   5738 		new_itr = avg_size / 3;
   5739 	else
   5740 		new_itr = avg_size / 2;
   5741 
   5742 out:
   5743 	/*
    5744 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5745 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5746 	 */
   5747 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5748 		new_itr *= 4;
   5749 
   5750 	if (new_itr != wmq->wmq_itr) {
   5751 		wmq->wmq_itr = new_itr;
   5752 		wmq->wmq_set_itr = true;
   5753 	} else
   5754 		wmq->wmq_set_itr = false;
   5755 
   5756 	rxq->rxq_packets = 0;
   5757 	rxq->rxq_bytes = 0;
   5758 	txq->txq_packets = 0;
   5759 	txq->txq_bytes = 0;
   5760 #endif
   5761 }
   5762 
   5763 static void
   5764 wm_init_sysctls(struct wm_softc *sc)
   5765 {
   5766 	struct sysctllog **log;
   5767 	const struct sysctlnode *rnode, *cnode;
   5768 	int rv;
   5769 	const char *dvname;
   5770 
   5771 	log = &sc->sc_sysctllog;
   5772 	dvname = device_xname(sc->sc_dev);
   5773 
   5774 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5775 	    0, CTLTYPE_NODE, dvname,
   5776 	    SYSCTL_DESCR("wm information and settings"),
   5777 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5778 	if (rv != 0)
   5779 		goto err;
   5780 
   5781 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5782 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5783 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5784 	if (rv != 0)
   5785 		goto teardown;
   5786 
   5787 	return;
   5788 
   5789 teardown:
   5790 	sysctl_teardown(log);
   5791 err:
   5792 	sc->sc_sysctllog = NULL;
   5793 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5794 	    __func__, rv);
   5795 }
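         
         /*
          * Usage sketch (illustrative): assuming the device attached as wm0,
          * the node created above can be toggled from userland with:
          *
          *	sysctl -w hw.wm0.txrx_workqueue=1
          */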
   5796 
   5797 /*
   5798  * wm_init:		[ifnet interface function]
   5799  *
   5800  *	Initialize the interface.
   5801  */
   5802 static int
   5803 wm_init(struct ifnet *ifp)
   5804 {
   5805 	struct wm_softc *sc = ifp->if_softc;
   5806 	int ret;
   5807 
   5808 	WM_CORE_LOCK(sc);
   5809 	ret = wm_init_locked(ifp);
   5810 	WM_CORE_UNLOCK(sc);
   5811 
   5812 	return ret;
   5813 }
   5814 
   5815 static int
   5816 wm_init_locked(struct ifnet *ifp)
   5817 {
   5818 	struct wm_softc *sc = ifp->if_softc;
   5819 	struct ethercom *ec = &sc->sc_ethercom;
   5820 	int i, j, trynum, error = 0;
   5821 	uint32_t reg, sfp_mask = 0;
   5822 
   5823 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5824 		device_xname(sc->sc_dev), __func__));
   5825 	KASSERT(WM_CORE_LOCKED(sc));
   5826 
   5827 	/*
    5828 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5829 	 * There is a small but measurable benefit to avoiding the adjustment
    5830 	 * of the descriptor so that the headers are aligned, for normal MTU,
   5831 	 * on such platforms.  One possibility is that the DMA itself is
   5832 	 * slightly more efficient if the front of the entire packet (instead
   5833 	 * of the front of the headers) is aligned.
   5834 	 *
   5835 	 * Note we must always set align_tweak to 0 if we are using
   5836 	 * jumbo frames.
   5837 	 */
   5838 #ifdef __NO_STRICT_ALIGNMENT
   5839 	sc->sc_align_tweak = 0;
   5840 #else
   5841 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5842 		sc->sc_align_tweak = 0;
   5843 	else
   5844 		sc->sc_align_tweak = 2;
   5845 #endif /* __NO_STRICT_ALIGNMENT */
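         	/*
         	 * Worked example (illustrative): with a 1500-byte MTU,
         	 * 1500 + ETHER_HDR_LEN + ETHER_CRC_LEN = 1518 <= MCLBYTES - 2,
         	 * so align_tweak is 2; the 14-byte Ethernet header then starts
         	 * at offset 2 and the IP header lands on a 4-byte boundary.
         	 */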
   5846 
   5847 	/* Cancel any pending I/O. */
   5848 	wm_stop_locked(ifp, false, false);
   5849 
   5850 	/* Update statistics before reset */
   5851 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   5852 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   5853 
   5854 	/* PCH_SPT hardware workaround */
   5855 	if (sc->sc_type == WM_T_PCH_SPT)
   5856 		wm_flush_desc_rings(sc);
   5857 
   5858 	/* Reset the chip to a known state. */
   5859 	wm_reset(sc);
   5860 
   5861 	/*
    5862 	 * AMT-based hardware can now take control from the firmware.
    5863 	 * Do this after reset.
   5864 	 */
   5865 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5866 		wm_get_hw_control(sc);
   5867 
   5868 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5869 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5870 		wm_legacy_irq_quirk_spt(sc);
   5871 
   5872 	/* Init hardware bits */
   5873 	wm_initialize_hardware_bits(sc);
   5874 
   5875 	/* Reset the PHY. */
   5876 	if (sc->sc_flags & WM_F_HAS_MII)
   5877 		wm_gmii_reset(sc);
   5878 
   5879 	if (sc->sc_type >= WM_T_ICH8) {
   5880 		reg = CSR_READ(sc, WMREG_GCR);
   5881 		/*
   5882 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5883 		 * default after reset.
   5884 		 */
   5885 		if (sc->sc_type == WM_T_ICH8)
   5886 			reg |= GCR_NO_SNOOP_ALL;
   5887 		else
   5888 			reg &= ~GCR_NO_SNOOP_ALL;
   5889 		CSR_WRITE(sc, WMREG_GCR, reg);
   5890 	}
   5891 	if ((sc->sc_type >= WM_T_ICH8)
   5892 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5893 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5894 
   5895 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5896 		reg |= CTRL_EXT_RO_DIS;
   5897 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5898 	}
   5899 
   5900 	/* Calculate (E)ITR value */
   5901 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5902 		/*
   5903 		 * For NEWQUEUE's EITR (except for 82575).
    5904 		 * The 82575's EITR should be set to the same throttling value
    5905 		 * as other old controllers' ITR because the interrupt/sec
    5906 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5907 		 *
    5908 		 * The 82574's EITR should be set to the same value as its ITR.
    5909 		 *
    5910 		 * For N interrupts/sec, set this value to:
    5911 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5912 		 */
   5913 		sc->sc_itr_init = 450;
   5914 	} else if (sc->sc_type >= WM_T_82543) {
   5915 		/*
   5916 		 * Set up the interrupt throttling register (units of 256ns)
   5917 		 * Note that a footnote in Intel's documentation says this
   5918 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5919 		 * or 10Mbit mode.  Empirically, it appears to be the case
    5920 		 * or 10Mbit mode.  Empirically, it appears that this is
    5921 		 * also true for the 1024ns units of the other
   5922 		 * to divide this value by 4 when the link speed is low.
   5923 		 *
   5924 		 * XXX implement this division at link speed change!
   5925 		 */
   5926 
   5927 		/*
   5928 		 * For N interrupts/sec, set this value to:
   5929 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5930 		 * absolute and packet timer values to this value
   5931 		 * divided by 4 to get "simple timer" behavior.
   5932 		 */
   5933 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5934 	}
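         	/*
         	 * Worked example (illustrative): the legacy ITR value of 1500
         	 * gives 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec,
         	 * while the NEWQUEUE EITR value of 450 gives
         	 * 1,000,000 / 450 ~= 2222 interrupts/sec.
         	 */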
   5935 
   5936 	error = wm_init_txrx_queues(sc);
   5937 	if (error)
   5938 		goto out;
   5939 
   5940 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   5941 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   5942 	    (sc->sc_type >= WM_T_82575))
   5943 		wm_serdes_power_up_link_82575(sc);
   5944 
   5945 	/* Clear out the VLAN table -- we don't use it (yet). */
   5946 	CSR_WRITE(sc, WMREG_VET, 0);
   5947 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5948 		trynum = 10; /* Due to hw errata */
   5949 	else
   5950 		trynum = 1;
   5951 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5952 		for (j = 0; j < trynum; j++)
   5953 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5954 
   5955 	/*
   5956 	 * Set up flow-control parameters.
   5957 	 *
   5958 	 * XXX Values could probably stand some tuning.
   5959 	 */
   5960 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5961 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5962 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5963 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5964 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5965 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5966 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5967 	}
   5968 
   5969 	sc->sc_fcrtl = FCRTL_DFLT;
   5970 	if (sc->sc_type < WM_T_82543) {
   5971 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5972 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5973 	} else {
   5974 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5975 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5976 	}
   5977 
   5978 	if (sc->sc_type == WM_T_80003)
   5979 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5980 	else
   5981 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5982 
   5983 	/* Writes the control register. */
   5984 	wm_set_vlan(sc);
   5985 
   5986 	if (sc->sc_flags & WM_F_HAS_MII) {
   5987 		uint16_t kmreg;
   5988 
   5989 		switch (sc->sc_type) {
   5990 		case WM_T_80003:
   5991 		case WM_T_ICH8:
   5992 		case WM_T_ICH9:
   5993 		case WM_T_ICH10:
   5994 		case WM_T_PCH:
   5995 		case WM_T_PCH2:
   5996 		case WM_T_PCH_LPT:
   5997 		case WM_T_PCH_SPT:
   5998 		case WM_T_PCH_CNP:
   5999 			/*
    6000 			 * Set the MAC to wait the maximum time between each
    6001 			 * iteration and increase the max iterations when
    6002 			 * polling the PHY; this fixes erroneous timeouts at
   6003 			 * 10Mbps.
   6004 			 */
   6005 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6006 			    0xFFFF);
   6007 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6008 			    &kmreg);
   6009 			kmreg |= 0x3F;
   6010 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6011 			    kmreg);
   6012 			break;
   6013 		default:
   6014 			break;
   6015 		}
   6016 
   6017 		if (sc->sc_type == WM_T_80003) {
   6018 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6019 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6020 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6021 
   6022 			/* Bypass RX and TX FIFO's */
   6023 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6024 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6025 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6026 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6027 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6028 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6029 		}
   6030 	}
   6031 #if 0
   6032 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6033 #endif
   6034 
   6035 	/* Set up checksum offload parameters. */
   6036 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6037 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6038 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6039 		reg |= RXCSUM_IPOFL;
   6040 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6041 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6042 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6043 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6044 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6045 
   6046 	/* Set registers about MSI-X */
   6047 	if (wm_is_using_msix(sc)) {
   6048 		uint32_t ivar, qintr_idx;
   6049 		struct wm_queue *wmq;
   6050 		unsigned int qid;
   6051 
   6052 		if (sc->sc_type == WM_T_82575) {
   6053 			/* Interrupt control */
   6054 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6055 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6056 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6057 
   6058 			/* TX and RX */
   6059 			for (i = 0; i < sc->sc_nqueues; i++) {
   6060 				wmq = &sc->sc_queue[i];
   6061 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6062 				    EITR_TX_QUEUE(wmq->wmq_id)
   6063 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6064 			}
   6065 			/* Link status */
   6066 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6067 			    EITR_OTHER);
   6068 		} else if (sc->sc_type == WM_T_82574) {
   6069 			/* Interrupt control */
   6070 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6071 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6072 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6073 
   6074 			/*
    6075 			 * Work around an issue with spurious interrupts
    6076 			 * in MSI-X mode.
    6077 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6078 			 * been initialized yet; re-init WMREG_RFCTL here.
   6079 			 */
   6080 			reg = CSR_READ(sc, WMREG_RFCTL);
   6081 			reg |= WMREG_RFCTL_ACKDIS;
   6082 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6083 
   6084 			ivar = 0;
   6085 			/* TX and RX */
   6086 			for (i = 0; i < sc->sc_nqueues; i++) {
   6087 				wmq = &sc->sc_queue[i];
   6088 				qid = wmq->wmq_id;
   6089 				qintr_idx = wmq->wmq_intr_idx;
   6090 
   6091 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6092 				    IVAR_TX_MASK_Q_82574(qid));
   6093 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6094 				    IVAR_RX_MASK_Q_82574(qid));
   6095 			}
   6096 			/* Link status */
   6097 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6098 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6099 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6100 		} else {
   6101 			/* Interrupt control */
   6102 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6103 			    | GPIE_EIAME | GPIE_PBA);
   6104 
   6105 			switch (sc->sc_type) {
   6106 			case WM_T_82580:
   6107 			case WM_T_I350:
   6108 			case WM_T_I354:
   6109 			case WM_T_I210:
   6110 			case WM_T_I211:
   6111 				/* TX and RX */
   6112 				for (i = 0; i < sc->sc_nqueues; i++) {
   6113 					wmq = &sc->sc_queue[i];
   6114 					qid = wmq->wmq_id;
   6115 					qintr_idx = wmq->wmq_intr_idx;
   6116 
   6117 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6118 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6119 					ivar |= __SHIFTIN((qintr_idx
   6120 						| IVAR_VALID),
   6121 					    IVAR_TX_MASK_Q(qid));
   6122 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6123 					ivar |= __SHIFTIN((qintr_idx
   6124 						| IVAR_VALID),
   6125 					    IVAR_RX_MASK_Q(qid));
   6126 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6127 				}
   6128 				break;
   6129 			case WM_T_82576:
   6130 				/* TX and RX */
   6131 				for (i = 0; i < sc->sc_nqueues; i++) {
   6132 					wmq = &sc->sc_queue[i];
   6133 					qid = wmq->wmq_id;
   6134 					qintr_idx = wmq->wmq_intr_idx;
   6135 
   6136 					ivar = CSR_READ(sc,
   6137 					    WMREG_IVAR_Q_82576(qid));
   6138 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6139 					ivar |= __SHIFTIN((qintr_idx
   6140 						| IVAR_VALID),
   6141 					    IVAR_TX_MASK_Q_82576(qid));
   6142 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6143 					ivar |= __SHIFTIN((qintr_idx
   6144 						| IVAR_VALID),
   6145 					    IVAR_RX_MASK_Q_82576(qid));
   6146 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6147 					    ivar);
   6148 				}
   6149 				break;
   6150 			default:
   6151 				break;
   6152 			}
   6153 
   6154 			/* Link status */
   6155 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6156 			    IVAR_MISC_OTHER);
   6157 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6158 		}
   6159 
   6160 		if (wm_is_using_multiqueue(sc)) {
   6161 			wm_init_rss(sc);
   6162 
    6163 			/*
    6164 			 * NOTE: Receive Full-Packet Checksum Offload
    6165 			 * is mutually exclusive with Multiqueue. However,
    6166 			 * this is not the same as TCP/IP checksums, which
    6167 			 * still work.
    6168 			 */
   6169 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6170 			reg |= RXCSUM_PCSD;
   6171 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
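        			/*
        			 * The RSS hash presumably occupies the Rx
        			 * descriptor field otherwise used for the
        			 * full-packet checksum, which is why that
        			 * checksum must be disabled (PCSD) whenever
        			 * multiqueue RSS is in use.
        			 */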
   6172 		}
   6173 	}
   6174 
   6175 	/* Set up the interrupt registers. */
   6176 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6177 
   6178 	/* Enable SFP module insertion interrupt if it's required */
   6179 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6180 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6181 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6182 		sfp_mask = ICR_GPI(0);
   6183 	}
   6184 
   6185 	if (wm_is_using_msix(sc)) {
   6186 		uint32_t mask;
   6187 		struct wm_queue *wmq;
   6188 
   6189 		switch (sc->sc_type) {
   6190 		case WM_T_82574:
   6191 			mask = 0;
   6192 			for (i = 0; i < sc->sc_nqueues; i++) {
   6193 				wmq = &sc->sc_queue[i];
   6194 				mask |= ICR_TXQ(wmq->wmq_id);
   6195 				mask |= ICR_RXQ(wmq->wmq_id);
   6196 			}
   6197 			mask |= ICR_OTHER;
   6198 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6199 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6200 			break;
   6201 		default:
   6202 			if (sc->sc_type == WM_T_82575) {
   6203 				mask = 0;
   6204 				for (i = 0; i < sc->sc_nqueues; i++) {
   6205 					wmq = &sc->sc_queue[i];
   6206 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6207 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6208 				}
   6209 				mask |= EITR_OTHER;
   6210 			} else {
   6211 				mask = 0;
   6212 				for (i = 0; i < sc->sc_nqueues; i++) {
   6213 					wmq = &sc->sc_queue[i];
   6214 					mask |= 1 << wmq->wmq_intr_idx;
   6215 				}
   6216 				mask |= 1 << sc->sc_link_intr_idx;
   6217 			}
   6218 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6219 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6220 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6221 
   6222 			/* For other interrupts */
   6223 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6224 			break;
   6225 		}
   6226 	} else {
   6227 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6228 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6229 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6230 	}
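        	/*
        	 * A brief gloss on the mask above: TXDW (Tx descriptor
        	 * written back), LSC (link status change), RXSEQ (Rx
        	 * sequence error), RXDMT0 (Rx descriptor minimum
        	 * threshold), RXO (Rx overrun) and RXT0 (Rx timer) are
        	 * the causes wm_intr_legacy() is expected to service,
        	 * plus the optional SFP GPI bit.
        	 */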
   6231 
   6232 	/* Set up the inter-packet gap. */
   6233 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6234 
   6235 	if (sc->sc_type >= WM_T_82543) {
   6236 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6237 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6238 			wm_itrs_writereg(sc, wmq);
   6239 		}
   6240 		/*
   6241 		 * Link interrupts occur much less frequently than TX
   6242 		 * and RX interrupts, so we don't tune the
   6243 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
   6244 		 * if_igb does.
   6245 		 */
   6246 	}
   6247 
   6248 	/* Set the VLAN ethernetype. */
   6249 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6250 
   6251 	/*
   6252 	 * Set up the transmit control register; we start out with
   6253 	 * a collision distance suitable for FDX, but update it when
   6254 	 * we resolve the media type.
   6255 	 */
   6256 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6257 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6258 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6259 	if (sc->sc_type >= WM_T_82571)
   6260 		sc->sc_tctl |= TCTL_MULR;
   6261 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6262 
   6263 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6264 		/* Write TDT after TCTL.EN is set. See the datasheet. */
   6265 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6266 	}
   6267 
   6268 	if (sc->sc_type == WM_T_80003) {
   6269 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6270 		reg &= ~TCTL_EXT_GCEX_MASK;
   6271 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6272 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6273 	}
   6274 
   6275 	/* Set the media. */
   6276 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6277 		goto out;
   6278 
   6279 	/* Configure for OS presence */
   6280 	wm_init_manageability(sc);
   6281 
   6282 	/*
   6283 	 * Set up the receive control register; we actually program the
   6284 	 * register when we set the receive filter. Use multicast address
   6285 	 * offset type 0.
   6286 	 *
   6287 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6288 	 * don't enable that feature.
   6289 	 */
   6290 	sc->sc_mchash_type = 0;
   6291 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6292 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6293 
   6294 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6295 	if (sc->sc_type == WM_T_82574)
   6296 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6297 
   6298 	/*
   6299 	 * The I350 has a bug where it always strips the CRC whether
   6300 	 * asked to or not. So ask for stripped CRC here and cope in rxeof().
   6301 	 */
   6302 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6303 	    || (sc->sc_type == WM_T_I210))
   6304 		sc->sc_rctl |= RCTL_SECRC;
   6305 
   6306 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6307 	    && (ifp->if_mtu > ETHERMTU)) {
   6308 		sc->sc_rctl |= RCTL_LPE;
   6309 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6310 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6311 	}
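        	/*
        	 * RCTL_LPE permits long packets; on NEWQUEUE devices the
        	 * RLPML register appears to bound the accepted packet
        	 * length as well, hence the extra write above for jumbo
        	 * MTUs.
        	 */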
   6312 
   6313 	if (MCLBYTES == 2048)
   6314 		sc->sc_rctl |= RCTL_2k;
   6315 	else {
   6316 		if (sc->sc_type >= WM_T_82543) {
   6317 			switch (MCLBYTES) {
   6318 			case 4096:
   6319 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6320 				break;
   6321 			case 8192:
   6322 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6323 				break;
   6324 			case 16384:
   6325 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6326 				break;
   6327 			default:
   6328 				panic("wm_init: MCLBYTES %d unsupported",
   6329 				    MCLBYTES);
   6330 				break;
   6331 			}
   6332 		} else
   6333 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6334 	}
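        	/*
        	 * A rough gloss on the encoding above: the base BSIZE
        	 * encodings select 2048/1024/512/256 byte buffers, and
        	 * RCTL_BSEX scales them by 16, so e.g. the 256 byte
        	 * encoding with BSEX set yields the 4k buffers used when
        	 * MCLBYTES is 4096.
        	 */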
   6335 
   6336 	/* Enable ECC */
   6337 	switch (sc->sc_type) {
   6338 	case WM_T_82571:
   6339 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6340 		reg |= PBA_ECC_CORR_EN;
   6341 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6342 		break;
   6343 	case WM_T_PCH_LPT:
   6344 	case WM_T_PCH_SPT:
   6345 	case WM_T_PCH_CNP:
   6346 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6347 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6348 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6349 
   6350 		sc->sc_ctrl |= CTRL_MEHE;
   6351 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6352 		break;
   6353 	default:
   6354 		break;
   6355 	}
   6356 
   6357 	/*
   6358 	 * Set the receive filter.
   6359 	 *
   6360 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6361 	 * the setting of RCTL.EN in wm_set_filter().
   6362 	 */
   6363 	wm_set_filter(sc);
   6364 
   6365 	/* On the 82575 and later, set RDT only if RX is enabled */
   6366 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6367 		int qidx;
   6368 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6369 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6370 			for (i = 0; i < WM_NRXDESC; i++) {
   6371 				mutex_enter(rxq->rxq_lock);
   6372 				wm_init_rxdesc(rxq, i);
   6373 				mutex_exit(rxq->rxq_lock);
   6375 			}
   6376 		}
   6377 	}
   6378 
   6379 	wm_unset_stopping_flags(sc);
   6380 
   6381 	/* Start the one second link check clock. */
   6382 	callout_schedule(&sc->sc_tick_ch, hz);
   6383 
   6384 	/* ...all done! */
   6385 	ifp->if_flags |= IFF_RUNNING;
   6386 	ifp->if_flags &= ~IFF_OACTIVE;
   6387 
   6388  out:
   6389 	/* Save last flags for the callback */
   6390 	sc->sc_if_flags = ifp->if_flags;
   6391 	sc->sc_ec_capenable = ec->ec_capenable;
   6392 	if (error)
   6393 		log(LOG_ERR, "%s: interface not running\n",
   6394 		    device_xname(sc->sc_dev));
   6395 	return error;
   6396 }
   6397 
   6398 /*
   6399  * wm_stop:		[ifnet interface function]
   6400  *
   6401  *	Stop transmission on the interface.
   6402  */
   6403 static void
   6404 wm_stop(struct ifnet *ifp, int disable)
   6405 {
   6406 	struct wm_softc *sc = ifp->if_softc;
   6407 
   6408 	ASSERT_SLEEPABLE();
   6409 
   6410 	WM_CORE_LOCK(sc);
   6411 	wm_stop_locked(ifp, disable ? true : false, true);
   6412 	WM_CORE_UNLOCK(sc);
   6413 
   6414 	/*
   6415 	 * After wm_set_stopping_flags(), it is guaranteed that
   6416 	 * wm_handle_queue_work() does not call workqueue_enqueue().
   6417 	 * However, workqueue_wait() cannot be called in
   6418 	 * wm_stop_locked() because it can sleep, so call
   6419 	 * workqueue_wait() here.
   6420 	 */
   6421 	for (int i = 0; i < sc->sc_nqueues; i++)
   6422 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6423 }
   6424 
   6425 static void
   6426 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6427 {
   6428 	struct wm_softc *sc = ifp->if_softc;
   6429 	struct wm_txsoft *txs;
   6430 	int i, qidx;
   6431 
   6432 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6433 		device_xname(sc->sc_dev), __func__));
   6434 	KASSERT(WM_CORE_LOCKED(sc));
   6435 
   6436 	wm_set_stopping_flags(sc);
   6437 
   6438 	if (sc->sc_flags & WM_F_HAS_MII) {
   6439 		/* Down the MII. */
   6440 		mii_down(&sc->sc_mii);
   6441 	} else {
   6442 #if 0
   6443 		/* Should we clear PHY's status properly? */
   6444 		wm_reset(sc);
   6445 #endif
   6446 	}
   6447 
   6448 	/* Stop the transmit and receive processes. */
   6449 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6450 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6451 	sc->sc_rctl &= ~RCTL_EN;
   6452 
   6453 	/*
   6454 	 * Clear the interrupt mask to ensure the device cannot assert its
   6455 	 * interrupt line.
   6456 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6457 	 * service any currently pending or shared interrupt.
   6458 	 */
   6459 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6460 	sc->sc_icr = 0;
   6461 	if (wm_is_using_msix(sc)) {
   6462 		if (sc->sc_type != WM_T_82574) {
   6463 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6464 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6465 		} else
   6466 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6467 	}
   6468 
   6469 	/*
   6470 	 * Stop callouts after interrupts are disabled; if we have
   6471 	 * to wait for them, we will be releasing the CORE_LOCK
   6472 	 * briefly, which will unblock interrupts on the current CPU.
   6473 	 */
   6474 
   6475 	/* Stop the one second clock. */
   6476 	if (wait)
   6477 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6478 	else
   6479 		callout_stop(&sc->sc_tick_ch);
   6480 
   6481 	/* Stop the 82547 Tx FIFO stall check timer. */
   6482 	if (sc->sc_type == WM_T_82547) {
   6483 		if (wait)
   6484 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6485 		else
   6486 			callout_stop(&sc->sc_txfifo_ch);
   6487 	}
   6488 
   6489 	/* Release any queued transmit buffers. */
   6490 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6491 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6492 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6493 		mutex_enter(txq->txq_lock);
   6494 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6495 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6496 			txs = &txq->txq_soft[i];
   6497 			if (txs->txs_mbuf != NULL) {
   6498 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6499 				m_freem(txs->txs_mbuf);
   6500 				txs->txs_mbuf = NULL;
   6501 			}
   6502 		}
   6503 		mutex_exit(txq->txq_lock);
   6504 	}
   6505 
   6506 	/* Mark the interface as down and cancel the watchdog timer. */
   6507 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6508 
   6509 	if (disable) {
   6510 		for (i = 0; i < sc->sc_nqueues; i++) {
   6511 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6512 			mutex_enter(rxq->rxq_lock);
   6513 			wm_rxdrain(rxq);
   6514 			mutex_exit(rxq->rxq_lock);
   6515 		}
   6516 	}
   6517 
   6518 #if 0 /* notyet */
   6519 	if (sc->sc_type >= WM_T_82544)
   6520 		CSR_WRITE(sc, WMREG_WUC, 0);
   6521 #endif
   6522 }
   6523 
   6524 static void
   6525 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6526 {
   6527 	struct mbuf *m;
   6528 	int i;
   6529 
   6530 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6531 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6532 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6533 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6534 		    m->m_data, m->m_len, m->m_flags);
   6535 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6536 	    i, i == 1 ? "" : "s");
   6537 }
   6538 
   6539 /*
   6540  * wm_82547_txfifo_stall:
   6541  *
   6542  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6543  *	reset the FIFO pointers, and restart packet transmission.
   6544  */
   6545 static void
   6546 wm_82547_txfifo_stall(void *arg)
   6547 {
   6548 	struct wm_softc *sc = arg;
   6549 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6550 
   6551 	mutex_enter(txq->txq_lock);
   6552 
   6553 	if (txq->txq_stopping)
   6554 		goto out;
   6555 
   6556 	if (txq->txq_fifo_stall) {
   6557 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6558 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6559 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6560 			/*
   6561 			 * Packets have drained.  Stop transmitter, reset
   6562 			 * FIFO pointers, restart transmitter, and kick
   6563 			 * the packet queue.
   6564 			 */
   6565 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6566 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6567 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6568 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6569 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6570 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6571 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6572 			CSR_WRITE_FLUSH(sc);
   6573 
   6574 			txq->txq_fifo_head = 0;
   6575 			txq->txq_fifo_stall = 0;
   6576 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6577 		} else {
   6578 			/*
   6579 			 * Still waiting for packets to drain; try again in
   6580 			 * another tick.
   6581 			 */
   6582 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6583 		}
   6584 	}
   6585 
   6586 out:
   6587 	mutex_exit(txq->txq_lock);
   6588 }
   6589 
   6590 /*
   6591  * wm_82547_txfifo_bugchk:
   6592  *
   6593  *	Check for the bug condition in the 82547 Tx FIFO.  We need to
   6594  *	prevent enqueueing a packet that would wrap around the end
   6595  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6596  *
   6597  *	We do this by checking the amount of space before the end
   6598  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6599  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6600  *	the internal FIFO pointers to the beginning, and restart
   6601  *	transmission on the interface.
   6602  */
   6603 #define	WM_FIFO_HDR		0x10
   6604 #define	WM_82547_PAD_LEN	0x3e0
   6605 static int
   6606 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6607 {
   6608 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6609 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6610 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6611 
   6612 	/* Just return if already stalled. */
   6613 	if (txq->txq_fifo_stall)
   6614 		return 1;
   6615 
   6616 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6617 		/* Stall only occurs in half-duplex mode. */
   6618 		goto send_packet;
   6619 	}
   6620 
   6621 	if (len >= WM_82547_PAD_LEN + space) {
   6622 		txq->txq_fifo_stall = 1;
   6623 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6624 		return 1;
   6625 	}
   6626 
   6627  send_packet:
   6628 	txq->txq_fifo_head += len;
   6629 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6630 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6631 
   6632 	return 0;
   6633 }
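        /*
         * A worked example for wm_82547_txfifo_bugchk() with hypothetical
         * numbers: with 512 bytes of space left before the end of the
         * FIFO, a 1514 byte frame rounds up to 1536 bytes including the
         * 16 byte FIFO header, and 1536 >= WM_82547_PAD_LEN (0x3e0 = 992)
         * + 512, so the queue stalls until the FIFO drains and the
         * pointers are reset.
         */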
   6634 
   6635 static int
   6636 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6637 {
   6638 	int error;
   6639 
   6640 	/*
   6641 	 * Allocate the control data structures, and create and load the
   6642 	 * DMA map for it.
   6643 	 *
   6644 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6645 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6646 	 * both sets within the same 4G segment.
   6647 	 */
   6648 	if (sc->sc_type < WM_T_82544)
   6649 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6650 	else
   6651 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6652 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6653 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6654 	else
   6655 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6656 
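        	/*
        	 * In the bus_dmamem_alloc() call below, PAGE_SIZE is the
        	 * alignment and 0x100000000ULL the boundary argument; the
        	 * boundary keeps the allocation from crossing a 4G line,
        	 * which is what enforces the same-4G-segment requirement
        	 * noted above.
        	 */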
   6657 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6658 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6659 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6660 		aprint_error_dev(sc->sc_dev,
   6661 		    "unable to allocate TX control data, error = %d\n",
   6662 		    error);
   6663 		goto fail_0;
   6664 	}
   6665 
   6666 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6667 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6668 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6669 		aprint_error_dev(sc->sc_dev,
   6670 		    "unable to map TX control data, error = %d\n", error);
   6671 		goto fail_1;
   6672 	}
   6673 
   6674 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6675 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6676 		aprint_error_dev(sc->sc_dev,
   6677 		    "unable to create TX control data DMA map, error = %d\n",
   6678 		    error);
   6679 		goto fail_2;
   6680 	}
   6681 
   6682 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6683 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6684 		aprint_error_dev(sc->sc_dev,
   6685 		    "unable to load TX control data DMA map, error = %d\n",
   6686 		    error);
   6687 		goto fail_3;
   6688 	}
   6689 
   6690 	return 0;
   6691 
   6692  fail_3:
   6693 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6694  fail_2:
   6695 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6696 	    WM_TXDESCS_SIZE(txq));
   6697  fail_1:
   6698 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6699  fail_0:
   6700 	return error;
   6701 }
   6702 
   6703 static void
   6704 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6705 {
   6706 
   6707 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6708 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6709 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6710 	    WM_TXDESCS_SIZE(txq));
   6711 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6712 }
   6713 
   6714 static int
   6715 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6716 {
   6717 	int error;
   6718 	size_t rxq_descs_size;
   6719 
   6720 	/*
   6721 	 * Allocate the control data structures, and create and load the
   6722 	 * DMA map for it.
   6723 	 *
   6724 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6725 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6726 	 * both sets within the same 4G segment.
   6727 	 */
   6728 	rxq->rxq_ndesc = WM_NRXDESC;
   6729 	if (sc->sc_type == WM_T_82574)
   6730 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6731 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6732 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6733 	else
   6734 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6735 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6736 
   6737 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6738 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6739 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6740 		aprint_error_dev(sc->sc_dev,
   6741 		    "unable to allocate RX control data, error = %d\n",
   6742 		    error);
   6743 		goto fail_0;
   6744 	}
   6745 
   6746 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6747 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6748 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6749 		aprint_error_dev(sc->sc_dev,
   6750 		    "unable to map RX control data, error = %d\n", error);
   6751 		goto fail_1;
   6752 	}
   6753 
   6754 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6755 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6756 		aprint_error_dev(sc->sc_dev,
   6757 		    "unable to create RX control data DMA map, error = %d\n",
   6758 		    error);
   6759 		goto fail_2;
   6760 	}
   6761 
   6762 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6763 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6764 		aprint_error_dev(sc->sc_dev,
   6765 		    "unable to load RX control data DMA map, error = %d\n",
   6766 		    error);
   6767 		goto fail_3;
   6768 	}
   6769 
   6770 	return 0;
   6771 
   6772  fail_3:
   6773 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6774  fail_2:
   6775 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6776 	    rxq_descs_size);
   6777  fail_1:
   6778 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6779  fail_0:
   6780 	return error;
   6781 }
   6782 
   6783 static void
   6784 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6785 {
   6786 
   6787 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6788 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6789 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6790 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6791 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6792 }
   6793 
   6794 
   6795 static int
   6796 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6797 {
   6798 	int i, error;
   6799 
   6800 	/* Create the transmit buffer DMA maps. */
   6801 	WM_TXQUEUELEN(txq) =
   6802 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6803 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6804 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6805 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6806 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6807 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6808 			aprint_error_dev(sc->sc_dev,
   6809 			    "unable to create Tx DMA map %d, error = %d\n",
   6810 			    i, error);
   6811 			goto fail;
   6812 		}
   6813 	}
   6814 
   6815 	return 0;
   6816 
   6817  fail:
   6818 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6819 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6820 			bus_dmamap_destroy(sc->sc_dmat,
   6821 			    txq->txq_soft[i].txs_dmamap);
   6822 	}
   6823 	return error;
   6824 }
   6825 
   6826 static void
   6827 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6828 {
   6829 	int i;
   6830 
   6831 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6832 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6833 			bus_dmamap_destroy(sc->sc_dmat,
   6834 			    txq->txq_soft[i].txs_dmamap);
   6835 	}
   6836 }
   6837 
   6838 static int
   6839 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6840 {
   6841 	int i, error;
   6842 
   6843 	/* Create the receive buffer DMA maps. */
   6844 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6845 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6846 			    MCLBYTES, 0, 0,
   6847 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6848 			aprint_error_dev(sc->sc_dev,
   6849 			    "unable to create Rx DMA map %d error = %d\n",
   6850 			    i, error);
   6851 			goto fail;
   6852 		}
   6853 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6854 	}
   6855 
   6856 	return 0;
   6857 
   6858  fail:
   6859 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6860 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6861 			bus_dmamap_destroy(sc->sc_dmat,
   6862 			    rxq->rxq_soft[i].rxs_dmamap);
   6863 	}
   6864 	return error;
   6865 }
   6866 
   6867 static void
   6868 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6869 {
   6870 	int i;
   6871 
   6872 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6873 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6874 			bus_dmamap_destroy(sc->sc_dmat,
   6875 			    rxq->rxq_soft[i].rxs_dmamap);
   6876 	}
   6877 }
   6878 
   6879 /*
   6880  * wm_alloc_txrx_queues:
   6881  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6882  */
   6883 static int
   6884 wm_alloc_txrx_queues(struct wm_softc *sc)
   6885 {
   6886 	int i, error, tx_done, rx_done;
   6887 
   6888 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6889 	    KM_SLEEP);
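        	/*
        	 * KM_SLEEP allocations never return NULL, so the NULL
        	 * check below is purely defensive.
        	 */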
   6890 	if (sc->sc_queue == NULL) {
   6891 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6892 		error = ENOMEM;
   6893 		goto fail_0;
   6894 	}
   6895 
   6896 	/* For transmission */
   6897 	error = 0;
   6898 	tx_done = 0;
   6899 	for (i = 0; i < sc->sc_nqueues; i++) {
   6900 #ifdef WM_EVENT_COUNTERS
   6901 		int j;
   6902 		const char *xname;
   6903 #endif
   6904 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6905 		txq->txq_sc = sc;
   6906 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6907 
   6908 		error = wm_alloc_tx_descs(sc, txq);
   6909 		if (error)
   6910 			break;
   6911 		error = wm_alloc_tx_buffer(sc, txq);
   6912 		if (error) {
   6913 			wm_free_tx_descs(sc, txq);
   6914 			break;
   6915 		}
   6916 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6917 		if (txq->txq_interq == NULL) {
   6918 			wm_free_tx_descs(sc, txq);
   6919 			wm_free_tx_buffer(sc, txq);
   6920 			error = ENOMEM;
   6921 			break;
   6922 		}
   6923 
   6924 #ifdef WM_EVENT_COUNTERS
   6925 		xname = device_xname(sc->sc_dev);
   6926 
   6927 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6928 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6929 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6930 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6931 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6932 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6933 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6934 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6935 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6936 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6937 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6938 
   6939 		for (j = 0; j < WM_NTXSEGS; j++) {
   6940 			snprintf(txq->txq_txseg_evcnt_names[j],
   6941 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6942 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6943 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6944 		}
   6945 
   6946 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6947 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6948 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6949 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6950 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6951 #endif /* WM_EVENT_COUNTERS */
   6952 
   6953 		tx_done++;
   6954 	}
   6955 	if (error)
   6956 		goto fail_1;
   6957 
   6958 	/* For receive */
   6959 	error = 0;
   6960 	rx_done = 0;
   6961 	for (i = 0; i < sc->sc_nqueues; i++) {
   6962 #ifdef WM_EVENT_COUNTERS
   6963 		const char *xname;
   6964 #endif
   6965 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6966 		rxq->rxq_sc = sc;
   6967 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6968 
   6969 		error = wm_alloc_rx_descs(sc, rxq);
   6970 		if (error)
   6971 			break;
   6972 
   6973 		error = wm_alloc_rx_buffer(sc, rxq);
   6974 		if (error) {
   6975 			wm_free_rx_descs(sc, rxq);
   6976 			break;
   6977 		}
   6978 
   6979 #ifdef WM_EVENT_COUNTERS
   6980 		xname = device_xname(sc->sc_dev);
   6981 
   6982 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6983 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6984 
   6985 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6986 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6987 #endif /* WM_EVENT_COUNTERS */
   6988 
   6989 		rx_done++;
   6990 	}
   6991 	if (error)
   6992 		goto fail_2;
   6993 
   6994 	for (i = 0; i < sc->sc_nqueues; i++) {
   6995 		char rndname[16];
   6996 
   6997 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
   6998 		    device_xname(sc->sc_dev), i);
   6999 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
   7000 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
   7001 	}
   7002 
   7003 	return 0;
   7004 
   7005  fail_2:
   7006 	for (i = 0; i < rx_done; i++) {
   7007 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7008 		wm_free_rx_buffer(sc, rxq);
   7009 		wm_free_rx_descs(sc, rxq);
   7010 		if (rxq->rxq_lock)
   7011 			mutex_obj_free(rxq->rxq_lock);
   7012 	}
   7013  fail_1:
   7014 	for (i = 0; i < tx_done; i++) {
   7015 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7016 		pcq_destroy(txq->txq_interq);
   7017 		wm_free_tx_buffer(sc, txq);
   7018 		wm_free_tx_descs(sc, txq);
   7019 		if (txq->txq_lock)
   7020 			mutex_obj_free(txq->txq_lock);
   7021 	}
   7022 
   7023 	kmem_free(sc->sc_queue,
   7024 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7025  fail_0:
   7026 	return error;
   7027 }
   7028 
   7029 /*
   7030  * wm_free_txrx_queues:
   7031  *	Free {tx,rx} descriptors and {tx,rx} buffers
   7032  */
   7033 static void
   7034 wm_free_txrx_queues(struct wm_softc *sc)
   7035 {
   7036 	int i;
   7037 
   7038 	for (i = 0; i < sc->sc_nqueues; i++)
   7039 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
   7040 
   7041 	for (i = 0; i < sc->sc_nqueues; i++) {
   7042 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7043 
   7044 #ifdef WM_EVENT_COUNTERS
   7045 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7046 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7047 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7048 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7049 #endif /* WM_EVENT_COUNTERS */
   7050 
   7051 		wm_free_rx_buffer(sc, rxq);
   7052 		wm_free_rx_descs(sc, rxq);
   7053 		if (rxq->rxq_lock)
   7054 			mutex_obj_free(rxq->rxq_lock);
   7055 	}
   7056 
   7057 	for (i = 0; i < sc->sc_nqueues; i++) {
   7058 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7059 		struct mbuf *m;
   7060 #ifdef WM_EVENT_COUNTERS
   7061 		int j;
   7062 
   7063 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7064 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7065 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7066 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7067 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7068 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7069 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7070 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7071 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7072 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7073 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7074 
   7075 		for (j = 0; j < WM_NTXSEGS; j++)
   7076 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7077 
   7078 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7079 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7080 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7081 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7082 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7083 #endif /* WM_EVENT_COUNTERS */
   7084 
   7085 		/* Drain txq_interq */
   7086 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7087 			m_freem(m);
   7088 		pcq_destroy(txq->txq_interq);
   7089 
   7090 		wm_free_tx_buffer(sc, txq);
   7091 		wm_free_tx_descs(sc, txq);
   7092 		if (txq->txq_lock)
   7093 			mutex_obj_free(txq->txq_lock);
   7094 	}
   7095 
   7096 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7097 }
   7098 
   7099 static void
   7100 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7101 {
   7102 
   7103 	KASSERT(mutex_owned(txq->txq_lock));
   7104 
   7105 	/* Initialize the transmit descriptor ring. */
   7106 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7107 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7108 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7109 	txq->txq_free = WM_NTXDESC(txq);
   7110 	txq->txq_next = 0;
   7111 }
   7112 
   7113 static void
   7114 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7115     struct wm_txqueue *txq)
   7116 {
   7117 
   7118 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7119 		device_xname(sc->sc_dev), __func__));
   7120 	KASSERT(mutex_owned(txq->txq_lock));
   7121 
   7122 	if (sc->sc_type < WM_T_82543) {
   7123 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7124 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7125 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7126 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7127 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7128 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7129 	} else {
   7130 		int qid = wmq->wmq_id;
   7131 
   7132 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7133 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7134 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7135 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7136 
   7137 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7138 			/*
   7139 			 * Don't write TDT before TCTL.EN is set.
   7140 			 * See the datasheet.
   7141 			 */
   7142 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7143 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7144 			    | TXDCTL_WTHRESH(0));
   7145 		else {
   7146 			/* XXX should update with AIM? */
   7147 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7148 			if (sc->sc_type >= WM_T_82540) {
   7149 				/* TADV should hold the same interval as TIDV */
   7150 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7151 			}
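        			/*
        			 * The division by four presumably converts the
        			 * ITR interval (256 ns units) into the 1.024 us
        			 * units used by TIDV and TADV; the RDTR/RADV
        			 * writes in wm_init_rx_regs() get the same
        			 * scaling.
        			 */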
   7152 
   7153 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7154 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7155 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7156 		}
   7157 	}
   7158 }
   7159 
   7160 static void
   7161 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7162 {
   7163 	int i;
   7164 
   7165 	KASSERT(mutex_owned(txq->txq_lock));
   7166 
   7167 	/* Initialize the transmit job descriptors. */
   7168 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7169 		txq->txq_soft[i].txs_mbuf = NULL;
   7170 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7171 	txq->txq_snext = 0;
   7172 	txq->txq_sdirty = 0;
   7173 }
   7174 
   7175 static void
   7176 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7177     struct wm_txqueue *txq)
   7178 {
   7179 
   7180 	KASSERT(mutex_owned(txq->txq_lock));
   7181 
   7182 	/*
   7183 	 * Set up some register offsets that are different between
   7184 	 * the i82542 and the i82543 and later chips.
   7185 	 */
   7186 	if (sc->sc_type < WM_T_82543)
   7187 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7188 	else
   7189 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7190 
   7191 	wm_init_tx_descs(sc, txq);
   7192 	wm_init_tx_regs(sc, wmq, txq);
   7193 	wm_init_tx_buffer(sc, txq);
   7194 
   7195 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7196 	txq->txq_sending = false;
   7197 }
   7198 
   7199 static void
   7200 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7201     struct wm_rxqueue *rxq)
   7202 {
   7203 
   7204 	KASSERT(mutex_owned(rxq->rxq_lock));
   7205 
   7206 	/*
   7207 	 * Initialize the receive descriptor and receive job
   7208 	 * descriptor rings.
   7209 	 */
   7210 	if (sc->sc_type < WM_T_82543) {
   7211 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7212 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7213 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7214 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7215 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7216 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7217 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7218 
   7219 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7220 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7221 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7222 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7223 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7224 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7225 	} else {
   7226 		int qid = wmq->wmq_id;
   7227 
   7228 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7229 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7230 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7231 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7232 
   7233 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7234 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7235 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7236 
   7237 			/* Currently only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7238 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7239 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7240 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7241 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7242 			    | RXDCTL_WTHRESH(1));
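        			/*
        			 * SRRCTL expresses the buffer size in 1 KB
        			 * units, hence the BSIZEPKT shift and the
        			 * MCLBYTES check above; the RXDCTL 16/8/1
        			 * prefetch/host/write-back thresholds are
        			 * conventional tuning values.
        			 */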
   7243 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7244 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7245 		} else {
   7246 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7247 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7248 			/* XXX should update with AIM? */
   7249 			CSR_WRITE(sc, WMREG_RDTR,
   7250 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7251 			/* RADV MUST be set to the same value as RDTR */
   7252 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7253 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7254 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7255 		}
   7256 	}
   7257 }
   7258 
   7259 static int
   7260 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7261 {
   7262 	struct wm_rxsoft *rxs;
   7263 	int error, i;
   7264 
   7265 	KASSERT(mutex_owned(rxq->rxq_lock));
   7266 
   7267 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7268 		rxs = &rxq->rxq_soft[i];
   7269 		if (rxs->rxs_mbuf == NULL) {
   7270 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7271 				log(LOG_ERR, "%s: unable to allocate or map "
   7272 				    "rx buffer %d, error = %d\n",
   7273 				    device_xname(sc->sc_dev), i, error);
   7274 				/*
   7275 				 * XXX Should attempt to run with fewer receive
   7276 				 * XXX buffers instead of just failing.
   7277 				 */
   7278 				wm_rxdrain(rxq);
   7279 				return ENOMEM;
   7280 			}
   7281 		} else {
   7282 			/*
   7283 			 * For 82575 and 82576, the RX descriptors must be
   7284 			 * initialized after the setting of RCTL.EN in
   7285 			 * wm_set_filter()
   7286 			 */
   7287 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7288 				wm_init_rxdesc(rxq, i);
   7289 		}
   7290 	}
   7291 	rxq->rxq_ptr = 0;
   7292 	rxq->rxq_discard = 0;
   7293 	WM_RXCHAIN_RESET(rxq);
   7294 
   7295 	return 0;
   7296 }
   7297 
   7298 static int
   7299 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7300     struct wm_rxqueue *rxq)
   7301 {
   7302 
   7303 	KASSERT(mutex_owned(rxq->rxq_lock));
   7304 
   7305 	/*
   7306 	 * Set up some register offsets that are different between
   7307 	 * the i82542 and the i82543 and later chips.
   7308 	 */
   7309 	if (sc->sc_type < WM_T_82543)
   7310 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7311 	else
   7312 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7313 
   7314 	wm_init_rx_regs(sc, wmq, rxq);
   7315 	return wm_init_rx_buffer(sc, rxq);
   7316 }
   7317 
   7318 /*
   7319  * wm_init_quques:
   7320  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7321  */
   7322 static int
   7323 wm_init_txrx_queues(struct wm_softc *sc)
   7324 {
   7325 	int i, error = 0;
   7326 
   7327 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7328 		device_xname(sc->sc_dev), __func__));
   7329 
   7330 	for (i = 0; i < sc->sc_nqueues; i++) {
   7331 		struct wm_queue *wmq = &sc->sc_queue[i];
   7332 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7333 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7334 
   7335 		/*
   7336 		 * TODO
   7337 		 * Currently, a constant value is used instead of AIM.
   7338 		 * Furthermore, the interrupt interval of the multiqueue
   7339 		 * (polling mode) path is shorter than the default value.
   7340 		 * More tuning and AIM are required.
   7341 		 */
   7342 		if (wm_is_using_multiqueue(sc))
   7343 			wmq->wmq_itr = 50;
   7344 		else
   7345 			wmq->wmq_itr = sc->sc_itr_init;
   7346 		wmq->wmq_set_itr = true;
   7347 
   7348 		mutex_enter(txq->txq_lock);
   7349 		wm_init_tx_queue(sc, wmq, txq);
   7350 		mutex_exit(txq->txq_lock);
   7351 
   7352 		mutex_enter(rxq->rxq_lock);
   7353 		error = wm_init_rx_queue(sc, wmq, rxq);
   7354 		mutex_exit(rxq->rxq_lock);
   7355 		if (error)
   7356 			break;
   7357 	}
   7358 
   7359 	return error;
   7360 }
   7361 
   7362 /*
   7363  * wm_tx_offload:
   7364  *
   7365  *	Set up TCP/IP checksumming parameters for the
   7366  *	specified packet.
   7367  */
   7368 static int
   7369 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7370     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7371 {
   7372 	struct mbuf *m0 = txs->txs_mbuf;
   7373 	struct livengood_tcpip_ctxdesc *t;
   7374 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7375 	uint32_t ipcse;
   7376 	struct ether_header *eh;
   7377 	int offset, iphl;
   7378 	uint8_t fields;
   7379 
   7380 	/*
   7381 	 * XXX It would be nice if the mbuf pkthdr had offset
   7382 	 * fields for the protocol headers.
   7383 	 */
   7384 
   7385 	eh = mtod(m0, struct ether_header *);
   7386 	switch (htons(eh->ether_type)) {
   7387 	case ETHERTYPE_IP:
   7388 	case ETHERTYPE_IPV6:
   7389 		offset = ETHER_HDR_LEN;
   7390 		break;
   7391 
   7392 	case ETHERTYPE_VLAN:
   7393 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7394 		break;
   7395 
   7396 	default:
   7397 		/* Don't support this protocol or encapsulation. */
   7398 		*fieldsp = 0;
   7399 		*cmdp = 0;
   7400 		return 0;
   7401 	}
   7402 
   7403 	if ((m0->m_pkthdr.csum_flags &
   7404 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7405 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7406 	} else
   7407 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7408 
   7409 	ipcse = offset + iphl - 1;
   7410 
   7411 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7412 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7413 	seg = 0;
   7414 	fields = 0;
   7415 
   7416 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7417 		int hlen = offset + iphl;
   7418 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7419 
   7420 		if (__predict_false(m0->m_len <
   7421 				    (hlen + sizeof(struct tcphdr)))) {
   7422 			/*
   7423 			 * TCP/IP headers are not in the first mbuf; we need
   7424 			 * to do this the slow and painful way. Let's just
   7425 			 * hope this doesn't happen very often.
   7426 			 */
   7427 			struct tcphdr th;
   7428 
   7429 			WM_Q_EVCNT_INCR(txq, tsopain);
   7430 
   7431 			m_copydata(m0, hlen, sizeof(th), &th);
   7432 			if (v4) {
   7433 				struct ip ip;
   7434 
   7435 				m_copydata(m0, offset, sizeof(ip), &ip);
   7436 				ip.ip_len = 0;
   7437 				m_copyback(m0,
   7438 				    offset + offsetof(struct ip, ip_len),
   7439 				    sizeof(ip.ip_len), &ip.ip_len);
   7440 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7441 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7442 			} else {
   7443 				struct ip6_hdr ip6;
   7444 
   7445 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7446 				ip6.ip6_plen = 0;
   7447 				m_copyback(m0,
   7448 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7449 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7450 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7451 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7452 			}
   7453 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7454 			    sizeof(th.th_sum), &th.th_sum);
   7455 
   7456 			hlen += th.th_off << 2;
   7457 		} else {
   7458 			/*
   7459 			 * TCP/IP headers are in the first mbuf; we can do
   7460 			 * this the easy way.
   7461 			 */
   7462 			struct tcphdr *th;
   7463 
   7464 			if (v4) {
   7465 				struct ip *ip =
   7466 				    (void *)(mtod(m0, char *) + offset);
   7467 				th = (void *)(mtod(m0, char *) + hlen);
   7468 
   7469 				ip->ip_len = 0;
   7470 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7471 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7472 			} else {
   7473 				struct ip6_hdr *ip6 =
   7474 				    (void *)(mtod(m0, char *) + offset);
   7475 				th = (void *)(mtod(m0, char *) + hlen);
   7476 
   7477 				ip6->ip6_plen = 0;
   7478 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7479 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7480 			}
   7481 			hlen += th->th_off << 2;
   7482 		}
   7483 
   7484 		if (v4) {
   7485 			WM_Q_EVCNT_INCR(txq, tso);
   7486 			cmdlen |= WTX_TCPIP_CMD_IP;
   7487 		} else {
   7488 			WM_Q_EVCNT_INCR(txq, tso6);
   7489 			ipcse = 0;
   7490 		}
   7491 		cmd |= WTX_TCPIP_CMD_TSE;
   7492 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7493 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7494 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7495 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7496 	}
   7497 
   7498 	/*
   7499 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7500 	 * offload feature, if we load the context descriptor, we
   7501 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7502 	 */
   7503 
   7504 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7505 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7506 	    WTX_TCPIP_IPCSE(ipcse);
   7507 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7508 		WM_Q_EVCNT_INCR(txq, ipsum);
   7509 		fields |= WTX_IXSM;
   7510 	}
   7511 
   7512 	offset += iphl;
   7513 
   7514 	if (m0->m_pkthdr.csum_flags &
   7515 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7516 		WM_Q_EVCNT_INCR(txq, tusum);
   7517 		fields |= WTX_TXSM;
   7518 		tucs = WTX_TCPIP_TUCSS(offset) |
   7519 		    WTX_TCPIP_TUCSO(offset +
   7520 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7521 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7522 	} else if ((m0->m_pkthdr.csum_flags &
   7523 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7524 		WM_Q_EVCNT_INCR(txq, tusum6);
   7525 		fields |= WTX_TXSM;
   7526 		tucs = WTX_TCPIP_TUCSS(offset) |
   7527 		    WTX_TCPIP_TUCSO(offset +
   7528 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7529 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7530 	} else {
   7531 		/* Just initialize it to a valid TCP context. */
   7532 		tucs = WTX_TCPIP_TUCSS(offset) |
   7533 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7534 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7535 	}
   7536 
   7537 	/*
   7538 	 * We don't have to write a context descriptor for every packet,
   7539 	 * except on the 82574: there we must write a context descriptor
   7540 	 * for every packet when we use two descriptor queues.
   7541 	 * Writing a context descriptor for every packet adds overhead,
   7542 	 * but it does not cause problems.
   7543 	 */
   7544 	/* Fill in the context descriptor. */
   7545 	t = (struct livengood_tcpip_ctxdesc *)
   7546 	    &txq->txq_descs[txq->txq_next];
   7547 	t->tcpip_ipcs = htole32(ipcs);
   7548 	t->tcpip_tucs = htole32(tucs);
   7549 	t->tcpip_cmdlen = htole32(cmdlen);
   7550 	t->tcpip_seg = htole32(seg);
   7551 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7552 
   7553 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7554 	txs->txs_ndesc++;
   7555 
   7556 	*cmdp = cmd;
   7557 	*fieldsp = fields;
   7558 
   7559 	return 0;
   7560 }
   7561 
   7562 static inline int
   7563 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7564 {
   7565 	struct wm_softc *sc = ifp->if_softc;
   7566 	u_int cpuid = cpu_index(curcpu());
   7567 
   7568 	/*
   7569 	 * Currently a simple distribution strategy.
   7570 	 * TODO:
   7571 	 * Distribute by flowid (RSS hash value).
   7572 	 */
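        	/*
        	 * A worked example with hypothetical values: ncpu = 8,
        	 * sc_affinity_offset = 2, sc_nqueues = 4 and cpuid = 1
        	 * give ((1 + 8 - 2) % 8) % 4 = 7 % 4 = 3, i.e. queue 3.
        	 */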
   7573 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7574 }
   7575 
   7576 /*
   7577  * wm_start:		[ifnet interface function]
   7578  *
   7579  *	Start packet transmission on the interface.
   7580  */
   7581 static void
   7582 wm_start(struct ifnet *ifp)
   7583 {
   7584 	struct wm_softc *sc = ifp->if_softc;
   7585 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7586 
   7587 #ifdef WM_MPSAFE
   7588 	KASSERT(if_is_mpsafe(ifp));
   7589 #endif
   7590 	/*
   7591 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7592 	 */
   7593 
   7594 	mutex_enter(txq->txq_lock);
   7595 	if (!txq->txq_stopping)
   7596 		wm_start_locked(ifp);
   7597 	mutex_exit(txq->txq_lock);
   7598 }
   7599 
   7600 static void
   7601 wm_start_locked(struct ifnet *ifp)
   7602 {
   7603 	struct wm_softc *sc = ifp->if_softc;
   7604 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7605 
   7606 	wm_send_common_locked(ifp, txq, false);
   7607 }
   7608 
   7609 static int
   7610 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7611 {
   7612 	int qid;
   7613 	struct wm_softc *sc = ifp->if_softc;
   7614 	struct wm_txqueue *txq;
   7615 
   7616 	qid = wm_select_txqueue(ifp, m);
   7617 	txq = &sc->sc_queue[qid].wmq_txq;
   7618 
   7619 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7620 		m_freem(m);
   7621 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7622 		return ENOBUFS;
   7623 	}
   7624 
   7625 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7626 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7627 	if (m->m_flags & M_MCAST)
   7628 		if_statinc_ref(nsr, if_omcasts);
   7629 	IF_STAT_PUTREF(ifp);
   7630 
   7631 	if (mutex_tryenter(txq->txq_lock)) {
   7632 		if (!txq->txq_stopping)
   7633 			wm_transmit_locked(ifp, txq);
   7634 		mutex_exit(txq->txq_lock);
   7635 	}
   7636 
   7637 	return 0;
   7638 }
   7639 
   7640 static void
   7641 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7642 {
   7643 
   7644 	wm_send_common_locked(ifp, txq, true);
   7645 }
   7646 
   7647 static void
   7648 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7649     bool is_transmit)
   7650 {
   7651 	struct wm_softc *sc = ifp->if_softc;
   7652 	struct mbuf *m0;
   7653 	struct wm_txsoft *txs;
   7654 	bus_dmamap_t dmamap;
   7655 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7656 	bus_addr_t curaddr;
   7657 	bus_size_t seglen, curlen;
   7658 	uint32_t cksumcmd;
   7659 	uint8_t cksumfields;
   7660 	bool remap = true;
   7661 
   7662 	KASSERT(mutex_owned(txq->txq_lock));
   7663 
   7664 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7665 		return;
   7666 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7667 		return;
   7668 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7669 		return;
   7670 
   7671 	/* Remember the previous number of free descriptors. */
   7672 	ofree = txq->txq_free;
   7673 
   7674 	/*
   7675 	 * Loop through the send queue, setting up transmit descriptors
   7676 	 * until we drain the queue, or use up all available transmit
   7677 	 * descriptors.
   7678 	 */
   7679 	for (;;) {
   7680 		m0 = NULL;
   7681 
   7682 		/* Get a work queue entry. */
   7683 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7684 			wm_txeof(txq, UINT_MAX);
   7685 			if (txq->txq_sfree == 0) {
   7686 				DPRINTF(WM_DEBUG_TX,
   7687 				    ("%s: TX: no free job descriptors\n",
   7688 					device_xname(sc->sc_dev)));
   7689 				WM_Q_EVCNT_INCR(txq, txsstall);
   7690 				break;
   7691 			}
   7692 		}
   7693 
   7694 		/* Grab a packet off the queue. */
   7695 		if (is_transmit)
   7696 			m0 = pcq_get(txq->txq_interq);
   7697 		else
   7698 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7699 		if (m0 == NULL)
   7700 			break;
   7701 
   7702 		DPRINTF(WM_DEBUG_TX,
   7703 		    ("%s: TX: have packet to transmit: %p\n",
   7704 			device_xname(sc->sc_dev), m0));
   7705 
   7706 		txs = &txq->txq_soft[txq->txq_snext];
   7707 		dmamap = txs->txs_dmamap;
   7708 
   7709 		use_tso = (m0->m_pkthdr.csum_flags &
   7710 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7711 
   7712 		/*
   7713 		 * So says the Linux driver:
   7714 		 * The controller does a simple calculation to make sure
   7715 		 * there is enough room in the FIFO before initiating the
   7716 		 * DMA for each buffer. The calc is:
   7717 		 *	4 = ceil(buffer len / MSS)
   7718 		 * To make sure we don't overrun the FIFO, adjust the max
   7719 		 * buffer len if the MSS drops.
   7720 		 */
   7721 		dmamap->dm_maxsegsz =
   7722 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7723 		    ? m0->m_pkthdr.segsz << 2
   7724 		    : WTX_MAX_LEN;
   7725 
   7726 		/*
   7727 		 * Load the DMA map.  If this fails, the packet either
   7728 		 * didn't fit in the allotted number of segments, or we
   7729 		 * were short on resources.  For the too-many-segments
   7730 		 * case, we simply report an error and drop the packet,
   7731 		 * since we can't sanely copy a jumbo packet to a single
   7732 		 * buffer.
   7733 		 */
   7734 retry:
   7735 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7736 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7737 		if (__predict_false(error)) {
   7738 			if (error == EFBIG) {
   7739 				if (remap == true) {
   7740 					struct mbuf *m;
   7741 
   7742 					remap = false;
   7743 					m = m_defrag(m0, M_NOWAIT);
   7744 					if (m != NULL) {
   7745 						WM_Q_EVCNT_INCR(txq, defrag);
   7746 						m0 = m;
   7747 						goto retry;
   7748 					}
   7749 				}
   7750 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7751 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7752 				    "DMA segments, dropping...\n",
   7753 				    device_xname(sc->sc_dev));
   7754 				wm_dump_mbuf_chain(sc, m0);
   7755 				m_freem(m0);
   7756 				continue;
   7757 			}
   7758 			/* Short on resources, just stop for now. */
   7759 			DPRINTF(WM_DEBUG_TX,
   7760 			    ("%s: TX: dmamap load failed: %d\n",
   7761 				device_xname(sc->sc_dev), error));
   7762 			break;
   7763 		}
   7764 
   7765 		segs_needed = dmamap->dm_nsegs;
   7766 		if (use_tso) {
   7767 			/* For sentinel descriptor; see below. */
   7768 			segs_needed++;
   7769 		}
   7770 
   7771 		/*
   7772 		 * Ensure we have enough descriptors free to describe
   7773 		 * the packet. Note, we always reserve one descriptor
   7774 		 * at the end of the ring due to the semantics of the
   7775 		 * TDT register, plus one more in the event we need
   7776 		 * to load offload context.
   7777 		 */
   7778 		if (segs_needed > txq->txq_free - 2) {
   7779 			/*
   7780 			 * Not enough free descriptors to transmit this
   7781 			 * packet.  We haven't committed anything yet,
   7782 			 * so just unload the DMA map, put the packet
   7783 			 * pack on the queue, and punt. Notify the upper
   7784 			 * layer that there are no more slots left.
   7785 			 */
   7786 			DPRINTF(WM_DEBUG_TX,
   7787 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7788 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7789 				segs_needed, txq->txq_free - 1));
   7790 			if (!is_transmit)
   7791 				ifp->if_flags |= IFF_OACTIVE;
   7792 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7793 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7794 			WM_Q_EVCNT_INCR(txq, txdstall);
   7795 			break;
   7796 		}
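        		/*
        		 * The reserved descriptor exists because TDT must
        		 * never catch up with TDH: a completely full ring
        		 * would be indistinguishable from an empty one, so
        		 * one slot is always left unused.
        		 */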
   7797 
   7798 		/*
   7799 		 * Check for 82547 Tx FIFO bug. We need to do this
   7800 		 * once we know we can transmit the packet, since we
   7801 		 * do some internal FIFO space accounting here.
   7802 		 */
   7803 		if (sc->sc_type == WM_T_82547 &&
   7804 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7805 			DPRINTF(WM_DEBUG_TX,
   7806 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7807 				device_xname(sc->sc_dev)));
   7808 			if (!is_transmit)
   7809 				ifp->if_flags |= IFF_OACTIVE;
   7810 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7811 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7812 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7813 			break;
   7814 		}
   7815 
   7816 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7817 
   7818 		DPRINTF(WM_DEBUG_TX,
   7819 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7820 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7821 
   7822 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7823 
   7824 		/*
   7825 		 * Store a pointer to the packet so that we can free it
   7826 		 * later.
   7827 		 *
    7828 		 * Initially, we consider the number of descriptors the
    7829 		 * packet uses to be the number of DMA segments.  This may be
   7830 		 * incremented by 1 if we do checksum offload (a descriptor
   7831 		 * is used to set the checksum context).
   7832 		 */
   7833 		txs->txs_mbuf = m0;
   7834 		txs->txs_firstdesc = txq->txq_next;
   7835 		txs->txs_ndesc = segs_needed;
   7836 
   7837 		/* Set up offload parameters for this packet. */
   7838 		if (m0->m_pkthdr.csum_flags &
   7839 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7840 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7841 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7842 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7843 					  &cksumfields) != 0) {
   7844 				/* Error message already displayed. */
   7845 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7846 				continue;
   7847 			}
   7848 		} else {
   7849 			cksumcmd = 0;
   7850 			cksumfields = 0;
   7851 		}
   7852 
   7853 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
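         		/*
         		 * IDE delays the Tx completion interrupt and IFCS has
         		 * the chip insert the FCS; cksumcmd is OR'd into every
         		 * descriptor of this packet in the loop below.
         		 */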
   7854 
   7855 		/* Sync the DMA map. */
   7856 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7857 		    BUS_DMASYNC_PREWRITE);
   7858 
   7859 		/* Initialize the transmit descriptor. */
   7860 		for (nexttx = txq->txq_next, seg = 0;
   7861 		     seg < dmamap->dm_nsegs; seg++) {
   7862 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7863 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7864 			     seglen != 0;
   7865 			     curaddr += curlen, seglen -= curlen,
   7866 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7867 				curlen = seglen;
   7868 
   7869 				/*
   7870 				 * So says the Linux driver:
   7871 				 * Work around for premature descriptor
   7872 				 * write-backs in TSO mode.  Append a
   7873 				 * 4-byte sentinel descriptor.
   7874 				 */
   7875 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7876 				    curlen > 8)
   7877 					curlen -= 4;
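         				/*
         				 * Shaving 4 bytes off the final chunk
         				 * leaves seglen == 4 after this pass,
         				 * so the loop runs once more and emits
         				 * a trailing 4-byte descriptor: the
         				 * sentinel.
         				 */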
   7878 
   7879 				wm_set_dma_addr(
   7880 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7881 				txq->txq_descs[nexttx].wtx_cmdlen
   7882 				    = htole32(cksumcmd | curlen);
   7883 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7884 				    = 0;
   7885 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7886 				    = cksumfields;
    7887 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7888 				lasttx = nexttx;
   7889 
   7890 				DPRINTF(WM_DEBUG_TX,
   7891 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7892 					"len %#04zx\n",
   7893 					device_xname(sc->sc_dev), nexttx,
   7894 					(uint64_t)curaddr, curlen));
   7895 			}
   7896 		}
   7897 
   7898 		KASSERT(lasttx != -1);
   7899 
   7900 		/*
   7901 		 * Set up the command byte on the last descriptor of
   7902 		 * the packet. If we're in the interrupt delay window,
   7903 		 * delay the interrupt.
   7904 		 */
   7905 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7906 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
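         		/*
         		 * EOP marks the end of the packet; RS requests a status
         		 * write-back, which wm_txeof() polls for via the
         		 * WTX_ST_DD bit of this (the last) descriptor.
         		 */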
   7907 
   7908 		/*
   7909 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7910 		 * up the descriptor to encapsulate the packet for us.
   7911 		 *
   7912 		 * This is only valid on the last descriptor of the packet.
   7913 		 */
   7914 		if (vlan_has_tag(m0)) {
   7915 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7916 			    htole32(WTX_CMD_VLE);
   7917 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7918 			    = htole16(vlan_get_tag(m0));
   7919 		}
   7920 
   7921 		txs->txs_lastdesc = lasttx;
   7922 
   7923 		DPRINTF(WM_DEBUG_TX,
   7924 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7925 			device_xname(sc->sc_dev),
   7926 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7927 
   7928 		/* Sync the descriptors we're using. */
   7929 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7930 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7931 
   7932 		/* Give the packet to the chip. */
   7933 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
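         		/*
         		 * Writing TDT one past the last filled slot hands the
         		 * new descriptors to the hardware; the chip never
         		 * processes the slot TDT points at, which is why one
         		 * descriptor is always held in reserve.
         		 */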
   7934 
   7935 		DPRINTF(WM_DEBUG_TX,
   7936 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7937 
   7938 		DPRINTF(WM_DEBUG_TX,
   7939 		    ("%s: TX: finished transmitting packet, job %d\n",
   7940 			device_xname(sc->sc_dev), txq->txq_snext));
   7941 
   7942 		/* Advance the tx pointer. */
   7943 		txq->txq_free -= txs->txs_ndesc;
   7944 		txq->txq_next = nexttx;
   7945 
   7946 		txq->txq_sfree--;
   7947 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7948 
   7949 		/* Pass the packet to any BPF listeners. */
   7950 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7951 	}
   7952 
   7953 	if (m0 != NULL) {
   7954 		if (!is_transmit)
   7955 			ifp->if_flags |= IFF_OACTIVE;
   7956 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7957 		WM_Q_EVCNT_INCR(txq, descdrop);
   7958 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7959 			__func__));
   7960 		m_freem(m0);
   7961 	}
   7962 
   7963 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7964 		/* No more slots; notify upper layer. */
   7965 		if (!is_transmit)
   7966 			ifp->if_flags |= IFF_OACTIVE;
   7967 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7968 	}
   7969 
   7970 	if (txq->txq_free != ofree) {
   7971 		/* Set a watchdog timer in case the chip flakes out. */
   7972 		txq->txq_lastsent = time_uptime;
   7973 		txq->txq_sending = true;
   7974 	}
   7975 }
   7976 
   7977 /*
   7978  * wm_nq_tx_offload:
   7979  *
   7980  *	Set up TCP/IP checksumming parameters for the
   7981  *	specified packet, for NEWQUEUE devices
   7982  */
   7983 static int
   7984 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7985     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7986 {
   7987 	struct mbuf *m0 = txs->txs_mbuf;
   7988 	uint32_t vl_len, mssidx, cmdc;
   7989 	struct ether_header *eh;
   7990 	int offset, iphl;
   7991 
   7992 	/*
   7993 	 * XXX It would be nice if the mbuf pkthdr had offset
   7994 	 * fields for the protocol headers.
   7995 	 */
   7996 	*cmdlenp = 0;
   7997 	*fieldsp = 0;
   7998 
   7999 	eh = mtod(m0, struct ether_header *);
    8000 	switch (ntohs(eh->ether_type)) {
   8001 	case ETHERTYPE_IP:
   8002 	case ETHERTYPE_IPV6:
   8003 		offset = ETHER_HDR_LEN;
   8004 		break;
   8005 
   8006 	case ETHERTYPE_VLAN:
   8007 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8008 		break;
   8009 
   8010 	default:
   8011 		/* Don't support this protocol or encapsulation. */
   8012 		*do_csum = false;
   8013 		return 0;
   8014 	}
   8015 	*do_csum = true;
   8016 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8017 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8018 
   8019 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8020 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8021 
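         	/*
         	 * vl_len collects the MAC header length, the IP header length
         	 * and (below) the VLAN tag; it becomes the VLAN/LEN word of
         	 * the context descriptor.
         	 */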
   8022 	if ((m0->m_pkthdr.csum_flags &
   8023 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8024 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8025 	} else {
   8026 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8027 	}
   8028 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8029 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8030 
   8031 	if (vlan_has_tag(m0)) {
   8032 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8033 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8034 		*cmdlenp |= NQTX_CMD_VLE;
   8035 	}
   8036 
   8037 	mssidx = 0;
   8038 
   8039 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8040 		int hlen = offset + iphl;
   8041 		int tcp_hlen;
   8042 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8043 
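         		/*
         		 * The controller rewrites the TCP checksum in each TSO
         		 * segment and expects th_sum to be preseeded with the
         		 * pseudo-header checksum computed over a zero length;
         		 * that is why ip_len/ip6_plen are cleared before th_sum
         		 * is filled in below.
         		 */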
   8044 		if (__predict_false(m0->m_len <
   8045 				    (hlen + sizeof(struct tcphdr)))) {
   8046 			/*
   8047 			 * TCP/IP headers are not in the first mbuf; we need
   8048 			 * to do this the slow and painful way. Let's just
   8049 			 * hope this doesn't happen very often.
   8050 			 */
   8051 			struct tcphdr th;
   8052 
   8053 			WM_Q_EVCNT_INCR(txq, tsopain);
   8054 
   8055 			m_copydata(m0, hlen, sizeof(th), &th);
   8056 			if (v4) {
   8057 				struct ip ip;
   8058 
   8059 				m_copydata(m0, offset, sizeof(ip), &ip);
   8060 				ip.ip_len = 0;
   8061 				m_copyback(m0,
   8062 				    offset + offsetof(struct ip, ip_len),
   8063 				    sizeof(ip.ip_len), &ip.ip_len);
   8064 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8065 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8066 			} else {
   8067 				struct ip6_hdr ip6;
   8068 
   8069 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8070 				ip6.ip6_plen = 0;
   8071 				m_copyback(m0,
   8072 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8073 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8074 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8075 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8076 			}
   8077 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8078 			    sizeof(th.th_sum), &th.th_sum);
   8079 
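         			/* th_off counts 32-bit words; "<< 2" scales to bytes. */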
   8080 			tcp_hlen = th.th_off << 2;
   8081 		} else {
   8082 			/*
   8083 			 * TCP/IP headers are in the first mbuf; we can do
   8084 			 * this the easy way.
   8085 			 */
   8086 			struct tcphdr *th;
   8087 
   8088 			if (v4) {
   8089 				struct ip *ip =
   8090 				    (void *)(mtod(m0, char *) + offset);
   8091 				th = (void *)(mtod(m0, char *) + hlen);
   8092 
   8093 				ip->ip_len = 0;
   8094 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8095 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8096 			} else {
   8097 				struct ip6_hdr *ip6 =
   8098 				    (void *)(mtod(m0, char *) + offset);
   8099 				th = (void *)(mtod(m0, char *) + hlen);
   8100 
   8101 				ip6->ip6_plen = 0;
   8102 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8103 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8104 			}
   8105 			tcp_hlen = th->th_off << 2;
   8106 		}
   8107 		hlen += tcp_hlen;
   8108 		*cmdlenp |= NQTX_CMD_TSE;
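         		/*
         		 * TSE enables TCP segmentation; the MSS and L4 header
         		 * length packed into mssidx below tell the hardware how
         		 * to carve up the payload.
         		 */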
   8109 
   8110 		if (v4) {
   8111 			WM_Q_EVCNT_INCR(txq, tso);
   8112 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8113 		} else {
   8114 			WM_Q_EVCNT_INCR(txq, tso6);
   8115 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8116 		}
   8117 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8118 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8119 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8120 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8121 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8122 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8123 	} else {
   8124 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8125 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8126 	}
   8127 
   8128 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8129 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8130 		cmdc |= NQTXC_CMD_IP4;
   8131 	}
   8132 
   8133 	if (m0->m_pkthdr.csum_flags &
   8134 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8135 		WM_Q_EVCNT_INCR(txq, tusum);
   8136 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8137 			cmdc |= NQTXC_CMD_TCP;
   8138 		else
   8139 			cmdc |= NQTXC_CMD_UDP;
   8140 
   8141 		cmdc |= NQTXC_CMD_IP4;
   8142 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8143 	}
   8144 	if (m0->m_pkthdr.csum_flags &
   8145 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8146 		WM_Q_EVCNT_INCR(txq, tusum6);
   8147 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8148 			cmdc |= NQTXC_CMD_TCP;
   8149 		else
   8150 			cmdc |= NQTXC_CMD_UDP;
   8151 
   8152 		cmdc |= NQTXC_CMD_IP6;
   8153 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8154 	}
   8155 
    8156 	/*
    8157 	 * On NEWQUEUE controllers (82575, 82576, 82580, I350, I354,
    8158 	 * I210 and I211) we don't have to write a context descriptor
    8159 	 * for every packet; writing one per Tx queue is enough for
    8160 	 * these controllers.
    8161 	 * Writing a context descriptor for every packet adds a little
    8162 	 * overhead, but it does not cause problems.
    8163 	 */
   8164 	/* Fill in the context descriptor. */
   8165 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8166 	    htole32(vl_len);
   8167 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8168 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8169 	    htole32(cmdc);
   8170 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8171 	    htole32(mssidx);
   8172 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8173 	DPRINTF(WM_DEBUG_TX,
   8174 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8175 		txq->txq_next, 0, vl_len));
   8176 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8177 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8178 	txs->txs_ndesc++;
   8179 	return 0;
   8180 }
   8181 
   8182 /*
   8183  * wm_nq_start:		[ifnet interface function]
   8184  *
   8185  *	Start packet transmission on the interface for NEWQUEUE devices
   8186  */
   8187 static void
   8188 wm_nq_start(struct ifnet *ifp)
   8189 {
   8190 	struct wm_softc *sc = ifp->if_softc;
   8191 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8192 
   8193 #ifdef WM_MPSAFE
   8194 	KASSERT(if_is_mpsafe(ifp));
   8195 #endif
   8196 	/*
   8197 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8198 	 */
   8199 
   8200 	mutex_enter(txq->txq_lock);
   8201 	if (!txq->txq_stopping)
   8202 		wm_nq_start_locked(ifp);
   8203 	mutex_exit(txq->txq_lock);
   8204 }
   8205 
   8206 static void
   8207 wm_nq_start_locked(struct ifnet *ifp)
   8208 {
   8209 	struct wm_softc *sc = ifp->if_softc;
   8210 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8211 
   8212 	wm_nq_send_common_locked(ifp, txq, false);
   8213 }
   8214 
   8215 static int
   8216 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8217 {
   8218 	int qid;
   8219 	struct wm_softc *sc = ifp->if_softc;
   8220 	struct wm_txqueue *txq;
   8221 
   8222 	qid = wm_select_txqueue(ifp, m);
   8223 	txq = &sc->sc_queue[qid].wmq_txq;
   8224 
   8225 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8226 		m_freem(m);
   8227 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8228 		return ENOBUFS;
   8229 	}
   8230 
   8231 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8232 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8233 	if (m->m_flags & M_MCAST)
   8234 		if_statinc_ref(nsr, if_omcasts);
   8235 	IF_STAT_PUTREF(ifp);
   8236 
    8237 	/*
    8238 	 * There are two situations in which this mutex_tryenter() can
    8239 	 * fail at run time:
    8240 	 *     (1) contention with the interrupt handler
    8241 	 *         (wm_txrxintr_msix())
    8242 	 *     (2) contention with the deferred if_start softint
    8243 	 *         (wm_handle_queue())
    8244 	 * In either case, the last packet enqueued to txq->txq_interq
    8245 	 * is eventually dequeued by wm_deferred_start_locked(), so it
    8246 	 * does not get stuck.
    8247 	 */
   8248 	if (mutex_tryenter(txq->txq_lock)) {
   8249 		if (!txq->txq_stopping)
   8250 			wm_nq_transmit_locked(ifp, txq);
   8251 		mutex_exit(txq->txq_lock);
   8252 	}
   8253 
   8254 	return 0;
   8255 }
   8256 
   8257 static void
   8258 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8259 {
   8260 
   8261 	wm_nq_send_common_locked(ifp, txq, true);
   8262 }
   8263 
   8264 static void
   8265 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8266     bool is_transmit)
   8267 {
   8268 	struct wm_softc *sc = ifp->if_softc;
   8269 	struct mbuf *m0;
   8270 	struct wm_txsoft *txs;
   8271 	bus_dmamap_t dmamap;
   8272 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8273 	bool do_csum, sent;
   8274 	bool remap = true;
   8275 
   8276 	KASSERT(mutex_owned(txq->txq_lock));
   8277 
   8278 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8279 		return;
   8280 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8281 		return;
   8282 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8283 		return;
   8284 
   8285 	sent = false;
   8286 
   8287 	/*
   8288 	 * Loop through the send queue, setting up transmit descriptors
   8289 	 * until we drain the queue, or use up all available transmit
   8290 	 * descriptors.
   8291 	 */
   8292 	for (;;) {
   8293 		m0 = NULL;
   8294 
   8295 		/* Get a work queue entry. */
   8296 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8297 			wm_txeof(txq, UINT_MAX);
   8298 			if (txq->txq_sfree == 0) {
   8299 				DPRINTF(WM_DEBUG_TX,
   8300 				    ("%s: TX: no free job descriptors\n",
   8301 					device_xname(sc->sc_dev)));
   8302 				WM_Q_EVCNT_INCR(txq, txsstall);
   8303 				break;
   8304 			}
   8305 		}
   8306 
   8307 		/* Grab a packet off the queue. */
   8308 		if (is_transmit)
   8309 			m0 = pcq_get(txq->txq_interq);
   8310 		else
   8311 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8312 		if (m0 == NULL)
   8313 			break;
   8314 
   8315 		DPRINTF(WM_DEBUG_TX,
   8316 		    ("%s: TX: have packet to transmit: %p\n",
   8317 		    device_xname(sc->sc_dev), m0));
   8318 
   8319 		txs = &txq->txq_soft[txq->txq_snext];
   8320 		dmamap = txs->txs_dmamap;
   8321 
   8322 		/*
   8323 		 * Load the DMA map.  If this fails, the packet either
   8324 		 * didn't fit in the allotted number of segments, or we
   8325 		 * were short on resources.  For the too-many-segments
   8326 		 * case, we simply report an error and drop the packet,
   8327 		 * since we can't sanely copy a jumbo packet to a single
   8328 		 * buffer.
   8329 		 */
   8330 retry:
   8331 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8332 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8333 		if (__predict_false(error)) {
   8334 			if (error == EFBIG) {
   8335 				if (remap == true) {
   8336 					struct mbuf *m;
   8337 
   8338 					remap = false;
   8339 					m = m_defrag(m0, M_NOWAIT);
   8340 					if (m != NULL) {
   8341 						WM_Q_EVCNT_INCR(txq, defrag);
   8342 						m0 = m;
   8343 						goto retry;
   8344 					}
   8345 				}
   8346 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8347 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8348 				    "DMA segments, dropping...\n",
   8349 				    device_xname(sc->sc_dev));
   8350 				wm_dump_mbuf_chain(sc, m0);
   8351 				m_freem(m0);
   8352 				continue;
   8353 			}
   8354 			/* Short on resources, just stop for now. */
   8355 			DPRINTF(WM_DEBUG_TX,
   8356 			    ("%s: TX: dmamap load failed: %d\n",
   8357 				device_xname(sc->sc_dev), error));
   8358 			break;
   8359 		}
   8360 
   8361 		segs_needed = dmamap->dm_nsegs;
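         		/*
         		 * Unlike the legacy Tx path above, no extra descriptor
         		 * is counted for a TSO sentinel here; this path does
         		 * not use that workaround.
         		 */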
   8362 
   8363 		/*
   8364 		 * Ensure we have enough descriptors free to describe
   8365 		 * the packet. Note, we always reserve one descriptor
   8366 		 * at the end of the ring due to the semantics of the
   8367 		 * TDT register, plus one more in the event we need
   8368 		 * to load offload context.
   8369 		 */
   8370 		if (segs_needed > txq->txq_free - 2) {
   8371 			/*
   8372 			 * Not enough free descriptors to transmit this
   8373 			 * packet.  We haven't committed anything yet,
   8374 			 * so just unload the DMA map, put the packet
    8375 			 * back on the queue, and punt. Notify the upper
   8376 			 * layer that there are no more slots left.
   8377 			 */
   8378 			DPRINTF(WM_DEBUG_TX,
   8379 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8380 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8381 				segs_needed, txq->txq_free - 1));
   8382 			if (!is_transmit)
   8383 				ifp->if_flags |= IFF_OACTIVE;
   8384 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8385 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8386 			WM_Q_EVCNT_INCR(txq, txdstall);
   8387 			break;
   8388 		}
   8389 
   8390 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8391 
   8392 		DPRINTF(WM_DEBUG_TX,
   8393 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8394 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8395 
   8396 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8397 
   8398 		/*
   8399 		 * Store a pointer to the packet so that we can free it
   8400 		 * later.
   8401 		 *
    8402 		 * Initially, we consider the number of descriptors the
    8403 		 * packet uses to be the number of DMA segments.  This may be
   8404 		 * incremented by 1 if we do checksum offload (a descriptor
   8405 		 * is used to set the checksum context).
   8406 		 */
   8407 		txs->txs_mbuf = m0;
   8408 		txs->txs_firstdesc = txq->txq_next;
   8409 		txs->txs_ndesc = segs_needed;
   8410 
   8411 		/* Set up offload parameters for this packet. */
   8412 		uint32_t cmdlen, fields, dcmdlen;
   8413 		if (m0->m_pkthdr.csum_flags &
   8414 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8415 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8416 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8417 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8418 			    &do_csum) != 0) {
   8419 				/* Error message already displayed. */
   8420 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8421 				continue;
   8422 			}
   8423 		} else {
   8424 			do_csum = false;
   8425 			cmdlen = 0;
   8426 			fields = 0;
   8427 		}
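         		/*
         		 * do_csum also selects the descriptor format below:
         		 * plain legacy descriptors when no offload is wanted,
         		 * advanced (DEXT) data descriptors when it is.
         		 */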
   8428 
   8429 		/* Sync the DMA map. */
   8430 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8431 		    BUS_DMASYNC_PREWRITE);
   8432 
   8433 		/* Initialize the first transmit descriptor. */
   8434 		nexttx = txq->txq_next;
   8435 		if (!do_csum) {
   8436 			/* Setup a legacy descriptor */
   8437 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8438 			    dmamap->dm_segs[0].ds_addr);
   8439 			txq->txq_descs[nexttx].wtx_cmdlen =
   8440 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8441 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8442 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8443 			if (vlan_has_tag(m0)) {
   8444 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8445 				    htole32(WTX_CMD_VLE);
   8446 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8447 				    htole16(vlan_get_tag(m0));
   8448 			} else
    8449 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8450 
   8451 			dcmdlen = 0;
   8452 		} else {
   8453 			/* Setup an advanced data descriptor */
   8454 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8455 			    htole64(dmamap->dm_segs[0].ds_addr);
   8456 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8457 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8458 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8459 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8460 			    htole32(fields);
   8461 			DPRINTF(WM_DEBUG_TX,
   8462 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8463 				device_xname(sc->sc_dev), nexttx,
   8464 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8465 			DPRINTF(WM_DEBUG_TX,
   8466 			    ("\t 0x%08x%08x\n", fields,
   8467 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8468 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8469 		}
   8470 
   8471 		lasttx = nexttx;
   8472 		nexttx = WM_NEXTTX(txq, nexttx);
   8473 		/*
   8474 		 * Fill in the next descriptors. legacy or advanced format
   8475 		 * is the same here
   8476 		 */
   8477 		for (seg = 1; seg < dmamap->dm_nsegs;
   8478 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8479 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8480 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8481 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8482 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8483 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8484 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8485 			lasttx = nexttx;
   8486 
   8487 			DPRINTF(WM_DEBUG_TX,
   8488 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8489 				device_xname(sc->sc_dev), nexttx,
   8490 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8491 				dmamap->dm_segs[seg].ds_len));
   8492 		}
   8493 
   8494 		KASSERT(lasttx != -1);
   8495 
   8496 		/*
   8497 		 * Set up the command byte on the last descriptor of
   8498 		 * the packet. If we're in the interrupt delay window,
   8499 		 * delay the interrupt.
   8500 		 */
   8501 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8502 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8503 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8504 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
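         		/*
         		 * The KASSERT above is why this single write is valid
         		 * for both formats: EOP and RS occupy the same bit
         		 * positions in legacy and advanced descriptors.
         		 */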
   8505 
   8506 		txs->txs_lastdesc = lasttx;
   8507 
   8508 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8509 		    device_xname(sc->sc_dev),
   8510 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8511 
   8512 		/* Sync the descriptors we're using. */
   8513 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8514 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8515 
   8516 		/* Give the packet to the chip. */
   8517 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8518 		sent = true;
   8519 
   8520 		DPRINTF(WM_DEBUG_TX,
   8521 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8522 
   8523 		DPRINTF(WM_DEBUG_TX,
   8524 		    ("%s: TX: finished transmitting packet, job %d\n",
   8525 			device_xname(sc->sc_dev), txq->txq_snext));
   8526 
   8527 		/* Advance the tx pointer. */
   8528 		txq->txq_free -= txs->txs_ndesc;
   8529 		txq->txq_next = nexttx;
   8530 
   8531 		txq->txq_sfree--;
   8532 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8533 
   8534 		/* Pass the packet to any BPF listeners. */
   8535 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8536 	}
   8537 
   8538 	if (m0 != NULL) {
   8539 		if (!is_transmit)
   8540 			ifp->if_flags |= IFF_OACTIVE;
   8541 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8542 		WM_Q_EVCNT_INCR(txq, descdrop);
   8543 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8544 			__func__));
   8545 		m_freem(m0);
   8546 	}
   8547 
   8548 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8549 		/* No more slots; notify upper layer. */
   8550 		if (!is_transmit)
   8551 			ifp->if_flags |= IFF_OACTIVE;
   8552 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8553 	}
   8554 
   8555 	if (sent) {
   8556 		/* Set a watchdog timer in case the chip flakes out. */
   8557 		txq->txq_lastsent = time_uptime;
   8558 		txq->txq_sending = true;
   8559 	}
   8560 }
   8561 
   8562 static void
   8563 wm_deferred_start_locked(struct wm_txqueue *txq)
   8564 {
   8565 	struct wm_softc *sc = txq->txq_sc;
   8566 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8567 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8568 	int qid = wmq->wmq_id;
   8569 
   8570 	KASSERT(mutex_owned(txq->txq_lock));
   8571 
   8572 	if (txq->txq_stopping) {
   8573 		mutex_exit(txq->txq_lock);
   8574 		return;
   8575 	}
   8576 
   8577 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8578 		/* XXX needed for ALTQ or a single-CPU system */
   8579 		if (qid == 0)
   8580 			wm_nq_start_locked(ifp);
   8581 		wm_nq_transmit_locked(ifp, txq);
   8582 	} else {
    8583 		/* XXX needed for ALTQ or a single-CPU system */
   8584 		if (qid == 0)
   8585 			wm_start_locked(ifp);
   8586 		wm_transmit_locked(ifp, txq);
   8587 	}
   8588 }
   8589 
   8590 /* Interrupt */
   8591 
   8592 /*
   8593  * wm_txeof:
   8594  *
   8595  *	Helper; handle transmit interrupts.
   8596  */
   8597 static bool
   8598 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8599 {
   8600 	struct wm_softc *sc = txq->txq_sc;
   8601 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8602 	struct wm_txsoft *txs;
   8603 	int count = 0;
   8604 	int i;
   8605 	uint8_t status;
   8606 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8607 	bool more = false;
   8608 
   8609 	KASSERT(mutex_owned(txq->txq_lock));
   8610 
   8611 	if (txq->txq_stopping)
   8612 		return false;
   8613 
   8614 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8615 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8616 	if (wmq->wmq_id == 0)
   8617 		ifp->if_flags &= ~IFF_OACTIVE;
   8618 
   8619 	/*
   8620 	 * Go through the Tx list and free mbufs for those
   8621 	 * frames which have been transmitted.
   8622 	 */
   8623 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8624 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8625 		if (limit-- == 0) {
   8626 			more = true;
   8627 			DPRINTF(WM_DEBUG_TX,
   8628 			    ("%s: TX: loop limited, job %d is not processed\n",
   8629 				device_xname(sc->sc_dev), i));
   8630 			break;
   8631 		}
   8632 
   8633 		txs = &txq->txq_soft[i];
   8634 
   8635 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8636 			device_xname(sc->sc_dev), i));
   8637 
   8638 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8639 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8640 
   8641 		status =
   8642 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8643 		if ((status & WTX_ST_DD) == 0) {
   8644 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8645 			    BUS_DMASYNC_PREREAD);
   8646 			break;
   8647 		}
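         		/*
         		 * Jobs complete in order, so a job whose last
         		 * descriptor lacks DD means the chip still owns
         		 * everything from here on; the PREREAD sync above lets
         		 * a later call re-read that status.
         		 */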
   8648 
   8649 		count++;
   8650 		DPRINTF(WM_DEBUG_TX,
   8651 		    ("%s: TX: job %d done: descs %d..%d\n",
   8652 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8653 		    txs->txs_lastdesc));
   8654 
   8655 		/*
   8656 		 * XXX We should probably be using the statistics
   8657 		 * XXX registers, but I don't know if they exist
   8658 		 * XXX on chips before the i82544.
   8659 		 */
   8660 
   8661 #ifdef WM_EVENT_COUNTERS
   8662 		if (status & WTX_ST_TU)
   8663 			WM_Q_EVCNT_INCR(txq, underrun);
   8664 #endif /* WM_EVENT_COUNTERS */
   8665 
    8666 		/*
    8667 		 * The documents for 82574 and newer say the status field
    8668 		 * has neither an EC (Excessive Collision) bit nor an LC
    8669 		 * (Late Collision) bit; both are reserved. See the "PCIe
    8670 		 * GbE Controller Open Source Software Developer's Manual"
    8671 		 * and the 82574 (and newer) datasheets.
    8672 		 *
    8673 		 * XXX The LC bit has been seen set on I218 even at full
    8674 		 * duplex, so it may have some other, undocumented meaning.
    8675 		 */
   8676 
   8677 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8678 		    && ((sc->sc_type < WM_T_82574)
   8679 			|| (sc->sc_type == WM_T_80003))) {
   8680 			if_statinc(ifp, if_oerrors);
   8681 			if (status & WTX_ST_LC)
   8682 				log(LOG_WARNING, "%s: late collision\n",
   8683 				    device_xname(sc->sc_dev));
   8684 			else if (status & WTX_ST_EC) {
   8685 				if_statadd(ifp, if_collisions,
   8686 				    TX_COLLISION_THRESHOLD + 1);
   8687 				log(LOG_WARNING, "%s: excessive collisions\n",
   8688 				    device_xname(sc->sc_dev));
   8689 			}
   8690 		} else
   8691 			if_statinc(ifp, if_opackets);
   8692 
   8693 		txq->txq_packets++;
   8694 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8695 
   8696 		txq->txq_free += txs->txs_ndesc;
   8697 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8698 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8699 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8700 		m_freem(txs->txs_mbuf);
   8701 		txs->txs_mbuf = NULL;
   8702 	}
   8703 
   8704 	/* Update the dirty transmit buffer pointer. */
   8705 	txq->txq_sdirty = i;
   8706 	DPRINTF(WM_DEBUG_TX,
   8707 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8708 
   8709 	/*
   8710 	 * If there are no more pending transmissions, cancel the watchdog
   8711 	 * timer.
   8712 	 */
   8713 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8714 		txq->txq_sending = false;
   8715 
   8716 	return more;
   8717 }
   8718 
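         /*
          * Accessors that hide the three Rx descriptor layouts the driver
          * handles: legacy (wrx_*), the 82574's extended format (erx_*) and
          * the advanced format of NEWQUEUE (82575 and newer) controllers
          * (nqrx_*).
          */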
   8719 static inline uint32_t
   8720 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8721 {
   8722 	struct wm_softc *sc = rxq->rxq_sc;
   8723 
   8724 	if (sc->sc_type == WM_T_82574)
   8725 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8726 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8727 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8728 	else
   8729 		return rxq->rxq_descs[idx].wrx_status;
   8730 }
   8731 
   8732 static inline uint32_t
   8733 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8734 {
   8735 	struct wm_softc *sc = rxq->rxq_sc;
   8736 
   8737 	if (sc->sc_type == WM_T_82574)
   8738 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8739 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8740 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8741 	else
   8742 		return rxq->rxq_descs[idx].wrx_errors;
   8743 }
   8744 
   8745 static inline uint16_t
   8746 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8747 {
   8748 	struct wm_softc *sc = rxq->rxq_sc;
   8749 
   8750 	if (sc->sc_type == WM_T_82574)
   8751 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8752 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8753 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8754 	else
   8755 		return rxq->rxq_descs[idx].wrx_special;
   8756 }
   8757 
   8758 static inline int
   8759 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8760 {
   8761 	struct wm_softc *sc = rxq->rxq_sc;
   8762 
   8763 	if (sc->sc_type == WM_T_82574)
   8764 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8765 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8766 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8767 	else
   8768 		return rxq->rxq_descs[idx].wrx_len;
   8769 }
   8770 
   8771 #ifdef WM_DEBUG
   8772 static inline uint32_t
   8773 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8774 {
   8775 	struct wm_softc *sc = rxq->rxq_sc;
   8776 
   8777 	if (sc->sc_type == WM_T_82574)
   8778 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8779 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8780 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8781 	else
   8782 		return 0;
   8783 }
   8784 
   8785 static inline uint8_t
   8786 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8787 {
   8788 	struct wm_softc *sc = rxq->rxq_sc;
   8789 
   8790 	if (sc->sc_type == WM_T_82574)
   8791 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8792 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8793 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8794 	else
   8795 		return 0;
   8796 }
   8797 #endif /* WM_DEBUG */
   8798 
   8799 static inline bool
   8800 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8801     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8802 {
   8803 
   8804 	if (sc->sc_type == WM_T_82574)
   8805 		return (status & ext_bit) != 0;
   8806 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8807 		return (status & nq_bit) != 0;
   8808 	else
   8809 		return (status & legacy_bit) != 0;
   8810 }
   8811 
   8812 static inline bool
   8813 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8814     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8815 {
   8816 
   8817 	if (sc->sc_type == WM_T_82574)
   8818 		return (error & ext_bit) != 0;
   8819 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8820 		return (error & nq_bit) != 0;
   8821 	else
   8822 		return (error & legacy_bit) != 0;
   8823 }
   8824 
   8825 static inline bool
   8826 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8827 {
   8828 
   8829 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8830 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8831 		return true;
   8832 	else
   8833 		return false;
   8834 }
   8835 
   8836 static inline bool
   8837 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8838 {
   8839 	struct wm_softc *sc = rxq->rxq_sc;
   8840 
   8841 	/* XXX missing error bit for newqueue? */
   8842 	if (wm_rxdesc_is_set_error(sc, errors,
   8843 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8844 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8845 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8846 		NQRXC_ERROR_RXE)) {
   8847 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8848 		    EXTRXC_ERROR_SE, 0))
   8849 			log(LOG_WARNING, "%s: symbol error\n",
   8850 			    device_xname(sc->sc_dev));
   8851 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8852 		    EXTRXC_ERROR_SEQ, 0))
   8853 			log(LOG_WARNING, "%s: receive sequence error\n",
   8854 			    device_xname(sc->sc_dev));
   8855 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8856 		    EXTRXC_ERROR_CE, 0))
   8857 			log(LOG_WARNING, "%s: CRC error\n",
   8858 			    device_xname(sc->sc_dev));
   8859 		return true;
   8860 	}
   8861 
   8862 	return false;
   8863 }
   8864 
   8865 static inline bool
   8866 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8867 {
   8868 	struct wm_softc *sc = rxq->rxq_sc;
   8869 
   8870 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8871 		NQRXC_STATUS_DD)) {
   8872 		/* We have processed all of the receive descriptors. */
   8873 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8874 		return false;
   8875 	}
   8876 
   8877 	return true;
   8878 }
   8879 
   8880 static inline bool
   8881 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8882     uint16_t vlantag, struct mbuf *m)
   8883 {
   8884 
   8885 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8886 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8887 		vlan_set_tag(m, le16toh(vlantag));
   8888 	}
   8889 
   8890 	return true;
   8891 }
   8892 
   8893 static inline void
   8894 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8895     uint32_t errors, struct mbuf *m)
   8896 {
   8897 	struct wm_softc *sc = rxq->rxq_sc;
   8898 
   8899 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8900 		if (wm_rxdesc_is_set_status(sc, status,
   8901 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8902 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8903 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8904 			if (wm_rxdesc_is_set_error(sc, errors,
   8905 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8906 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8907 		}
   8908 		if (wm_rxdesc_is_set_status(sc, status,
   8909 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8910 			/*
   8911 			 * Note: we don't know if this was TCP or UDP,
   8912 			 * so we just set both bits, and expect the
   8913 			 * upper layers to deal.
   8914 			 */
   8915 			WM_Q_EVCNT_INCR(rxq, tusum);
   8916 			m->m_pkthdr.csum_flags |=
   8917 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8918 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8919 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8920 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8921 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8922 		}
   8923 	}
   8924 }
   8925 
   8926 /*
   8927  * wm_rxeof:
   8928  *
   8929  *	Helper; handle receive interrupts.
   8930  */
   8931 static bool
   8932 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8933 {
   8934 	struct wm_softc *sc = rxq->rxq_sc;
   8935 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8936 	struct wm_rxsoft *rxs;
   8937 	struct mbuf *m;
   8938 	int i, len;
   8939 	int count = 0;
   8940 	uint32_t status, errors;
   8941 	uint16_t vlantag;
   8942 	bool more = false;
   8943 
   8944 	KASSERT(mutex_owned(rxq->rxq_lock));
   8945 
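         	/*
         	 * Walk the ring from rxq_ptr, chaining the mbufs of a
         	 * multi-descriptor packet via rxq_head/rxq_tailp, until we
         	 * hit a descriptor the chip still owns or exhaust the limit.
         	 */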
   8946 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8947 		if (limit-- == 0) {
   8948 			rxq->rxq_ptr = i;
   8949 			more = true;
   8950 			DPRINTF(WM_DEBUG_RX,
   8951 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8952 				device_xname(sc->sc_dev), i));
   8953 			break;
   8954 		}
   8955 
   8956 		rxs = &rxq->rxq_soft[i];
   8957 
   8958 		DPRINTF(WM_DEBUG_RX,
   8959 		    ("%s: RX: checking descriptor %d\n",
   8960 			device_xname(sc->sc_dev), i));
   8961 		wm_cdrxsync(rxq, i,
   8962 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8963 
   8964 		status = wm_rxdesc_get_status(rxq, i);
   8965 		errors = wm_rxdesc_get_errors(rxq, i);
   8966 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8967 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8968 #ifdef WM_DEBUG
   8969 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8970 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8971 #endif
   8972 
   8973 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8974 			/*
    8975 			 * Update the receive pointer while holding
    8976 			 * rxq_lock, keeping it consistent with the counters.
    8977 			 */
   8978 			rxq->rxq_ptr = i;
   8979 			break;
   8980 		}
   8981 
   8982 		count++;
   8983 		if (__predict_false(rxq->rxq_discard)) {
   8984 			DPRINTF(WM_DEBUG_RX,
   8985 			    ("%s: RX: discarding contents of descriptor %d\n",
   8986 				device_xname(sc->sc_dev), i));
   8987 			wm_init_rxdesc(rxq, i);
   8988 			if (wm_rxdesc_is_eop(rxq, status)) {
   8989 				/* Reset our state. */
   8990 				DPRINTF(WM_DEBUG_RX,
   8991 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8992 					device_xname(sc->sc_dev)));
   8993 				rxq->rxq_discard = 0;
   8994 			}
   8995 			continue;
   8996 		}
   8997 
   8998 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8999 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9000 
   9001 		m = rxs->rxs_mbuf;
   9002 
   9003 		/*
   9004 		 * Add a new receive buffer to the ring, unless of
   9005 		 * course the length is zero. Treat the latter as a
   9006 		 * failed mapping.
   9007 		 */
   9008 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9009 			/*
   9010 			 * Failed, throw away what we've done so
   9011 			 * far, and discard the rest of the packet.
   9012 			 */
   9013 			if_statinc(ifp, if_ierrors);
   9014 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9015 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9016 			wm_init_rxdesc(rxq, i);
   9017 			if (!wm_rxdesc_is_eop(rxq, status))
   9018 				rxq->rxq_discard = 1;
   9019 			if (rxq->rxq_head != NULL)
   9020 				m_freem(rxq->rxq_head);
   9021 			WM_RXCHAIN_RESET(rxq);
   9022 			DPRINTF(WM_DEBUG_RX,
   9023 			    ("%s: RX: Rx buffer allocation failed, "
   9024 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9025 				rxq->rxq_discard ? " (discard)" : ""));
   9026 			continue;
   9027 		}
   9028 
   9029 		m->m_len = len;
   9030 		rxq->rxq_len += len;
   9031 		DPRINTF(WM_DEBUG_RX,
   9032 		    ("%s: RX: buffer at %p len %d\n",
   9033 			device_xname(sc->sc_dev), m->m_data, len));
   9034 
   9035 		/* If this is not the end of the packet, keep looking. */
   9036 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9037 			WM_RXCHAIN_LINK(rxq, m);
   9038 			DPRINTF(WM_DEBUG_RX,
   9039 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9040 				device_xname(sc->sc_dev), rxq->rxq_len));
   9041 			continue;
   9042 		}
   9043 
    9044 		/*
    9045 		 * Okay, we have the entire packet now. The chip is
    9046 		 * configured to include the FCS except on I350/I354 and
    9047 		 * I21[01] (not all chips can be configured to strip it),
    9048 		 * so we need to trim it. We may also need to adjust the
    9049 		 * length of the previous mbuf in the chain if the current
    9050 		 * mbuf is too short.
    9051 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    9052 		 * register is always set on I350, so we don't trim there.
    9053 		 */
   9054 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   9055 		    && (sc->sc_type != WM_T_I210)
   9056 		    && (sc->sc_type != WM_T_I211)) {
   9057 			if (m->m_len < ETHER_CRC_LEN) {
   9058 				rxq->rxq_tail->m_len
   9059 				    -= (ETHER_CRC_LEN - m->m_len);
   9060 				m->m_len = 0;
   9061 			} else
   9062 				m->m_len -= ETHER_CRC_LEN;
   9063 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9064 		} else
   9065 			len = rxq->rxq_len;
   9066 
   9067 		WM_RXCHAIN_LINK(rxq, m);
   9068 
   9069 		*rxq->rxq_tailp = NULL;
   9070 		m = rxq->rxq_head;
   9071 
   9072 		WM_RXCHAIN_RESET(rxq);
   9073 
   9074 		DPRINTF(WM_DEBUG_RX,
   9075 		    ("%s: RX: have entire packet, len -> %d\n",
   9076 			device_xname(sc->sc_dev), len));
   9077 
   9078 		/* If an error occurred, update stats and drop the packet. */
   9079 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9080 			m_freem(m);
   9081 			continue;
   9082 		}
   9083 
   9084 		/* No errors.  Receive the packet. */
   9085 		m_set_rcvif(m, ifp);
   9086 		m->m_pkthdr.len = len;
    9087 		/*
    9088 		 * TODO
    9089 		 * The rsshash and rsstype should be saved in this mbuf.
    9090 		 */
   9091 		DPRINTF(WM_DEBUG_RX,
   9092 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9093 			device_xname(sc->sc_dev), rsstype, rsshash));
   9094 
   9095 		/*
   9096 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9097 		 * for us.  Associate the tag with the packet.
   9098 		 */
   9099 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9100 			continue;
   9101 
   9102 		/* Set up checksum info for this packet. */
   9103 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    9104 		/*
    9105 		 * Update the receive pointer while holding rxq_lock,
    9106 		 * keeping it consistent with the counters.
    9107 		 */
   9108 		rxq->rxq_ptr = i;
   9109 		rxq->rxq_packets++;
   9110 		rxq->rxq_bytes += len;
   9111 		mutex_exit(rxq->rxq_lock);
   9112 
   9113 		/* Pass it on. */
   9114 		if_percpuq_enqueue(sc->sc_ipq, m);
   9115 
   9116 		mutex_enter(rxq->rxq_lock);
   9117 
   9118 		if (rxq->rxq_stopping)
   9119 			break;
   9120 	}
   9121 
   9122 	DPRINTF(WM_DEBUG_RX,
   9123 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9124 
   9125 	return more;
   9126 }
   9127 
   9128 /*
   9129  * wm_linkintr_gmii:
   9130  *
   9131  *	Helper; handle link interrupts for GMII.
   9132  */
   9133 static void
   9134 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9135 {
   9136 	device_t dev = sc->sc_dev;
   9137 	uint32_t status, reg;
   9138 	bool link;
   9139 	int rv;
   9140 
   9141 	KASSERT(WM_CORE_LOCKED(sc));
   9142 
   9143 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9144 		__func__));
   9145 
   9146 	if ((icr & ICR_LSC) == 0) {
   9147 		if (icr & ICR_RXSEQ)
   9148 			DPRINTF(WM_DEBUG_LINK,
   9149 			    ("%s: LINK Receive sequence error\n",
   9150 				device_xname(dev)));
   9151 		return;
   9152 	}
   9153 
   9154 	/* Link status changed */
   9155 	status = CSR_READ(sc, WMREG_STATUS);
   9156 	link = status & STATUS_LU;
   9157 	if (link) {
   9158 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9159 			device_xname(dev),
   9160 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9161 	} else {
   9162 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9163 			device_xname(dev)));
   9164 	}
   9165 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9166 		wm_gig_downshift_workaround_ich8lan(sc);
   9167 
   9168 	if ((sc->sc_type == WM_T_ICH8)
   9169 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9170 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9171 	}
   9172 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9173 		device_xname(dev)));
   9174 	mii_pollstat(&sc->sc_mii);
   9175 	if (sc->sc_type == WM_T_82543) {
   9176 		int miistatus, active;
   9177 
   9178 		/*
   9179 		 * With 82543, we need to force speed and
   9180 		 * duplex on the MAC equal to what the PHY
   9181 		 * speed and duplex configuration is.
   9182 		 */
   9183 		miistatus = sc->sc_mii.mii_media_status;
   9184 
   9185 		if (miistatus & IFM_ACTIVE) {
   9186 			active = sc->sc_mii.mii_media_active;
   9187 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9188 			switch (IFM_SUBTYPE(active)) {
   9189 			case IFM_10_T:
   9190 				sc->sc_ctrl |= CTRL_SPEED_10;
   9191 				break;
   9192 			case IFM_100_TX:
   9193 				sc->sc_ctrl |= CTRL_SPEED_100;
   9194 				break;
   9195 			case IFM_1000_T:
   9196 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9197 				break;
   9198 			default:
   9199 				/*
   9200 				 * Fiber?
    9201 				 * Should not enter here.
   9202 				 */
   9203 				device_printf(dev, "unknown media (%x)\n",
   9204 				    active);
   9205 				break;
   9206 			}
   9207 			if (active & IFM_FDX)
   9208 				sc->sc_ctrl |= CTRL_FD;
   9209 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9210 		}
   9211 	} else if (sc->sc_type == WM_T_PCH) {
   9212 		wm_k1_gig_workaround_hv(sc,
   9213 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9214 	}
   9215 
   9216 	/*
   9217 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9218 	 * aggressive resulting in many collisions. To avoid this, increase
   9219 	 * the IPG and reduce Rx latency in the PHY.
   9220 	 */
   9221 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9222 	    && link) {
   9223 		uint32_t tipg_reg;
   9224 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9225 		bool fdx;
   9226 		uint16_t emi_addr, emi_val;
   9227 
   9228 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9229 		tipg_reg &= ~TIPG_IPGT_MASK;
   9230 		fdx = status & STATUS_FD;
   9231 
   9232 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9233 			tipg_reg |= 0xff;
   9234 			/* Reduce Rx latency in analog PHY */
   9235 			emi_val = 0;
   9236 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9237 		    fdx && speed != STATUS_SPEED_1000) {
   9238 			tipg_reg |= 0xc;
   9239 			emi_val = 1;
   9240 		} else {
   9241 			/* Roll back the default values */
   9242 			tipg_reg |= 0x08;
   9243 			emi_val = 1;
   9244 		}
   9245 
   9246 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9247 
   9248 		rv = sc->phy.acquire(sc);
   9249 		if (rv)
   9250 			return;
   9251 
   9252 		if (sc->sc_type == WM_T_PCH2)
   9253 			emi_addr = I82579_RX_CONFIG;
   9254 		else
   9255 			emi_addr = I217_RX_CONFIG;
   9256 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9257 
   9258 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9259 			uint16_t phy_reg;
   9260 
   9261 			sc->phy.readreg_locked(dev, 2,
   9262 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9263 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9264 			if (speed == STATUS_SPEED_100
   9265 			    || speed == STATUS_SPEED_10)
   9266 				phy_reg |= 0x3e8;
   9267 			else
   9268 				phy_reg |= 0xfa;
   9269 			sc->phy.writereg_locked(dev, 2,
   9270 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9271 
   9272 			if (speed == STATUS_SPEED_1000) {
   9273 				sc->phy.readreg_locked(dev, 2,
   9274 				    HV_PM_CTRL, &phy_reg);
   9275 
   9276 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9277 
   9278 				sc->phy.writereg_locked(dev, 2,
   9279 				    HV_PM_CTRL, phy_reg);
   9280 			}
   9281 		}
   9282 		sc->phy.release(sc);
   9283 
   9284 		if (rv)
   9285 			return;
   9286 
   9287 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9288 			uint16_t data, ptr_gap;
   9289 
   9290 			if (speed == STATUS_SPEED_1000) {
   9291 				rv = sc->phy.acquire(sc);
   9292 				if (rv)
   9293 					return;
   9294 
   9295 				rv = sc->phy.readreg_locked(dev, 2,
   9296 				    I219_UNKNOWN1, &data);
   9297 				if (rv) {
   9298 					sc->phy.release(sc);
   9299 					return;
   9300 				}
   9301 
   9302 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9303 				if (ptr_gap < 0x18) {
   9304 					data &= ~(0x3ff << 2);
   9305 					data |= (0x18 << 2);
   9306 					rv = sc->phy.writereg_locked(dev,
   9307 					    2, I219_UNKNOWN1, data);
   9308 				}
   9309 				sc->phy.release(sc);
   9310 				if (rv)
   9311 					return;
   9312 			} else {
   9313 				rv = sc->phy.acquire(sc);
   9314 				if (rv)
   9315 					return;
   9316 
   9317 				rv = sc->phy.writereg_locked(dev, 2,
   9318 				    I219_UNKNOWN1, 0xc023);
   9319 				sc->phy.release(sc);
   9320 				if (rv)
   9321 					return;
   9322 
   9323 			}
   9324 		}
   9325 	}
   9326 
   9327 	/*
   9328 	 * I217 Packet Loss issue:
   9329 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9330 	 * on power up.
   9331 	 * Set the Beacon Duration for I217 to 8 usec
   9332 	 */
   9333 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9334 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9335 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9336 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9337 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9338 	}
   9339 
   9340 	/* Work-around I218 hang issue */
   9341 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9342 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9343 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9344 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9345 		wm_k1_workaround_lpt_lp(sc, link);
   9346 
   9347 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9348 		/*
   9349 		 * Set platform power management values for Latency
   9350 		 * Tolerance Reporting (LTR)
   9351 		 */
   9352 		wm_platform_pm_pch_lpt(sc,
   9353 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9354 	}
   9355 
   9356 	/* Clear link partner's EEE ability */
   9357 	sc->eee_lp_ability = 0;
   9358 
   9359 	/* FEXTNVM6 K1-off workaround */
   9360 	if (sc->sc_type == WM_T_PCH_SPT) {
   9361 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9362 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9363 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9364 		else
   9365 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9366 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9367 	}
   9368 
   9369 	if (!link)
   9370 		return;
   9371 
   9372 	switch (sc->sc_type) {
   9373 	case WM_T_PCH2:
   9374 		wm_k1_workaround_lv(sc);
   9375 		/* FALLTHROUGH */
   9376 	case WM_T_PCH:
   9377 		if (sc->sc_phytype == WMPHY_82578)
   9378 			wm_link_stall_workaround_hv(sc);
   9379 		break;
   9380 	default:
   9381 		break;
   9382 	}
   9383 
   9384 	/* Enable/Disable EEE after link up */
   9385 	if (sc->sc_phytype > WMPHY_82579)
   9386 		wm_set_eee_pchlan(sc);
   9387 }
   9388 
   9389 /*
   9390  * wm_linkintr_tbi:
   9391  *
   9392  *	Helper; handle link interrupts for TBI mode.
   9393  */
   9394 static void
   9395 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9396 {
   9397 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9398 	uint32_t status;
   9399 
   9400 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9401 		__func__));
   9402 
   9403 	status = CSR_READ(sc, WMREG_STATUS);
   9404 	if (icr & ICR_LSC) {
   9405 		wm_check_for_link(sc);
   9406 		if (status & STATUS_LU) {
   9407 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9408 				device_xname(sc->sc_dev),
   9409 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9410 			/*
   9411 			 * NOTE: CTRL will update TFCE and RFCE automatically,
    9412 			 * so we should update sc->sc_ctrl.
   9413 			 */
   9414 
   9415 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9416 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9417 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9418 			if (status & STATUS_FD)
   9419 				sc->sc_tctl |=
   9420 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9421 			else
   9422 				sc->sc_tctl |=
   9423 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9424 			if (sc->sc_ctrl & CTRL_TFCE)
   9425 				sc->sc_fcrtl |= FCRTL_XONE;
   9426 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9427 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9428 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9429 			sc->sc_tbi_linkup = 1;
   9430 			if_link_state_change(ifp, LINK_STATE_UP);
   9431 		} else {
   9432 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9433 				device_xname(sc->sc_dev)));
   9434 			sc->sc_tbi_linkup = 0;
   9435 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9436 		}
   9437 		/* Update LED */
   9438 		wm_tbi_serdes_set_linkled(sc);
   9439 	} else if (icr & ICR_RXSEQ)
   9440 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9441 			device_xname(sc->sc_dev)));
   9442 }
   9443 
   9444 /*
   9445  * wm_linkintr_serdes:
   9446  *
    9447  *	Helper; handle link interrupts for SERDES mode.
   9448  */
   9449 static void
   9450 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9451 {
   9452 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9453 	struct mii_data *mii = &sc->sc_mii;
   9454 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9455 	uint32_t pcs_adv, pcs_lpab, reg;
   9456 
   9457 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9458 		__func__));
   9459 
   9460 	if (icr & ICR_LSC) {
   9461 		/* Check PCS */
   9462 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9463 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9464 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9465 				device_xname(sc->sc_dev)));
   9466 			mii->mii_media_status |= IFM_ACTIVE;
   9467 			sc->sc_tbi_linkup = 1;
   9468 			if_link_state_change(ifp, LINK_STATE_UP);
   9469 		} else {
   9470 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9471 				device_xname(sc->sc_dev)));
    9472 			mii->mii_media_active |= IFM_NONE;
   9473 			sc->sc_tbi_linkup = 0;
   9474 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9475 			wm_tbi_serdes_set_linkled(sc);
   9476 			return;
   9477 		}
   9478 		mii->mii_media_active |= IFM_1000_SX;
   9479 		if ((reg & PCS_LSTS_FDX) != 0)
   9480 			mii->mii_media_active |= IFM_FDX;
   9481 		else
   9482 			mii->mii_media_active |= IFM_HDX;
   9483 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9484 			/* Check flow */
   9485 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9486 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9487 				DPRINTF(WM_DEBUG_LINK,
   9488 				    ("XXX LINKOK but not ACOMP\n"));
   9489 				return;
   9490 			}
   9491 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9492 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9493 			DPRINTF(WM_DEBUG_LINK,
   9494 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9495 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9496 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9497 				mii->mii_media_active |= IFM_FLOW
   9498 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9499 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9500 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9501 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9502 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9503 				mii->mii_media_active |= IFM_FLOW
   9504 				    | IFM_ETH_TXPAUSE;
   9505 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9506 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9507 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9508 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9509 				mii->mii_media_active |= IFM_FLOW
   9510 				    | IFM_ETH_RXPAUSE;
   9511 		}
   9512 		/* Update LED */
   9513 		wm_tbi_serdes_set_linkled(sc);
   9514 	} else
   9515 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9516 		    device_xname(sc->sc_dev)));
   9517 }
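
/*
 * The flow-control resolution above implements the IEEE 802.3 annex 28B
 * priority rules.  A minimal sketch of the same decision as a standalone
 * helper (illustrative only, not compiled; the inline code above is
 * canonical):
 */
#if 0
static int
wm_resolve_flow_sketch(uint32_t pcs_adv, uint32_t pcs_lpab)
{

	/* Both ends advertise symmetric pause: flow control both ways. */
	if ((pcs_adv & TXCW_SYM_PAUSE) && (pcs_lpab & TXCW_SYM_PAUSE))
		return IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	/* We are asymmetric-only and the partner takes pause: Tx only. */
	if (((pcs_adv & TXCW_SYM_PAUSE) == 0) && (pcs_adv & TXCW_ASYM_PAUSE)
	    && (pcs_lpab & TXCW_SYM_PAUSE) && (pcs_lpab & TXCW_ASYM_PAUSE))
		return IFM_FLOW | IFM_ETH_TXPAUSE;
	/* The partner is asymmetric-only and we take pause: Rx only. */
	if ((pcs_adv & TXCW_SYM_PAUSE) && (pcs_adv & TXCW_ASYM_PAUSE)
	    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
	    && (pcs_lpab & TXCW_ASYM_PAUSE))
		return IFM_FLOW | IFM_ETH_RXPAUSE;
	/* No compatible pause combination: no flow control. */
	return 0;
}
#endif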
   9518 
   9519 /*
   9520  * wm_linkintr:
   9521  *
   9522  *	Helper; handle link interrupts.
   9523  */
   9524 static void
   9525 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9526 {
   9527 
   9528 	KASSERT(WM_CORE_LOCKED(sc));
   9529 
   9530 	if (sc->sc_flags & WM_F_HAS_MII)
   9531 		wm_linkintr_gmii(sc, icr);
   9532 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9533 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9534 		wm_linkintr_serdes(sc, icr);
   9535 	else
   9536 		wm_linkintr_tbi(sc, icr);
   9537 }
   9538 
   9540 static inline void
   9541 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9542 {
   9543 
   9544 	if (wmq->wmq_txrx_use_workqueue)
   9545 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9546 	else
   9547 		softint_schedule(wmq->wmq_si);
   9548 }
   9549 
   9550 /*
   9551  * wm_intr_legacy:
   9552  *
   9553  *	Interrupt service routine for INTx and MSI.
   9554  */
   9555 static int
   9556 wm_intr_legacy(void *arg)
   9557 {
   9558 	struct wm_softc *sc = arg;
   9559 	struct wm_queue *wmq = &sc->sc_queue[0];
   9560 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9561 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9562 	uint32_t icr, rndval = 0;
   9563 	int handled = 0;
   9564 
   9565 	while (1 /* CONSTCOND */) {
   9566 		icr = CSR_READ(sc, WMREG_ICR);
   9567 		if ((icr & sc->sc_icr) == 0)
   9568 			break;
   9569 		if (handled == 0)
   9570 			DPRINTF(WM_DEBUG_TX,
    9571 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9572 		if (rndval == 0)
   9573 			rndval = icr;
   9574 
   9575 		mutex_enter(rxq->rxq_lock);
   9576 
   9577 		if (rxq->rxq_stopping) {
   9578 			mutex_exit(rxq->rxq_lock);
   9579 			break;
   9580 		}
   9581 
   9582 		handled = 1;
   9583 
   9584 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9585 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9586 			DPRINTF(WM_DEBUG_RX,
   9587 			    ("%s: RX: got Rx intr 0x%08x\n",
   9588 				device_xname(sc->sc_dev),
   9589 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9590 			WM_Q_EVCNT_INCR(rxq, intr);
   9591 		}
   9592 #endif
    9593 		/*
    9594 		 * wm_rxeof() does *not* call upper layer functions directly,
    9595 		 * as if_percpuq_enqueue() just calls softint_schedule().
    9596 		 * So we can call wm_rxeof() in interrupt context.
    9597 		 */
   9598 		wm_rxeof(rxq, UINT_MAX);
   9599 		/* Fill lower bits with RX index. See below for the upper. */
   9600 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9601 
   9602 		mutex_exit(rxq->rxq_lock);
   9603 		mutex_enter(txq->txq_lock);
   9604 
   9605 		if (txq->txq_stopping) {
   9606 			mutex_exit(txq->txq_lock);
   9607 			break;
   9608 		}
   9609 
   9610 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9611 		if (icr & ICR_TXDW) {
   9612 			DPRINTF(WM_DEBUG_TX,
   9613 			    ("%s: TX: got TXDW interrupt\n",
   9614 				device_xname(sc->sc_dev)));
   9615 			WM_Q_EVCNT_INCR(txq, txdw);
   9616 		}
   9617 #endif
   9618 		wm_txeof(txq, UINT_MAX);
   9619 		/* Fill upper bits with TX index. See above for the lower. */
    9620 		rndval |= txq->txq_next * WM_NRXDESC;
   9621 
   9622 		mutex_exit(txq->txq_lock);
   9623 		WM_CORE_LOCK(sc);
   9624 
   9625 		if (sc->sc_core_stopping) {
   9626 			WM_CORE_UNLOCK(sc);
   9627 			break;
   9628 		}
   9629 
   9630 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9631 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9632 			wm_linkintr(sc, icr);
   9633 		}
   9634 		if ((icr & ICR_GPI(0)) != 0)
   9635 			device_printf(sc->sc_dev, "got module interrupt\n");
   9636 
   9637 		WM_CORE_UNLOCK(sc);
   9638 
   9639 		if (icr & ICR_RXO) {
   9640 #if defined(WM_DEBUG)
   9641 			log(LOG_WARNING, "%s: Receive overrun\n",
   9642 			    device_xname(sc->sc_dev));
   9643 #endif /* defined(WM_DEBUG) */
   9644 		}
   9645 	}
   9646 
   9647 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
   9648 
   9649 	if (handled) {
   9650 		/* Try to get more packets going. */
   9651 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9652 		wm_sched_handle_queue(sc, wmq);
   9653 	}
   9654 
   9655 	return handled;
   9656 }
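
/*
 * For reference, the entropy mixed into rndval by wm_intr_legacy() above
 * is laid out as follows (illustrative, assuming for the example that
 * WM_NRXDESC is 256 so that WM_NRXDESC_MASK covers the low 8 bits):
 *
 *	rndval = icr;				    initial ICR snapshot
 *	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;   low bits:  Rx index
 *	rndval |= txq->txq_next * WM_NRXDESC;	    next bits: Tx index
 *
 * Multiplying the Tx index by WM_NRXDESC shifts it above the bits used
 * by the Rx index, so both indices contribute distinct bits.
 */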
   9657 
   9658 static inline void
   9659 wm_txrxintr_disable(struct wm_queue *wmq)
   9660 {
   9661 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9662 
   9663 	if (sc->sc_type == WM_T_82574)
   9664 		CSR_WRITE(sc, WMREG_IMC,
   9665 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9666 	else if (sc->sc_type == WM_T_82575)
   9667 		CSR_WRITE(sc, WMREG_EIMC,
   9668 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9669 	else
   9670 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9671 }
   9672 
   9673 static inline void
   9674 wm_txrxintr_enable(struct wm_queue *wmq)
   9675 {
   9676 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9677 
   9678 	wm_itrs_calculate(sc, wmq);
   9679 
    9680 	/*
    9681 	 * Re-enable ICR_OTHER, which wm_linkintr_msix() disabled.  It does
    9682 	 * not matter whether the RXQ(0) or the RXQ(1) handler re-enables
    9683 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9684 	 * while its wm_handle_queue(wmq) is running.
    9685 	 */
   9686 	if (sc->sc_type == WM_T_82574)
   9687 		CSR_WRITE(sc, WMREG_IMS,
   9688 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9689 	else if (sc->sc_type == WM_T_82575)
   9690 		CSR_WRITE(sc, WMREG_EIMS,
   9691 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9692 	else
   9693 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9694 }
   9695 
   9696 static int
   9697 wm_txrxintr_msix(void *arg)
   9698 {
   9699 	struct wm_queue *wmq = arg;
   9700 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9701 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9702 	struct wm_softc *sc = txq->txq_sc;
   9703 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9704 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9705 	uint32_t rndval = 0;
   9706 	bool txmore;
   9707 	bool rxmore;
   9708 
   9709 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9710 
   9711 	DPRINTF(WM_DEBUG_TX,
   9712 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9713 
   9714 	wm_txrxintr_disable(wmq);
   9715 
   9716 	mutex_enter(txq->txq_lock);
   9717 
   9718 	if (txq->txq_stopping) {
   9719 		mutex_exit(txq->txq_lock);
   9720 		return 0;
   9721 	}
   9722 
   9723 	WM_Q_EVCNT_INCR(txq, txdw);
   9724 	txmore = wm_txeof(txq, txlimit);
   9725 	/* Fill upper bits with TX index. See below for the lower. */
   9726 	rndval = txq->txq_next * WM_NRXDESC;
    9727 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9728 	mutex_exit(txq->txq_lock);
   9729 
   9730 	DPRINTF(WM_DEBUG_RX,
   9731 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9732 	mutex_enter(rxq->rxq_lock);
   9733 
   9734 	if (rxq->rxq_stopping) {
   9735 		mutex_exit(rxq->rxq_lock);
   9736 		return 0;
   9737 	}
   9738 
   9739 	WM_Q_EVCNT_INCR(rxq, intr);
   9740 	rxmore = wm_rxeof(rxq, rxlimit);
   9741 
   9742 	/* Fill lower bits with RX index. See above for the upper. */
   9743 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9744 	mutex_exit(rxq->rxq_lock);
   9745 
   9746 	wm_itrs_writereg(sc, wmq);
   9747 
    9748 	/*
    9749 	 * This runs in hardware interrupt context, per CPU and per
    9750 	 * vector, so no lock is required to add the entropy.
    9751 	 */
   9752 	if (rndval != 0)
   9753 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
   9754 
   9755 	if (txmore || rxmore) {
   9756 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9757 		wm_sched_handle_queue(sc, wmq);
   9758 	} else
   9759 		wm_txrxintr_enable(wmq);
   9760 
   9761 	return 1;
   9762 }
   9763 
   9764 static void
   9765 wm_handle_queue(void *arg)
   9766 {
   9767 	struct wm_queue *wmq = arg;
   9768 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9769 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9770 	struct wm_softc *sc = txq->txq_sc;
   9771 	u_int txlimit = sc->sc_tx_process_limit;
   9772 	u_int rxlimit = sc->sc_rx_process_limit;
   9773 	bool txmore;
   9774 	bool rxmore;
   9775 
   9776 	mutex_enter(txq->txq_lock);
   9777 	if (txq->txq_stopping) {
   9778 		mutex_exit(txq->txq_lock);
   9779 		return;
   9780 	}
   9781 	txmore = wm_txeof(txq, txlimit);
   9782 	wm_deferred_start_locked(txq);
   9783 	mutex_exit(txq->txq_lock);
   9784 
   9785 	mutex_enter(rxq->rxq_lock);
   9786 	if (rxq->rxq_stopping) {
   9787 		mutex_exit(rxq->rxq_lock);
   9788 		return;
   9789 	}
   9790 	WM_Q_EVCNT_INCR(rxq, defer);
   9791 	rxmore = wm_rxeof(rxq, rxlimit);
   9792 	mutex_exit(rxq->rxq_lock);
   9793 
   9794 	if (txmore || rxmore) {
   9795 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9796 		wm_sched_handle_queue(sc, wmq);
   9797 	} else
   9798 		wm_txrxintr_enable(wmq);
   9799 }
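
/*
 * For reference: wm_txrxintr_msix() bounds its work by the
 * sc_{tx,rx}_intr_process_limit values, while this deferred handler uses
 * the separate (typically larger) sc_{tx,rx}_process_limit values, so
 * sustained traffic migrates out of hard interrupt context into softint
 * or workqueue context.
 */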
   9800 
   9801 static void
   9802 wm_handle_queue_work(struct work *wk, void *context)
   9803 {
   9804 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9805 
    9806 	/*
    9807 	 * An "enqueued" flag is not required here: wm_handle_queue()
    9808 	 * re-schedules itself only while more work remains.
    9809 	 */
   9809 	wm_handle_queue(wmq);
   9810 }
   9811 
   9812 /*
   9813  * wm_linkintr_msix:
   9814  *
   9815  *	Interrupt service routine for link status change for MSI-X.
   9816  */
   9817 static int
   9818 wm_linkintr_msix(void *arg)
   9819 {
   9820 	struct wm_softc *sc = arg;
   9821 	uint32_t reg;
    9822 	bool has_rxo = false;
   9823 
   9824 	reg = CSR_READ(sc, WMREG_ICR);
   9825 	WM_CORE_LOCK(sc);
   9826 	DPRINTF(WM_DEBUG_LINK,
   9827 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9828 		device_xname(sc->sc_dev), reg));
   9829 
   9830 	if (sc->sc_core_stopping)
   9831 		goto out;
   9832 
   9833 	if ((reg & ICR_LSC) != 0) {
   9834 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9835 		wm_linkintr(sc, ICR_LSC);
   9836 	}
   9837 	if ((reg & ICR_GPI(0)) != 0)
   9838 		device_printf(sc->sc_dev, "got module interrupt\n");
   9839 
    9840 	/*
    9841 	 * XXX 82574 MSI-X mode workaround
    9842 	 *
    9843 	 * In MSI-X mode the 82574 delivers the receive overrun (RXO)
    9844 	 * interrupt on the ICR_OTHER vector and raises neither the
    9845 	 * ICR_RXQ(0) nor the ICR_RXQ(1) vector, so we generate ICR_RXQ(0)
    9846 	 * and ICR_RXQ(1) interrupts by writing WMREG_ICS to process
    9847 	 * received packets.
    9848 	 */
   9848 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9849 #if defined(WM_DEBUG)
   9850 		log(LOG_WARNING, "%s: Receive overrun\n",
   9851 		    device_xname(sc->sc_dev));
   9852 #endif /* defined(WM_DEBUG) */
   9853 
   9854 		has_rxo = true;
    9855 		/*
    9856 		 * The RXO interrupt fires at a very high rate when receive
    9857 		 * traffic is heavy, so we poll for ICR_OTHER as we do for the
    9858 		 * Tx/Rx interrupts.  ICR_OTHER is re-enabled at the end of
    9859 		 * wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
    9860 		 * and ICR_RXQ(1) interrupts.
    9861 		 */
   9862 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9863 
   9864 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9865 	}
   9866 
   9869 out:
   9870 	WM_CORE_UNLOCK(sc);
   9871 
   9872 	if (sc->sc_type == WM_T_82574) {
   9873 		if (!has_rxo)
   9874 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9875 		else
   9876 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9877 	} else if (sc->sc_type == WM_T_82575)
   9878 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9879 	else
   9880 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9881 
   9882 	return 1;
   9883 }
   9884 
   9885 /*
   9886  * Media related.
   9887  * GMII, SGMII, TBI (and SERDES)
   9888  */
   9889 
   9890 /* Common */
   9891 
   9892 /*
   9893  * wm_tbi_serdes_set_linkled:
   9894  *
   9895  *	Update the link LED on TBI and SERDES devices.
   9896  */
   9897 static void
   9898 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9899 {
   9900 
   9901 	if (sc->sc_tbi_linkup)
   9902 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9903 	else
   9904 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9905 
    9906 	/* On 82540 and newer devices the pin is active-low */
   9907 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9908 
   9909 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9910 }
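
/*
 * Worked example (assuming, as the code above implies, that "active low"
 * means the LED lights while the pin is driven low): with sc_tbi_linkup
 * set on an 82544, CTRL_SWDPIN(0) is first set and then inverted by the
 * XOR, so the pin is driven low and the link LED turns on.  On parts
 * older than the 82540 the XOR mask is 0 and the pin simply follows
 * sc_tbi_linkup.
 */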
   9911 
   9912 /* GMII related */
   9913 
   9914 /*
   9915  * wm_gmii_reset:
   9916  *
   9917  *	Reset the PHY.
   9918  */
   9919 static void
   9920 wm_gmii_reset(struct wm_softc *sc)
   9921 {
   9922 	uint32_t reg;
   9923 	int rv;
   9924 
   9925 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9926 		device_xname(sc->sc_dev), __func__));
   9927 
   9928 	rv = sc->phy.acquire(sc);
   9929 	if (rv != 0) {
   9930 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9931 		    __func__);
   9932 		return;
   9933 	}
   9934 
   9935 	switch (sc->sc_type) {
   9936 	case WM_T_82542_2_0:
   9937 	case WM_T_82542_2_1:
   9938 		/* null */
   9939 		break;
   9940 	case WM_T_82543:
    9941 		/*
    9942 		 * With 82543, we need to force speed and duplex on the MAC
    9943 		 * to match the PHY's speed and duplex configuration.  In
    9944 		 * addition, we need to toggle the PHY's hardware reset pin
    9945 		 * to bring it out of reset.
    9946 		 */
   9947 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9948 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9949 
   9950 		/* The PHY reset pin is active-low. */
   9951 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9952 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9953 		    CTRL_EXT_SWDPIN(4));
   9954 		reg |= CTRL_EXT_SWDPIO(4);
   9955 
   9956 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9957 		CSR_WRITE_FLUSH(sc);
   9958 		delay(10*1000);
   9959 
   9960 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9961 		CSR_WRITE_FLUSH(sc);
   9962 		delay(150);
   9963 #if 0
   9964 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9965 #endif
   9966 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9967 		break;
   9968 	case WM_T_82544:	/* Reset 10000us */
   9969 	case WM_T_82540:
   9970 	case WM_T_82545:
   9971 	case WM_T_82545_3:
   9972 	case WM_T_82546:
   9973 	case WM_T_82546_3:
   9974 	case WM_T_82541:
   9975 	case WM_T_82541_2:
   9976 	case WM_T_82547:
   9977 	case WM_T_82547_2:
   9978 	case WM_T_82571:	/* Reset 100us */
   9979 	case WM_T_82572:
   9980 	case WM_T_82573:
   9981 	case WM_T_82574:
   9982 	case WM_T_82575:
   9983 	case WM_T_82576:
   9984 	case WM_T_82580:
   9985 	case WM_T_I350:
   9986 	case WM_T_I354:
   9987 	case WM_T_I210:
   9988 	case WM_T_I211:
   9989 	case WM_T_82583:
   9990 	case WM_T_80003:
   9991 		/* Generic reset */
   9992 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9993 		CSR_WRITE_FLUSH(sc);
   9994 		delay(20000);
   9995 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9996 		CSR_WRITE_FLUSH(sc);
   9997 		delay(20000);
   9998 
   9999 		if ((sc->sc_type == WM_T_82541)
   10000 		    || (sc->sc_type == WM_T_82541_2)
   10001 		    || (sc->sc_type == WM_T_82547)
   10002 		    || (sc->sc_type == WM_T_82547_2)) {
    10003 			/* Workarounds for IGP are done in igp_reset() */
   10004 			/* XXX add code to set LED after phy reset */
   10005 		}
   10006 		break;
   10007 	case WM_T_ICH8:
   10008 	case WM_T_ICH9:
   10009 	case WM_T_ICH10:
   10010 	case WM_T_PCH:
   10011 	case WM_T_PCH2:
   10012 	case WM_T_PCH_LPT:
   10013 	case WM_T_PCH_SPT:
   10014 	case WM_T_PCH_CNP:
   10015 		/* Generic reset */
   10016 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10017 		CSR_WRITE_FLUSH(sc);
   10018 		delay(100);
   10019 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10020 		CSR_WRITE_FLUSH(sc);
   10021 		delay(150);
   10022 		break;
   10023 	default:
   10024 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10025 		    __func__);
   10026 		break;
   10027 	}
   10028 
   10029 	sc->phy.release(sc);
   10030 
   10031 	/* get_cfg_done */
   10032 	wm_get_cfg_done(sc);
   10033 
   10034 	/* Extra setup */
   10035 	switch (sc->sc_type) {
   10036 	case WM_T_82542_2_0:
   10037 	case WM_T_82542_2_1:
   10038 	case WM_T_82543:
   10039 	case WM_T_82544:
   10040 	case WM_T_82540:
   10041 	case WM_T_82545:
   10042 	case WM_T_82545_3:
   10043 	case WM_T_82546:
   10044 	case WM_T_82546_3:
   10045 	case WM_T_82541_2:
   10046 	case WM_T_82547_2:
   10047 	case WM_T_82571:
   10048 	case WM_T_82572:
   10049 	case WM_T_82573:
   10050 	case WM_T_82574:
   10051 	case WM_T_82583:
   10052 	case WM_T_82575:
   10053 	case WM_T_82576:
   10054 	case WM_T_82580:
   10055 	case WM_T_I350:
   10056 	case WM_T_I354:
   10057 	case WM_T_I210:
   10058 	case WM_T_I211:
   10059 	case WM_T_80003:
   10060 		/* Null */
   10061 		break;
   10062 	case WM_T_82541:
   10063 	case WM_T_82547:
    10064 		/* XXX Actively configure the LED after PHY reset */
   10065 		break;
   10066 	case WM_T_ICH8:
   10067 	case WM_T_ICH9:
   10068 	case WM_T_ICH10:
   10069 	case WM_T_PCH:
   10070 	case WM_T_PCH2:
   10071 	case WM_T_PCH_LPT:
   10072 	case WM_T_PCH_SPT:
   10073 	case WM_T_PCH_CNP:
   10074 		wm_phy_post_reset(sc);
   10075 		break;
   10076 	default:
   10077 		panic("%s: unknown type\n", __func__);
   10078 		break;
   10079 	}
   10080 }
   10081 
    10082 /*
    10083  * Set up sc_phytype and mii_{read|write}reg.
    10084  *
    10085  *  To identify the PHY type, the correct read/write functions must be
    10086  * selected, and selecting them requires the PCI ID or the MAC type,
    10087  * i.e. no PHY register access.
    10088  *
    10089  *  On the first call of this function, the PHY ID is not yet known, so
    10090  * the PCI ID or MAC type is checked.  The PCI ID list may not be
    10091  * complete, so the result might be incorrect.
    10092  *
    10093  *  On the second call, the PHY OUI and model are used to identify the
    10094  * PHY type.  This may still be imperfect if a comparison entry is
    10095  * missing, but it is better than the first call.
    10096  *
    10097  *  If the newly detected result differs from the previous assumption,
    10098  * a diagnostic message is printed.
    10099  */
   10100 static void
   10101 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10102     uint16_t phy_model)
   10103 {
   10104 	device_t dev = sc->sc_dev;
   10105 	struct mii_data *mii = &sc->sc_mii;
   10106 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10107 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10108 	mii_readreg_t new_readreg;
   10109 	mii_writereg_t new_writereg;
   10110 	bool dodiag = true;
   10111 
   10112 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10113 		device_xname(sc->sc_dev), __func__));
   10114 
    10115 	/*
    10116 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is
    10117 	 * always incorrect, so don't print diag output on the second call.
    10118 	 */
   10119 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10120 		dodiag = false;
   10121 
   10122 	if (mii->mii_readreg == NULL) {
   10123 		/*
   10124 		 *  This is the first call of this function. For ICH and PCH
   10125 		 * variants, it's difficult to determine the PHY access method
   10126 		 * by sc_type, so use the PCI product ID for some devices.
   10127 		 */
   10128 
   10129 		switch (sc->sc_pcidevid) {
   10130 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10131 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10132 			/* 82577 */
   10133 			new_phytype = WMPHY_82577;
   10134 			break;
   10135 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10136 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10137 			/* 82578 */
   10138 			new_phytype = WMPHY_82578;
   10139 			break;
   10140 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10141 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10142 			/* 82579 */
   10143 			new_phytype = WMPHY_82579;
   10144 			break;
   10145 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10146 		case PCI_PRODUCT_INTEL_82801I_BM:
   10147 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10148 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10149 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10150 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10151 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10152 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10153 			/* ICH8, 9, 10 with 82567 */
   10154 			new_phytype = WMPHY_BM;
   10155 			break;
   10156 		default:
   10157 			break;
   10158 		}
   10159 	} else {
   10160 		/* It's not the first call. Use PHY OUI and model */
   10161 		switch (phy_oui) {
   10162 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10163 			switch (phy_model) {
   10164 			case 0x0004: /* XXX */
   10165 				new_phytype = WMPHY_82578;
   10166 				break;
   10167 			default:
   10168 				break;
   10169 			}
   10170 			break;
   10171 		case MII_OUI_xxMARVELL:
   10172 			switch (phy_model) {
   10173 			case MII_MODEL_xxMARVELL_I210:
   10174 				new_phytype = WMPHY_I210;
   10175 				break;
   10176 			case MII_MODEL_xxMARVELL_E1011:
   10177 			case MII_MODEL_xxMARVELL_E1000_3:
   10178 			case MII_MODEL_xxMARVELL_E1000_5:
   10179 			case MII_MODEL_xxMARVELL_E1112:
   10180 				new_phytype = WMPHY_M88;
   10181 				break;
   10182 			case MII_MODEL_xxMARVELL_E1149:
   10183 				new_phytype = WMPHY_BM;
   10184 				break;
   10185 			case MII_MODEL_xxMARVELL_E1111:
   10186 			case MII_MODEL_xxMARVELL_I347:
   10187 			case MII_MODEL_xxMARVELL_E1512:
   10188 			case MII_MODEL_xxMARVELL_E1340M:
   10189 			case MII_MODEL_xxMARVELL_E1543:
   10190 				new_phytype = WMPHY_M88;
   10191 				break;
   10192 			case MII_MODEL_xxMARVELL_I82563:
   10193 				new_phytype = WMPHY_GG82563;
   10194 				break;
   10195 			default:
   10196 				break;
   10197 			}
   10198 			break;
   10199 		case MII_OUI_INTEL:
   10200 			switch (phy_model) {
   10201 			case MII_MODEL_INTEL_I82577:
   10202 				new_phytype = WMPHY_82577;
   10203 				break;
   10204 			case MII_MODEL_INTEL_I82579:
   10205 				new_phytype = WMPHY_82579;
   10206 				break;
   10207 			case MII_MODEL_INTEL_I217:
   10208 				new_phytype = WMPHY_I217;
   10209 				break;
   10210 			case MII_MODEL_INTEL_I82580:
   10211 			case MII_MODEL_INTEL_I350:
   10212 				new_phytype = WMPHY_82580;
   10213 				break;
   10214 			default:
   10215 				break;
   10216 			}
   10217 			break;
   10218 		case MII_OUI_yyINTEL:
   10219 			switch (phy_model) {
   10220 			case MII_MODEL_yyINTEL_I82562G:
   10221 			case MII_MODEL_yyINTEL_I82562EM:
   10222 			case MII_MODEL_yyINTEL_I82562ET:
   10223 				new_phytype = WMPHY_IFE;
   10224 				break;
   10225 			case MII_MODEL_yyINTEL_IGP01E1000:
   10226 				new_phytype = WMPHY_IGP;
   10227 				break;
   10228 			case MII_MODEL_yyINTEL_I82566:
   10229 				new_phytype = WMPHY_IGP_3;
   10230 				break;
   10231 			default:
   10232 				break;
   10233 			}
   10234 			break;
   10235 		default:
   10236 			break;
   10237 		}
   10238 
   10239 		if (dodiag) {
   10240 			if (new_phytype == WMPHY_UNKNOWN)
   10241 				aprint_verbose_dev(dev,
   10242 				    "%s: Unknown PHY model. OUI=%06x, "
   10243 				    "model=%04x\n", __func__, phy_oui,
   10244 				    phy_model);
   10245 
   10246 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10247 			    && (sc->sc_phytype != new_phytype)) {
    10248 				aprint_error_dev(dev, "Previously assumed PHY "
    10249 				    "type (%u) was incorrect. PHY type from "
    10250 				    "PHY ID = %u\n", sc->sc_phytype, new_phytype);
   10251 			}
   10252 		}
   10253 	}
   10254 
   10255 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10256 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10257 		/* SGMII */
   10258 		new_readreg = wm_sgmii_readreg;
   10259 		new_writereg = wm_sgmii_writereg;
   10260 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10261 		/* BM2 (phyaddr == 1) */
   10262 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10263 		    && (new_phytype != WMPHY_BM)
   10264 		    && (new_phytype != WMPHY_UNKNOWN))
   10265 			doubt_phytype = new_phytype;
   10266 		new_phytype = WMPHY_BM;
   10267 		new_readreg = wm_gmii_bm_readreg;
   10268 		new_writereg = wm_gmii_bm_writereg;
   10269 	} else if (sc->sc_type >= WM_T_PCH) {
   10270 		/* All PCH* use _hv_ */
   10271 		new_readreg = wm_gmii_hv_readreg;
   10272 		new_writereg = wm_gmii_hv_writereg;
   10273 	} else if (sc->sc_type >= WM_T_ICH8) {
   10274 		/* non-82567 ICH8, 9 and 10 */
   10275 		new_readreg = wm_gmii_i82544_readreg;
   10276 		new_writereg = wm_gmii_i82544_writereg;
   10277 	} else if (sc->sc_type >= WM_T_80003) {
   10278 		/* 80003 */
   10279 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10280 		    && (new_phytype != WMPHY_GG82563)
   10281 		    && (new_phytype != WMPHY_UNKNOWN))
   10282 			doubt_phytype = new_phytype;
   10283 		new_phytype = WMPHY_GG82563;
   10284 		new_readreg = wm_gmii_i80003_readreg;
   10285 		new_writereg = wm_gmii_i80003_writereg;
   10286 	} else if (sc->sc_type >= WM_T_I210) {
   10287 		/* I210 and I211 */
   10288 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10289 		    && (new_phytype != WMPHY_I210)
   10290 		    && (new_phytype != WMPHY_UNKNOWN))
   10291 			doubt_phytype = new_phytype;
   10292 		new_phytype = WMPHY_I210;
   10293 		new_readreg = wm_gmii_gs40g_readreg;
   10294 		new_writereg = wm_gmii_gs40g_writereg;
   10295 	} else if (sc->sc_type >= WM_T_82580) {
   10296 		/* 82580, I350 and I354 */
   10297 		new_readreg = wm_gmii_82580_readreg;
   10298 		new_writereg = wm_gmii_82580_writereg;
   10299 	} else if (sc->sc_type >= WM_T_82544) {
    10300 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10301 		new_readreg = wm_gmii_i82544_readreg;
   10302 		new_writereg = wm_gmii_i82544_writereg;
   10303 	} else {
   10304 		new_readreg = wm_gmii_i82543_readreg;
   10305 		new_writereg = wm_gmii_i82543_writereg;
   10306 	}
   10307 
   10308 	if (new_phytype == WMPHY_BM) {
   10309 		/* All BM use _bm_ */
   10310 		new_readreg = wm_gmii_bm_readreg;
   10311 		new_writereg = wm_gmii_bm_writereg;
   10312 	}
   10313 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10314 		/* All PCH* use _hv_ */
   10315 		new_readreg = wm_gmii_hv_readreg;
   10316 		new_writereg = wm_gmii_hv_writereg;
   10317 	}
   10318 
   10319 	/* Diag output */
   10320 	if (dodiag) {
   10321 		if (doubt_phytype != WMPHY_UNKNOWN)
   10322 			aprint_error_dev(dev, "Assumed new PHY type was "
   10323 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10324 			    new_phytype);
   10325 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10326 		    && (sc->sc_phytype != new_phytype))
    10327 			aprint_error_dev(dev, "Previously assumed PHY type "
    10328 			    "(%u) was incorrect. New PHY type = %u\n",
    10329 			    sc->sc_phytype, new_phytype);
   10330 
   10331 		if ((mii->mii_readreg != NULL) &&
   10332 		    (new_phytype == WMPHY_UNKNOWN))
   10333 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10334 
   10335 		if ((mii->mii_readreg != NULL) &&
   10336 		    (mii->mii_readreg != new_readreg))
   10337 			aprint_error_dev(dev, "Previously assumed PHY "
   10338 			    "read/write function was incorrect.\n");
   10339 	}
   10340 
   10341 	/* Update now */
   10342 	sc->sc_phytype = new_phytype;
   10343 	mii->mii_readreg = new_readreg;
   10344 	mii->mii_writereg = new_writereg;
   10345 	if (new_readreg == wm_gmii_hv_readreg) {
   10346 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10347 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10348 	} else if (new_readreg == wm_sgmii_readreg) {
   10349 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10350 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10351 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10352 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10353 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10354 	}
   10355 }
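
/*
 * A minimal usage sketch of the two-call identification described above
 * (illustrative fragment only; the second call is made from
 * wm_gmii_mediainit() below, the first presumably happens at attach
 * time before any PHY register can be read):
 */
#if 0
	/* First call: mii->mii_readreg is still NULL, guess from PCI ID. */
	wm_gmii_setup_phytype(sc, 0, 0);

	/* Second call: refine the guess from the probed child's PHY ID. */
	struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
	wm_gmii_setup_phytype(sc, child->mii_mpd_oui, child->mii_mpd_model);
#endif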
   10356 
   10357 /*
   10358  * wm_get_phy_id_82575:
   10359  *
    10360  * Return the PHY ID, or -1 on failure.
   10361  */
   10362 static int
   10363 wm_get_phy_id_82575(struct wm_softc *sc)
   10364 {
   10365 	uint32_t reg;
   10366 	int phyid = -1;
   10367 
   10368 	/* XXX */
   10369 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10370 		return -1;
   10371 
   10372 	if (wm_sgmii_uses_mdio(sc)) {
   10373 		switch (sc->sc_type) {
   10374 		case WM_T_82575:
   10375 		case WM_T_82576:
   10376 			reg = CSR_READ(sc, WMREG_MDIC);
   10377 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10378 			break;
   10379 		case WM_T_82580:
   10380 		case WM_T_I350:
   10381 		case WM_T_I354:
   10382 		case WM_T_I210:
   10383 		case WM_T_I211:
   10384 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10385 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10386 			break;
   10387 		default:
   10388 			return -1;
   10389 		}
   10390 	}
   10391 
   10392 	return phyid;
   10393 }
   10394 
   10396 /*
   10397  * wm_gmii_mediainit:
   10398  *
   10399  *	Initialize media for use on 1000BASE-T devices.
   10400  */
   10401 static void
   10402 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10403 {
   10404 	device_t dev = sc->sc_dev;
   10405 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10406 	struct mii_data *mii = &sc->sc_mii;
   10407 
   10408 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10409 		device_xname(sc->sc_dev), __func__));
   10410 
   10411 	/* We have GMII. */
   10412 	sc->sc_flags |= WM_F_HAS_MII;
   10413 
   10414 	if (sc->sc_type == WM_T_80003)
    10415 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10416 	else
   10417 		sc->sc_tipg = TIPG_1000T_DFLT;
   10418 
   10419 	/*
   10420 	 * Let the chip set speed/duplex on its own based on
   10421 	 * signals from the PHY.
   10422 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10423 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10424 	 */
   10425 	sc->sc_ctrl |= CTRL_SLU;
   10426 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10427 
   10428 	/* Initialize our media structures and probe the GMII. */
   10429 	mii->mii_ifp = ifp;
   10430 
   10431 	mii->mii_statchg = wm_gmii_statchg;
   10432 
    10433 	/* Switch PHY control from SMBus to PCIe */
   10434 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10435 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10436 	    || (sc->sc_type == WM_T_PCH_CNP))
   10437 		wm_init_phy_workarounds_pchlan(sc);
   10438 
   10439 	wm_gmii_reset(sc);
   10440 
   10441 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10442 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10443 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10444 
   10445 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10446 	    || (sc->sc_type == WM_T_82580)
   10447 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10448 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10449 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10450 			/* Attach only one port */
   10451 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10452 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10453 		} else {
   10454 			int i, id;
   10455 			uint32_t ctrl_ext;
   10456 
   10457 			id = wm_get_phy_id_82575(sc);
   10458 			if (id != -1) {
   10459 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10460 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10461 			}
   10462 			if ((id == -1)
   10463 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10464 				/* Power on the SGMII PHY if it is disabled */
   10465 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10466 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10467 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10468 				CSR_WRITE_FLUSH(sc);
   10469 				delay(300*1000); /* XXX too long */
   10470 
    10471 				/*
    10472 				 * Scan PHY addresses 1 through 7.
    10473 				 *
    10474 				 * I2C access can fail with the I2C register's
    10475 				 * ERROR bit set, so suppress error messages
    10476 				 * while scanning.
    10477 				 */
   10478 				sc->phy.no_errprint = true;
   10479 				for (i = 1; i < 8; i++)
   10480 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10481 					    0xffffffff, i, MII_OFFSET_ANY,
   10482 					    MIIF_DOPAUSE);
   10483 				sc->phy.no_errprint = false;
   10484 
    10485 				/* Restore the previous SFP cage power state */
   10486 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10487 			}
   10488 		}
   10489 	} else
   10490 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10491 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10492 
   10493 	/*
   10494 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10495 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10496 	 */
   10497 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10498 		|| (sc->sc_type == WM_T_PCH_SPT)
   10499 		|| (sc->sc_type == WM_T_PCH_CNP))
   10500 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10501 		wm_set_mdio_slow_mode_hv(sc);
   10502 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10503 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10504 	}
   10505 
   10506 	/*
   10507 	 * (For ICH8 variants)
   10508 	 * If PHY detection failed, use BM's r/w function and retry.
   10509 	 */
   10510 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10511 		/* if failed, retry with *_bm_* */
   10512 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10513 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10514 		    sc->sc_phytype);
   10515 		sc->sc_phytype = WMPHY_BM;
   10516 		mii->mii_readreg = wm_gmii_bm_readreg;
   10517 		mii->mii_writereg = wm_gmii_bm_writereg;
   10518 
   10519 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10520 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10521 	}
   10522 
   10523 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10524 		/* No PHY was found */
   10525 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10526 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10527 		sc->sc_phytype = WMPHY_NONE;
   10528 	} else {
   10529 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10530 
    10531 		/*
    10532 		 * PHY found.  Check the PHY type again via the second call
    10533 		 * of wm_gmii_setup_phytype().
    10534 		 */
   10535 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10536 		    child->mii_mpd_model);
   10537 
   10538 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10539 	}
   10540 }
   10541 
   10542 /*
   10543  * wm_gmii_mediachange:	[ifmedia interface function]
   10544  *
   10545  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10546  */
   10547 static int
   10548 wm_gmii_mediachange(struct ifnet *ifp)
   10549 {
   10550 	struct wm_softc *sc = ifp->if_softc;
   10551 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10552 	uint32_t reg;
   10553 	int rc;
   10554 
   10555 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10556 		device_xname(sc->sc_dev), __func__));
   10557 	if ((ifp->if_flags & IFF_UP) == 0)
   10558 		return 0;
   10559 
   10560 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10561 	if ((sc->sc_type == WM_T_82580)
   10562 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10563 	    || (sc->sc_type == WM_T_I211)) {
   10564 		reg = CSR_READ(sc, WMREG_PHPM);
   10565 		reg &= ~PHPM_GO_LINK_D;
   10566 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10567 	}
   10568 
   10569 	/* Disable D0 LPLU. */
   10570 	wm_lplu_d0_disable(sc);
   10571 
   10572 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10573 	sc->sc_ctrl |= CTRL_SLU;
   10574 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10575 	    || (sc->sc_type > WM_T_82543)) {
   10576 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10577 	} else {
   10578 		sc->sc_ctrl &= ~CTRL_ASDE;
   10579 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10580 		if (ife->ifm_media & IFM_FDX)
   10581 			sc->sc_ctrl |= CTRL_FD;
   10582 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10583 		case IFM_10_T:
   10584 			sc->sc_ctrl |= CTRL_SPEED_10;
   10585 			break;
   10586 		case IFM_100_TX:
   10587 			sc->sc_ctrl |= CTRL_SPEED_100;
   10588 			break;
   10589 		case IFM_1000_T:
   10590 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10591 			break;
   10592 		case IFM_NONE:
   10593 			/* There is no specific setting for IFM_NONE */
   10594 			break;
   10595 		default:
   10596 			panic("wm_gmii_mediachange: bad media 0x%x",
   10597 			    ife->ifm_media);
   10598 		}
   10599 	}
   10600 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10601 	CSR_WRITE_FLUSH(sc);
   10602 
   10603 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10604 		wm_serdes_mediachange(ifp);
   10605 
   10606 	if (sc->sc_type <= WM_T_82543)
   10607 		wm_gmii_reset(sc);
   10608 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10609 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10610 		/* Allow time for the SFP cage to power up the PHY */
   10611 		delay(300 * 1000);
   10612 		wm_gmii_reset(sc);
   10613 	}
   10614 
   10615 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10616 		return 0;
   10617 	return rc;
   10618 }
   10619 
   10620 /*
   10621  * wm_gmii_mediastatus:	[ifmedia interface function]
   10622  *
   10623  *	Get the current interface media status on a 1000BASE-T device.
   10624  */
   10625 static void
   10626 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10627 {
   10628 	struct wm_softc *sc = ifp->if_softc;
   10629 
   10630 	ether_mediastatus(ifp, ifmr);
   10631 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10632 	    | sc->sc_flowflags;
   10633 }
   10634 
   10635 #define	MDI_IO		CTRL_SWDPIN(2)
   10636 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10637 #define	MDI_CLK		CTRL_SWDPIN(3)
   10638 
   10639 static void
   10640 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10641 {
   10642 	uint32_t i, v;
   10643 
   10644 	v = CSR_READ(sc, WMREG_CTRL);
   10645 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10646 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10647 
   10648 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10649 		if (data & i)
   10650 			v |= MDI_IO;
   10651 		else
   10652 			v &= ~MDI_IO;
   10653 		CSR_WRITE(sc, WMREG_CTRL, v);
   10654 		CSR_WRITE_FLUSH(sc);
   10655 		delay(10);
   10656 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10657 		CSR_WRITE_FLUSH(sc);
   10658 		delay(10);
   10659 		CSR_WRITE(sc, WMREG_CTRL, v);
   10660 		CSR_WRITE_FLUSH(sc);
   10661 		delay(10);
   10662 	}
   10663 }
   10664 
   10665 static uint16_t
   10666 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10667 {
   10668 	uint32_t v, i;
   10669 	uint16_t data = 0;
   10670 
   10671 	v = CSR_READ(sc, WMREG_CTRL);
   10672 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10673 	v |= CTRL_SWDPIO(3);
   10674 
   10675 	CSR_WRITE(sc, WMREG_CTRL, v);
   10676 	CSR_WRITE_FLUSH(sc);
   10677 	delay(10);
   10678 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10679 	CSR_WRITE_FLUSH(sc);
   10680 	delay(10);
   10681 	CSR_WRITE(sc, WMREG_CTRL, v);
   10682 	CSR_WRITE_FLUSH(sc);
   10683 	delay(10);
   10684 
   10685 	for (i = 0; i < 16; i++) {
   10686 		data <<= 1;
   10687 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10688 		CSR_WRITE_FLUSH(sc);
   10689 		delay(10);
   10690 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10691 			data |= 1;
   10692 		CSR_WRITE(sc, WMREG_CTRL, v);
   10693 		CSR_WRITE_FLUSH(sc);
   10694 		delay(10);
   10695 	}
   10696 
   10697 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10698 	CSR_WRITE_FLUSH(sc);
   10699 	delay(10);
   10700 	CSR_WRITE(sc, WMREG_CTRL, v);
   10701 	CSR_WRITE_FLUSH(sc);
   10702 	delay(10);
   10703 
   10704 	return data;
   10705 }
   10706 
   10707 #undef MDI_IO
   10708 #undef MDI_DIR
   10709 #undef MDI_CLK
   10710 
   10711 /*
   10712  * wm_gmii_i82543_readreg:	[mii interface function]
   10713  *
   10714  *	Read a PHY register on the GMII (i82543 version).
   10715  */
   10716 static int
   10717 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10718 {
   10719 	struct wm_softc *sc = device_private(dev);
   10720 
   10721 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10722 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10723 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10724 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10725 
   10726 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10727 		device_xname(dev), phy, reg, *val));
   10728 
   10729 	return 0;
   10730 }
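
/*
 * For reference, the 14-bit management frame assembled above follows the
 * IEEE 802.3 clause 22 MDIO read layout, clocked out MSB first by
 * wm_i82543_mii_sendbits() after a 32-bit preamble of ones:
 *
 *	bits 13-12: MII_COMMAND_START
 *	bits 11-10: MII_COMMAND_READ
 *	bits  9-5:  PHY address
 *	bits  4-0:  register address
 *
 * The 16 data bits are then clocked back in by wm_i82543_mii_recvbits().
 */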
   10731 
   10732 /*
   10733  * wm_gmii_i82543_writereg:	[mii interface function]
   10734  *
   10735  *	Write a PHY register on the GMII (i82543 version).
   10736  */
   10737 static int
   10738 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10739 {
   10740 	struct wm_softc *sc = device_private(dev);
   10741 
   10742 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10743 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10744 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10745 	    (MII_COMMAND_START << 30), 32);
   10746 
   10747 	return 0;
   10748 }
   10749 
   10750 /*
   10751  * wm_gmii_mdic_readreg:	[mii interface function]
   10752  *
   10753  *	Read a PHY register on the GMII.
   10754  */
   10755 static int
   10756 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10757 {
   10758 	struct wm_softc *sc = device_private(dev);
   10759 	uint32_t mdic = 0;
   10760 	int i;
   10761 
   10762 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10763 	    && (reg > MII_ADDRMASK)) {
   10764 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10765 		    __func__, sc->sc_phytype, reg);
   10766 		reg &= MII_ADDRMASK;
   10767 	}
   10768 
   10769 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10770 	    MDIC_REGADD(reg));
   10771 
   10772 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10773 		delay(50);
   10774 		mdic = CSR_READ(sc, WMREG_MDIC);
   10775 		if (mdic & MDIC_READY)
   10776 			break;
   10777 	}
   10778 
   10779 	if ((mdic & MDIC_READY) == 0) {
   10780 		DPRINTF(WM_DEBUG_GMII,
   10781 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10782 			device_xname(dev), phy, reg));
   10783 		return ETIMEDOUT;
   10784 	} else if (mdic & MDIC_E) {
   10785 		/* This is normal if no PHY is present. */
   10786 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10787 			device_xname(sc->sc_dev), phy, reg));
   10788 		return -1;
   10789 	} else
   10790 		*val = MDIC_DATA(mdic);
   10791 
   10792 	/*
   10793 	 * Allow some time after each MDIC transaction to avoid
   10794 	 * reading duplicate data in the next MDIC transaction.
   10795 	 */
   10796 	if (sc->sc_type == WM_T_PCH2)
   10797 		delay(100);
   10798 
   10799 	return 0;
   10800 }
   10801 
   10802 /*
   10803  * wm_gmii_mdic_writereg:	[mii interface function]
   10804  *
   10805  *	Write a PHY register on the GMII.
   10806  */
   10807 static int
   10808 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10809 {
   10810 	struct wm_softc *sc = device_private(dev);
   10811 	uint32_t mdic = 0;
   10812 	int i;
   10813 
   10814 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10815 	    && (reg > MII_ADDRMASK)) {
   10816 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10817 		    __func__, sc->sc_phytype, reg);
   10818 		reg &= MII_ADDRMASK;
   10819 	}
   10820 
   10821 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10822 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10823 
   10824 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10825 		delay(50);
   10826 		mdic = CSR_READ(sc, WMREG_MDIC);
   10827 		if (mdic & MDIC_READY)
   10828 			break;
   10829 	}
   10830 
   10831 	if ((mdic & MDIC_READY) == 0) {
   10832 		DPRINTF(WM_DEBUG_GMII,
   10833 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10834 			device_xname(dev), phy, reg));
   10835 		return ETIMEDOUT;
   10836 	} else if (mdic & MDIC_E) {
   10837 		DPRINTF(WM_DEBUG_GMII,
   10838 		    ("%s: MDIC write error: phy %d reg %d\n",
   10839 			device_xname(dev), phy, reg));
   10840 		return -1;
   10841 	}
   10842 
   10843 	/*
   10844 	 * Allow some time after each MDIC transaction to avoid
   10845 	 * reading duplicate data in the next MDIC transaction.
   10846 	 */
   10847 	if (sc->sc_type == WM_T_PCH2)
   10848 		delay(100);
   10849 
   10850 	return 0;
   10851 }
   10852 
   10853 /*
   10854  * wm_gmii_i82544_readreg:	[mii interface function]
   10855  *
   10856  *	Read a PHY register on the GMII.
   10857  */
   10858 static int
   10859 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10860 {
   10861 	struct wm_softc *sc = device_private(dev);
   10862 	int rv;
   10863 
   10864 	if (sc->phy.acquire(sc)) {
   10865 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10866 		return -1;
   10867 	}
   10868 
   10869 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10870 
   10871 	sc->phy.release(sc);
   10872 
   10873 	return rv;
   10874 }
   10875 
   10876 static int
   10877 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10878 {
   10879 	struct wm_softc *sc = device_private(dev);
   10880 	int rv;
   10881 
   10882 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10883 		switch (sc->sc_phytype) {
   10884 		case WMPHY_IGP:
   10885 		case WMPHY_IGP_2:
   10886 		case WMPHY_IGP_3:
   10887 			rv = wm_gmii_mdic_writereg(dev, phy,
   10888 			    MII_IGPHY_PAGE_SELECT, reg);
   10889 			if (rv != 0)
   10890 				return rv;
   10891 			break;
   10892 		default:
   10893 #ifdef WM_DEBUG
   10894 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10895 			    __func__, sc->sc_phytype, reg);
   10896 #endif
   10897 			break;
   10898 		}
   10899 	}
   10900 
   10901 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10902 }
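
/*
 * Illustrative only: for IGP PHYs a register number above
 * BME1000_MAX_MULTI_PAGE_REG carries the page in its upper bits; the
 * whole value is written to MII_IGPHY_PAGE_SELECT and the low bits
 * (reg & MII_ADDRMASK) then address the register within that page.  The
 * page/offset values below are hypothetical:
 */
#if 0
	uint16_t val;
	int reg = (5 << 5) | 0x10;	/* hypothetical: page 5, offset 0x10 */

	wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
	wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, &val);
#endif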
   10903 
   10904 /*
   10905  * wm_gmii_i82544_writereg:	[mii interface function]
   10906  *
   10907  *	Write a PHY register on the GMII.
   10908  */
   10909 static int
   10910 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10911 {
   10912 	struct wm_softc *sc = device_private(dev);
   10913 	int rv;
   10914 
   10915 	if (sc->phy.acquire(sc)) {
   10916 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10917 		return -1;
   10918 	}
   10919 
   10920 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10921 	sc->phy.release(sc);
   10922 
   10923 	return rv;
   10924 }
   10925 
   10926 static int
   10927 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10928 {
   10929 	struct wm_softc *sc = device_private(dev);
   10930 	int rv;
   10931 
   10932 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10933 		switch (sc->sc_phytype) {
   10934 		case WMPHY_IGP:
   10935 		case WMPHY_IGP_2:
   10936 		case WMPHY_IGP_3:
   10937 			rv = wm_gmii_mdic_writereg(dev, phy,
   10938 			    MII_IGPHY_PAGE_SELECT, reg);
   10939 			if (rv != 0)
   10940 				return rv;
   10941 			break;
   10942 		default:
   10943 #ifdef WM_DEBUG
    10944 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10945 			    __func__, sc->sc_phytype, reg);
   10946 #endif
   10947 			break;
   10948 		}
   10949 	}
   10950 
   10951 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10952 }
   10953 
   10954 /*
   10955  * wm_gmii_i80003_readreg:	[mii interface function]
   10956  *
    10957  *	Read a PHY register on the Kumeran bus.
    10958  * This could be handled by the PHY layer if we didn't have to lock the
    10959  * resource ...
   10960  */
   10961 static int
   10962 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10963 {
   10964 	struct wm_softc *sc = device_private(dev);
   10965 	int page_select;
   10966 	uint16_t temp, temp2;
   10967 	int rv = 0;
   10968 
   10969 	if (phy != 1) /* Only one PHY on kumeran bus */
   10970 		return -1;
   10971 
   10972 	if (sc->phy.acquire(sc)) {
   10973 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10974 		return -1;
   10975 	}
   10976 
   10977 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10978 		page_select = GG82563_PHY_PAGE_SELECT;
   10979 	else {
   10980 		/*
   10981 		 * Use Alternative Page Select register to access registers
   10982 		 * 30 and 31.
   10983 		 */
   10984 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10985 	}
   10986 	temp = reg >> GG82563_PAGE_SHIFT;
   10987 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10988 		goto out;
   10989 
   10990 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    10991 		/*
    10992 		 * Wait another 200us to work around a bug in the ready bit
    10993 		 * of the MDIC register.
    10994 		 */
   10995 		delay(200);
   10996 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10997 		if ((rv != 0) || (temp2 != temp)) {
   10998 			device_printf(dev, "%s failed\n", __func__);
   10999 			rv = -1;
   11000 			goto out;
   11001 		}
   11002 		delay(200);
   11003 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11004 		delay(200);
   11005 	} else
   11006 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11007 
   11008 out:
   11009 	sc->phy.release(sc);
   11010 	return rv;
   11011 }
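
/*
 * Worked example (illustrative): for a register whose in-page offset
 * (reg & MII_ADDRMASK) is 30 or 31, the page number must be written
 * through GG82563_PHY_PAGE_SELECT_ALT, since those two offsets are only
 * reachable via the alternative page-select register; all lower offsets
 * go through the normal GG82563_PHY_PAGE_SELECT.
 */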
   11012 
   11013 /*
   11014  * wm_gmii_i80003_writereg:	[mii interface function]
   11015  *
    11016  *	Write a PHY register on the Kumeran bus.
    11017  * This could be handled by the PHY layer if we didn't have to lock the
    11018  * resource ...
   11019  */
   11020 static int
   11021 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11022 {
   11023 	struct wm_softc *sc = device_private(dev);
   11024 	int page_select, rv;
   11025 	uint16_t temp, temp2;
   11026 
   11027 	if (phy != 1) /* Only one PHY on kumeran bus */
   11028 		return -1;
   11029 
   11030 	if (sc->phy.acquire(sc)) {
   11031 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11032 		return -1;
   11033 	}
   11034 
   11035 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11036 		page_select = GG82563_PHY_PAGE_SELECT;
   11037 	else {
   11038 		/*
   11039 		 * Use Alternative Page Select register to access registers
   11040 		 * 30 and 31.
   11041 		 */
   11042 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11043 	}
   11044 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11045 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11046 		goto out;
   11047 
   11048 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    11049 		/*
    11050 		 * Wait another 200us to work around a bug in the ready bit
    11051 		 * of the MDIC register.
    11052 		 */
   11053 		delay(200);
   11054 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11055 		if ((rv != 0) || (temp2 != temp)) {
   11056 			device_printf(dev, "%s failed\n", __func__);
   11057 			rv = -1;
   11058 			goto out;
   11059 		}
   11060 		delay(200);
   11061 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11062 		delay(200);
   11063 	} else
   11064 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11065 
   11066 out:
   11067 	sc->phy.release(sc);
   11068 	return rv;
   11069 }
   11070 
   11071 /*
   11072  * wm_gmii_bm_readreg:	[mii interface function]
   11073  *
    11074  *	Read a PHY register on the BM PHY.
    11075  * This could be handled by the PHY layer if we didn't have to lock the
    11076  * resource ...
   11077  */
   11078 static int
   11079 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11080 {
   11081 	struct wm_softc *sc = device_private(dev);
   11082 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11083 	int rv;
   11084 
   11085 	if (sc->phy.acquire(sc)) {
   11086 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11087 		return -1;
   11088 	}
   11089 
   11090 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11091 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11092 		    || (reg == 31)) ? 1 : phy;
   11093 	/* Page 800 works differently than the rest so it has its own func */
   11094 	if (page == BM_WUC_PAGE) {
   11095 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11096 		goto release;
   11097 	}
   11098 
   11099 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11100 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11101 		    && (sc->sc_type != WM_T_82583))
   11102 			rv = wm_gmii_mdic_writereg(dev, phy,
   11103 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11104 		else
   11105 			rv = wm_gmii_mdic_writereg(dev, phy,
   11106 			    BME1000_PHY_PAGE_SELECT, page);
   11107 		if (rv != 0)
   11108 			goto release;
   11109 	}
   11110 
   11111 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11112 
   11113 release:
   11114 	sc->phy.release(sc);
   11115 	return rv;
   11116 }
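
/*
 * Note (illustrative): all BM page-select, port-control and wakeup
 * registers live at PHY address 1 (see
 * wm_enable_phy_wakeup_reg_access_bm() below), which is why the code
 * above overrides the caller's phy argument to 1 for pages >= 768 and
 * for registers 25 and 31.
 */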
   11117 
   11118 /*
   11119  * wm_gmii_bm_writereg:	[mii interface function]
   11120  *
    11121  *	Write a PHY register on the BM PHY.
    11122  * This could be handled by the PHY layer if we didn't have to lock the
    11123  * resource ...
   11124  */
   11125 static int
   11126 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11127 {
   11128 	struct wm_softc *sc = device_private(dev);
   11129 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11130 	int rv;
   11131 
   11132 	if (sc->phy.acquire(sc)) {
   11133 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11134 		return -1;
   11135 	}
   11136 
   11137 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11138 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11139 		    || (reg == 31)) ? 1 : phy;
   11140 	/* Page 800 works differently than the rest so it has its own func */
   11141 	if (page == BM_WUC_PAGE) {
   11142 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11143 		goto release;
   11144 	}
   11145 
   11146 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11147 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11148 		    && (sc->sc_type != WM_T_82583))
   11149 			rv = wm_gmii_mdic_writereg(dev, phy,
   11150 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11151 		else
   11152 			rv = wm_gmii_mdic_writereg(dev, phy,
   11153 			    BME1000_PHY_PAGE_SELECT, page);
   11154 		if (rv != 0)
   11155 			goto release;
   11156 	}
   11157 
   11158 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11159 
   11160 release:
   11161 	sc->phy.release(sc);
   11162 	return rv;
   11163 }
   11164 
   11165 /*
   11166  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11167  *  @dev: pointer to the HW structure
   11168  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11169  *
   11170  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11171  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11172  */
   11173 static int
   11174 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11175 {
   11176 	uint16_t temp;
   11177 	int rv;
   11178 
   11179 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11180 		device_xname(dev), __func__));
   11181 
   11182 	if (!phy_regp)
   11183 		return -1;
   11184 
   11185 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11186 
   11187 	/* Select Port Control Registers page */
   11188 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11189 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11190 	if (rv != 0)
   11191 		return rv;
   11192 
   11193 	/* Read WUCE and save it */
   11194 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11195 	if (rv != 0)
   11196 		return rv;
   11197 
   11198 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11199 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11200 	 */
   11201 	temp = *phy_regp;
   11202 	temp |= BM_WUC_ENABLE_BIT;
   11203 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11204 
   11205 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11206 		return rv;
   11207 
   11208 	/* Select Host Wakeup Registers page - caller now able to write
   11209 	 * registers on the Wakeup registers page
   11210 	 */
   11211 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11212 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11213 }
   11214 
   11215 /*
   11216  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11217  *  @dev: pointer to the HW structure
   11218  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11219  *
   11220  *  Restore BM_WUC_ENABLE_REG to its original value.
   11221  *
   11222  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11223  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11224  *  caller.
   11225  */
   11226 static int
   11227 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11228 {
   11229 
   11230 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11231 		device_xname(dev), __func__));
   11232 
   11233 	if (!phy_regp)
   11234 		return -1;
   11235 
   11236 	/* Select Port Control Registers page */
   11237 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11238 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11239 
   11240 	/* Restore 769.17 to its original value */
   11241 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11242 
   11243 	return 0;
   11244 }
   11245 
   11246 /*
   11247  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    11248  *  @dev: pointer to the HW structure
   11249  *  @offset: register offset to be read or written
   11250  *  @val: pointer to the data to read or write
   11251  *  @rd: determines if operation is read or write
   11252  *  @page_set: BM_WUC_PAGE already set and access enabled
   11253  *
   11254  *  Read the PHY register at offset and store the retrieved information in
   11255  *  data, or write data to PHY register at offset.  Note the procedure to
   11256  *  access the PHY wakeup registers is different than reading the other PHY
   11257  *  registers. It works as such:
   11258  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11259  *  2) Set the page to 800 for host access (801 for manageability)
   11260  *  3) Write the address using the address opcode (0x11)
   11261  *  4) Read or write the data using the data opcode (0x12)
   11262  *  5) Restore 769.17.2 to its original value
   11263  *
   11264  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11265  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11266  *
   11267  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11268  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    11269  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11270  */
   11271 static int
    11272 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11273 	bool page_set)
   11274 {
   11275 	struct wm_softc *sc = device_private(dev);
   11276 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11277 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11278 	uint16_t wuce;
   11279 	int rv = 0;
   11280 
   11281 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11282 		device_xname(dev), __func__));
   11283 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11284 	if ((sc->sc_type == WM_T_PCH)
   11285 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11286 		device_printf(dev,
   11287 		    "Attempting to access page %d while gig enabled.\n", page);
   11288 	}
   11289 
   11290 	if (!page_set) {
   11291 		/* Enable access to PHY wakeup registers */
   11292 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11293 		if (rv != 0) {
   11294 			device_printf(dev,
   11295 			    "%s: Could not enable PHY wakeup reg access\n",
   11296 			    __func__);
   11297 			return rv;
   11298 		}
   11299 	}
   11300 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11301 		device_xname(sc->sc_dev), __func__, page, regnum));
   11302 
   11303 	/*
    11304 	 * Steps 3 and 4: access the PHY wakeup register, as described
    11305 	 * in the function comment above.
   11306 	 */
   11307 
   11308 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11309 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11310 	if (rv != 0)
   11311 		return rv;
   11312 
   11313 	if (rd) {
   11314 		/* Read the Wakeup register page value using opcode 0x12 */
   11315 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11316 	} else {
   11317 		/* Write the Wakeup register page value using opcode 0x12 */
   11318 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11319 	}
   11320 	if (rv != 0)
   11321 		return rv;
   11322 
   11323 	if (!page_set)
   11324 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11325 
   11326 	return rv;
   11327 }
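
/*
 * Illustrative sketch (not compiled; guarded by #if 0): a hand-rolled read
 * of one wakeup-page register following the five-step procedure documented
 * above wm_access_phy_wakeup_reg_bm().  It only uses helpers and register
 * names that already appear in this file, and, like those helpers, it
 * assumes the caller already holds the PHY semaphore.  The function name
 * is invented for this example.
 */
#if 0
static int
wm_wuc_read_sketch(device_t dev, uint16_t regnum, uint16_t *val)
{
	uint16_t wuce;
	int rv;

	/* Steps 1 and 2: save WUCE and switch to the wakeup page. */
	if ((rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce)) != 0)
		return rv;

	/* Step 3: latch the register number via the address opcode. */
	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);

	/* Step 4: read the data via the data opcode. */
	if (rv == 0)
		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);

	/* Step 5: restore WUCE (this also leaves the wakeup page). */
	(void)wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
	return rv;
}
#endif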
   11328 
   11329 /*
   11330  * wm_gmii_hv_readreg:	[mii interface function]
   11331  *
    11332  *	Read a PHY register on the HV (PCH and newer) PHY.
    11333  * This could be handled by the PHY layer if we didn't have to lock the
    11334  * resource ...
   11335  */
   11336 static int
   11337 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11338 {
   11339 	struct wm_softc *sc = device_private(dev);
   11340 	int rv;
   11341 
   11342 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11343 		device_xname(dev), __func__));
   11344 	if (sc->phy.acquire(sc)) {
   11345 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11346 		return -1;
   11347 	}
   11348 
   11349 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11350 	sc->phy.release(sc);
   11351 	return rv;
   11352 }
   11353 
   11354 static int
   11355 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11356 {
   11357 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11358 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11359 	int rv;
   11360 
   11361 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11362 
   11363 	/* Page 800 works differently than the rest so it has its own func */
   11364 	if (page == BM_WUC_PAGE)
   11365 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11366 
   11367 	/*
    11368 	 * Pages 1 through 767 work differently from the rest and are not
    11369 	 * handled here.
   11370 	 */
   11371 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11372 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11373 		return -1;
   11374 	}
   11375 
   11376 	/*
   11377 	 * XXX I21[789] documents say that the SMBus Address register is at
   11378 	 * PHY address 01, Page 0 (not 768), Register 26.
   11379 	 */
   11380 	if (page == HV_INTC_FC_PAGE_START)
   11381 		page = 0;
   11382 
   11383 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11384 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11385 		    page << BME1000_PAGE_SHIFT);
   11386 		if (rv != 0)
   11387 			return rv;
   11388 	}
   11389 
   11390 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11391 }
   11392 
   11393 /*
   11394  * wm_gmii_hv_writereg:	[mii interface function]
   11395  *
    11396  *	Write a PHY register on the HV (PCH and newer) PHY.
    11397  * This could be handled by the PHY layer if we didn't have to lock the
    11398  * resource ...
   11399  */
   11400 static int
   11401 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11402 {
   11403 	struct wm_softc *sc = device_private(dev);
   11404 	int rv;
   11405 
   11406 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11407 		device_xname(dev), __func__));
   11408 
   11409 	if (sc->phy.acquire(sc)) {
   11410 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11411 		return -1;
   11412 	}
   11413 
   11414 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11415 	sc->phy.release(sc);
   11416 
   11417 	return rv;
   11418 }
   11419 
   11420 static int
   11421 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11422 {
   11423 	struct wm_softc *sc = device_private(dev);
   11424 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11425 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11426 	int rv;
   11427 
   11428 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11429 
   11430 	/* Page 800 works differently than the rest so it has its own func */
   11431 	if (page == BM_WUC_PAGE)
   11432 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11433 		    false);
   11434 
   11435 	/*
    11436 	 * Pages 1 through 767 work differently from the rest and are not
    11437 	 * handled here.
   11438 	 */
   11439 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11440 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11441 		return -1;
   11442 	}
   11443 
   11444 	{
   11445 		/*
   11446 		 * XXX I21[789] documents say that the SMBus Address register
   11447 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11448 		 */
   11449 		if (page == HV_INTC_FC_PAGE_START)
   11450 			page = 0;
   11451 
   11452 		/*
   11453 		 * XXX Workaround MDIO accesses being disabled after entering
   11454 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11455 		 * register is set)
   11456 		 */
   11457 		if (sc->sc_phytype == WMPHY_82578) {
   11458 			struct mii_softc *child;
   11459 
   11460 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11461 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11462 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11463 			    && ((val & (1 << 11)) != 0)) {
   11464 				device_printf(dev, "XXX need workaround\n");
   11465 			}
   11466 		}
   11467 
   11468 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11469 			rv = wm_gmii_mdic_writereg(dev, 1,
   11470 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11471 			if (rv != 0)
   11472 				return rv;
   11473 		}
   11474 	}
   11475 
   11476 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11477 }
   11478 
   11479 /*
   11480  * wm_gmii_82580_readreg:	[mii interface function]
   11481  *
   11482  *	Read a PHY register on the 82580 and I350.
   11483  * This could be handled by the PHY layer if we didn't have to lock the
    11484  * resource ...
   11485  */
   11486 static int
   11487 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11488 {
   11489 	struct wm_softc *sc = device_private(dev);
   11490 	int rv;
   11491 
   11492 	if (sc->phy.acquire(sc) != 0) {
   11493 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11494 		return -1;
   11495 	}
   11496 
   11497 #ifdef DIAGNOSTIC
   11498 	if (reg > MII_ADDRMASK) {
   11499 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11500 		    __func__, sc->sc_phytype, reg);
   11501 		reg &= MII_ADDRMASK;
   11502 	}
   11503 #endif
   11504 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11505 
   11506 	sc->phy.release(sc);
   11507 	return rv;
   11508 }
   11509 
   11510 /*
   11511  * wm_gmii_82580_writereg:	[mii interface function]
   11512  *
   11513  *	Write a PHY register on the 82580 and I350.
   11514  * This could be handled by the PHY layer if we didn't have to lock the
    11515  * resource ...
   11516  */
   11517 static int
   11518 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11519 {
   11520 	struct wm_softc *sc = device_private(dev);
   11521 	int rv;
   11522 
   11523 	if (sc->phy.acquire(sc) != 0) {
   11524 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11525 		return -1;
   11526 	}
   11527 
   11528 #ifdef DIAGNOSTIC
   11529 	if (reg > MII_ADDRMASK) {
   11530 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11531 		    __func__, sc->sc_phytype, reg);
   11532 		reg &= MII_ADDRMASK;
   11533 	}
   11534 #endif
   11535 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11536 
   11537 	sc->phy.release(sc);
   11538 	return rv;
   11539 }
   11540 
   11541 /*
   11542  * wm_gmii_gs40g_readreg:	[mii interface function]
   11543  *
    11544  *	Read a PHY register on the I210 and I211.
    11545  * This could be handled by the PHY layer if we didn't have to lock the
    11546  * resource ...
   11547  */
   11548 static int
   11549 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11550 {
   11551 	struct wm_softc *sc = device_private(dev);
   11552 	int page, offset;
   11553 	int rv;
   11554 
   11555 	/* Acquire semaphore */
   11556 	if (sc->phy.acquire(sc)) {
   11557 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11558 		return -1;
   11559 	}
   11560 
   11561 	/* Page select */
   11562 	page = reg >> GS40G_PAGE_SHIFT;
   11563 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11564 	if (rv != 0)
   11565 		goto release;
   11566 
   11567 	/* Read reg */
   11568 	offset = reg & GS40G_OFFSET_MASK;
   11569 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11570 
   11571 release:
   11572 	sc->phy.release(sc);
   11573 	return rv;
   11574 }
   11575 
   11576 /*
   11577  * wm_gmii_gs40g_writereg:	[mii interface function]
   11578  *
   11579  *	Write a PHY register on the I210 and I211.
   11580  * This could be handled by the PHY layer if we didn't have to lock the
    11581  * resource ...
   11582  */
   11583 static int
   11584 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11585 {
   11586 	struct wm_softc *sc = device_private(dev);
   11587 	uint16_t page;
   11588 	int offset, rv;
   11589 
   11590 	/* Acquire semaphore */
   11591 	if (sc->phy.acquire(sc)) {
   11592 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11593 		return -1;
   11594 	}
   11595 
   11596 	/* Page select */
   11597 	page = reg >> GS40G_PAGE_SHIFT;
   11598 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11599 	if (rv != 0)
   11600 		goto release;
   11601 
   11602 	/* Write reg */
   11603 	offset = reg & GS40G_OFFSET_MASK;
   11604 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11605 
   11606 release:
   11607 	/* Release semaphore */
   11608 	sc->phy.release(sc);
   11609 	return rv;
   11610 }
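
/*
 * Illustrative note for the GS40G accessors above: the MII "reg" argument
 * packs the page number and the in-page offset, mirroring how the readreg
 * and writereg functions unpack it:
 *
 *	reg = (page << GS40G_PAGE_SHIFT) | (offset & GS40G_OFFSET_MASK);
 *
 * so reading in-page offset 21 of page 2 (hypothetical values, for
 * illustration only) would be:
 *
 *	uint16_t val;
 *	wm_gmii_gs40g_readreg(dev, phy, (2 << GS40G_PAGE_SHIFT) | 21, &val);
 */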
   11611 
   11612 /*
   11613  * wm_gmii_statchg:	[mii interface function]
   11614  *
   11615  *	Callback from MII layer when media changes.
   11616  */
   11617 static void
   11618 wm_gmii_statchg(struct ifnet *ifp)
   11619 {
   11620 	struct wm_softc *sc = ifp->if_softc;
   11621 	struct mii_data *mii = &sc->sc_mii;
   11622 
   11623 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11624 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11625 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11626 
   11627 	/* Get flow control negotiation result. */
   11628 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11629 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11630 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11631 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11632 	}
   11633 
   11634 	if (sc->sc_flowflags & IFM_FLOW) {
   11635 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11636 			sc->sc_ctrl |= CTRL_TFCE;
   11637 			sc->sc_fcrtl |= FCRTL_XONE;
   11638 		}
   11639 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11640 			sc->sc_ctrl |= CTRL_RFCE;
   11641 	}
   11642 
   11643 	if (mii->mii_media_active & IFM_FDX) {
   11644 		DPRINTF(WM_DEBUG_LINK,
   11645 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11646 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11647 	} else {
   11648 		DPRINTF(WM_DEBUG_LINK,
   11649 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11650 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11651 	}
   11652 
   11653 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11654 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11655 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11656 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11657 	if (sc->sc_type == WM_T_80003) {
   11658 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11659 		case IFM_1000_T:
   11660 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11661 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11662 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11663 			break;
   11664 		default:
   11665 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11666 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11667 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11668 			break;
   11669 		}
   11670 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11671 	}
   11672 }
   11673 
   11674 /* kumeran related (80003, ICH* and PCH*) */
   11675 
   11676 /*
   11677  * wm_kmrn_readreg:
   11678  *
   11679  *	Read a kumeran register
   11680  */
   11681 static int
   11682 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11683 {
   11684 	int rv;
   11685 
   11686 	if (sc->sc_type == WM_T_80003)
   11687 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11688 	else
   11689 		rv = sc->phy.acquire(sc);
   11690 	if (rv != 0) {
   11691 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11692 		    __func__);
   11693 		return rv;
   11694 	}
   11695 
   11696 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11697 
   11698 	if (sc->sc_type == WM_T_80003)
   11699 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11700 	else
   11701 		sc->phy.release(sc);
   11702 
   11703 	return rv;
   11704 }
   11705 
   11706 static int
   11707 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11708 {
   11709 
   11710 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11711 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11712 	    KUMCTRLSTA_REN);
   11713 	CSR_WRITE_FLUSH(sc);
   11714 	delay(2);
   11715 
   11716 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11717 
   11718 	return 0;
   11719 }
   11720 
   11721 /*
   11722  * wm_kmrn_writereg:
   11723  *
   11724  *	Write a kumeran register
   11725  */
   11726 static int
   11727 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11728 {
   11729 	int rv;
   11730 
   11731 	if (sc->sc_type == WM_T_80003)
   11732 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11733 	else
   11734 		rv = sc->phy.acquire(sc);
   11735 	if (rv != 0) {
   11736 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11737 		    __func__);
   11738 		return rv;
   11739 	}
   11740 
   11741 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11742 
   11743 	if (sc->sc_type == WM_T_80003)
   11744 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11745 	else
   11746 		sc->phy.release(sc);
   11747 
   11748 	return rv;
   11749 }
   11750 
   11751 static int
   11752 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11753 {
   11754 
   11755 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11756 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11757 
   11758 	return 0;
   11759 }
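
/*
 * Illustrative sketch (not compiled; guarded by #if 0): a read-modify-write
 * of a Kumeran register through the locking wrappers above, similar in
 * spirit to how wm_gmii_statchg() writes KUMCTRLSTA_OFFSET_HD_CTRL.  The
 * function name and "set" parameter are invented for this example.
 */
#if 0
static int
wm_kmrn_setbits_sketch(struct wm_softc *sc, int reg, uint16_t set)
{
	uint16_t val;
	int rv;

	if ((rv = wm_kmrn_readreg(sc, reg, &val)) != 0)
		return rv;
	return wm_kmrn_writereg(sc, reg, val | set);
}
#endif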
   11760 
   11761 /*
    11762  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
   11763  * This access method is different from IEEE MMD.
   11764  */
   11765 static int
   11766 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11767 {
   11768 	struct wm_softc *sc = device_private(dev);
   11769 	int rv;
   11770 
   11771 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11772 	if (rv != 0)
   11773 		return rv;
   11774 
   11775 	if (rd)
   11776 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11777 	else
   11778 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11779 	return rv;
   11780 }
   11781 
   11782 static int
   11783 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11784 {
   11785 
   11786 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11787 }
   11788 
   11789 static int
   11790 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11791 {
   11792 
   11793 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11794 }
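
/*
 * Illustrative sketch (not compiled; guarded by #if 0): the EMI helpers
 * above are *_locked functions, so a caller brackets them with the PHY
 * semaphore, as the other unlocked/locked accessor pairs in this file do.
 * No particular EMI register offset is assumed here; the function name is
 * invented for this example.
 */
#if 0
static int
wm_read_emi_reg_sketch(device_t dev, int reg, uint16_t *val)
{
	struct wm_softc *sc = device_private(dev);
	int rv;

	if (sc->phy.acquire(sc) != 0) {
		device_printf(dev, "%s: failed to get semaphore\n", __func__);
		return -1;
	}
	rv = wm_read_emi_reg_locked(dev, reg, val);
	sc->phy.release(sc);
	return rv;
}
#endif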
   11795 
   11796 /* SGMII related */
   11797 
   11798 /*
   11799  * wm_sgmii_uses_mdio
   11800  *
   11801  * Check whether the transaction is to the internal PHY or the external
   11802  * MDIO interface. Return true if it's MDIO.
   11803  */
   11804 static bool
   11805 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11806 {
   11807 	uint32_t reg;
   11808 	bool ismdio = false;
   11809 
   11810 	switch (sc->sc_type) {
   11811 	case WM_T_82575:
   11812 	case WM_T_82576:
   11813 		reg = CSR_READ(sc, WMREG_MDIC);
   11814 		ismdio = ((reg & MDIC_DEST) != 0);
   11815 		break;
   11816 	case WM_T_82580:
   11817 	case WM_T_I350:
   11818 	case WM_T_I354:
   11819 	case WM_T_I210:
   11820 	case WM_T_I211:
   11821 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11822 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11823 		break;
   11824 	default:
   11825 		break;
   11826 	}
   11827 
   11828 	return ismdio;
   11829 }
   11830 
   11831 /*
   11832  * wm_sgmii_readreg:	[mii interface function]
   11833  *
   11834  *	Read a PHY register on the SGMII
   11835  * This could be handled by the PHY layer if we didn't have to lock the
    11836  * resource ...
   11837  */
   11838 static int
   11839 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11840 {
   11841 	struct wm_softc *sc = device_private(dev);
   11842 	int rv;
   11843 
   11844 	if (sc->phy.acquire(sc)) {
   11845 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11846 		return -1;
   11847 	}
   11848 
   11849 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11850 
   11851 	sc->phy.release(sc);
   11852 	return rv;
   11853 }
   11854 
   11855 static int
   11856 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11857 {
   11858 	struct wm_softc *sc = device_private(dev);
   11859 	uint32_t i2ccmd;
   11860 	int i, rv = 0;
   11861 
   11862 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11863 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11864 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11865 
   11866 	/* Poll the ready bit */
   11867 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11868 		delay(50);
   11869 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11870 		if (i2ccmd & I2CCMD_READY)
   11871 			break;
   11872 	}
   11873 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11874 		device_printf(dev, "I2CCMD Read did not complete\n");
   11875 		rv = ETIMEDOUT;
   11876 	}
   11877 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11878 		if (!sc->phy.no_errprint)
   11879 			device_printf(dev, "I2CCMD Error bit set\n");
   11880 		rv = EIO;
   11881 	}
   11882 
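	/* The I2C interface returns the data byte-swapped; swap it back. */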
   11883 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11884 
   11885 	return rv;
   11886 }
   11887 
   11888 /*
   11889  * wm_sgmii_writereg:	[mii interface function]
   11890  *
   11891  *	Write a PHY register on the SGMII.
   11892  * This could be handled by the PHY layer if we didn't have to lock the
    11893  * resource ...
   11894  */
   11895 static int
   11896 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11897 {
   11898 	struct wm_softc *sc = device_private(dev);
   11899 	int rv;
   11900 
   11901 	if (sc->phy.acquire(sc) != 0) {
   11902 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11903 		return -1;
   11904 	}
   11905 
   11906 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11907 
   11908 	sc->phy.release(sc);
   11909 
   11910 	return rv;
   11911 }
   11912 
   11913 static int
   11914 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11915 {
   11916 	struct wm_softc *sc = device_private(dev);
   11917 	uint32_t i2ccmd;
   11918 	uint16_t swapdata;
   11919 	int rv = 0;
   11920 	int i;
   11921 
   11922 	/* Swap the data bytes for the I2C interface */
   11923 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11924 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11925 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11926 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11927 
   11928 	/* Poll the ready bit */
   11929 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11930 		delay(50);
   11931 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11932 		if (i2ccmd & I2CCMD_READY)
   11933 			break;
   11934 	}
   11935 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11936 		device_printf(dev, "I2CCMD Write did not complete\n");
   11937 		rv = ETIMEDOUT;
   11938 	}
   11939 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11940 		device_printf(dev, "I2CCMD Error bit set\n");
   11941 		rv = EIO;
   11942 	}
   11943 
   11944 	return rv;
   11945 }
   11946 
   11947 /* TBI related */
   11948 
   11949 static bool
   11950 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11951 {
   11952 	bool sig;
   11953 
   11954 	sig = ctrl & CTRL_SWDPIN(1);
   11955 
   11956 	/*
   11957 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11958 	 * detect a signal, 1 if they don't.
   11959 	 */
   11960 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11961 		sig = !sig;
   11962 
   11963 	return sig;
   11964 }
   11965 
   11966 /*
   11967  * wm_tbi_mediainit:
   11968  *
   11969  *	Initialize media for use on 1000BASE-X devices.
   11970  */
   11971 static void
   11972 wm_tbi_mediainit(struct wm_softc *sc)
   11973 {
   11974 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11975 	const char *sep = "";
   11976 
   11977 	if (sc->sc_type < WM_T_82543)
   11978 		sc->sc_tipg = TIPG_WM_DFLT;
   11979 	else
   11980 		sc->sc_tipg = TIPG_LG_DFLT;
   11981 
   11982 	sc->sc_tbi_serdes_anegticks = 5;
   11983 
   11984 	/* Initialize our media structures */
   11985 	sc->sc_mii.mii_ifp = ifp;
   11986 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11987 
   11988 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11989 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11990 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   11991 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   11992 		    sc->sc_core_lock);
   11993 	} else {
   11994 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   11995 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   11996 	}
   11997 
   11998 	/*
   11999 	 * SWD Pins:
   12000 	 *
   12001 	 *	0 = Link LED (output)
   12002 	 *	1 = Loss Of Signal (input)
   12003 	 */
   12004 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12005 
   12006 	/* XXX Perhaps this is only for TBI */
   12007 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12008 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12009 
   12010 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12011 		sc->sc_ctrl &= ~CTRL_LRST;
   12012 
   12013 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12014 
   12015 #define	ADD(ss, mm, dd)							\
   12016 do {									\
   12017 	aprint_normal("%s%s", sep, ss);					\
   12018 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12019 	sep = ", ";							\
   12020 } while (/*CONSTCOND*/0)
   12021 
   12022 	aprint_normal_dev(sc->sc_dev, "");
   12023 
   12024 	if (sc->sc_type == WM_T_I354) {
   12025 		uint32_t status;
   12026 
   12027 		status = CSR_READ(sc, WMREG_STATUS);
   12028 		if (((status & STATUS_2P5_SKU) != 0)
   12029 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12030 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12031 		} else
   12032 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12033 	} else if (sc->sc_type == WM_T_82545) {
   12034 		/* Only 82545 is LX (XXX except SFP) */
   12035 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12036 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12037 	} else if (sc->sc_sfptype != 0) {
   12038 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12039 		switch (sc->sc_sfptype) {
   12040 		default:
   12041 		case SFF_SFP_ETH_FLAGS_1000SX:
   12042 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12043 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12044 			break;
   12045 		case SFF_SFP_ETH_FLAGS_1000LX:
   12046 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12047 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12048 			break;
   12049 		case SFF_SFP_ETH_FLAGS_1000CX:
   12050 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12051 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12052 			break;
   12053 		case SFF_SFP_ETH_FLAGS_1000T:
   12054 			ADD("1000baseT", IFM_1000_T, 0);
   12055 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12056 			break;
   12057 		case SFF_SFP_ETH_FLAGS_100FX:
   12058 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12059 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12060 			break;
   12061 		}
   12062 	} else {
   12063 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12064 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12065 	}
   12066 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12067 	aprint_normal("\n");
   12068 
   12069 #undef ADD
   12070 
   12071 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12072 }
   12073 
   12074 /*
   12075  * wm_tbi_mediachange:	[ifmedia interface function]
   12076  *
   12077  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12078  */
   12079 static int
   12080 wm_tbi_mediachange(struct ifnet *ifp)
   12081 {
   12082 	struct wm_softc *sc = ifp->if_softc;
   12083 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12084 	uint32_t status, ctrl;
   12085 	bool signal;
   12086 	int i;
   12087 
   12088 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12089 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12090 		/* XXX need some work for >= 82571 and < 82575 */
   12091 		if (sc->sc_type < WM_T_82575)
   12092 			return 0;
   12093 	}
   12094 
   12095 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12096 	    || (sc->sc_type >= WM_T_82575))
   12097 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12098 
   12099 	sc->sc_ctrl &= ~CTRL_LRST;
   12100 	sc->sc_txcw = TXCW_ANE;
   12101 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12102 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12103 	else if (ife->ifm_media & IFM_FDX)
   12104 		sc->sc_txcw |= TXCW_FD;
   12105 	else
   12106 		sc->sc_txcw |= TXCW_HD;
   12107 
   12108 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12109 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12110 
   12111 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12112 		device_xname(sc->sc_dev), sc->sc_txcw));
   12113 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12114 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12115 	CSR_WRITE_FLUSH(sc);
   12116 	delay(1000);
   12117 
   12118 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12119 	signal = wm_tbi_havesignal(sc, ctrl);
   12120 
   12121 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12122 		signal));
   12123 
   12124 	if (signal) {
   12125 		/* Have signal; wait for the link to come up. */
   12126 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12127 			delay(10000);
   12128 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12129 				break;
   12130 		}
   12131 
   12132 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12133 			device_xname(sc->sc_dev), i));
   12134 
   12135 		status = CSR_READ(sc, WMREG_STATUS);
   12136 		DPRINTF(WM_DEBUG_LINK,
   12137 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12138 			device_xname(sc->sc_dev), status, STATUS_LU));
   12139 		if (status & STATUS_LU) {
   12140 			/* Link is up. */
   12141 			DPRINTF(WM_DEBUG_LINK,
   12142 			    ("%s: LINK: set media -> link up %s\n",
   12143 				device_xname(sc->sc_dev),
   12144 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12145 
   12146 			/*
    12147 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    12148 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   12149 			 */
   12150 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12151 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12152 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12153 			if (status & STATUS_FD)
   12154 				sc->sc_tctl |=
   12155 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12156 			else
   12157 				sc->sc_tctl |=
   12158 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12159 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12160 				sc->sc_fcrtl |= FCRTL_XONE;
   12161 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12162 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12163 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12164 			sc->sc_tbi_linkup = 1;
   12165 		} else {
   12166 			if (i == WM_LINKUP_TIMEOUT)
   12167 				wm_check_for_link(sc);
   12168 			/* Link is down. */
   12169 			DPRINTF(WM_DEBUG_LINK,
   12170 			    ("%s: LINK: set media -> link down\n",
   12171 				device_xname(sc->sc_dev)));
   12172 			sc->sc_tbi_linkup = 0;
   12173 		}
   12174 	} else {
   12175 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12176 			device_xname(sc->sc_dev)));
   12177 		sc->sc_tbi_linkup = 0;
   12178 	}
   12179 
   12180 	wm_tbi_serdes_set_linkled(sc);
   12181 
   12182 	return 0;
   12183 }
   12184 
   12185 /*
   12186  * wm_tbi_mediastatus:	[ifmedia interface function]
   12187  *
   12188  *	Get the current interface media status on a 1000BASE-X device.
   12189  */
   12190 static void
   12191 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12192 {
   12193 	struct wm_softc *sc = ifp->if_softc;
   12194 	uint32_t ctrl, status;
   12195 
   12196 	ifmr->ifm_status = IFM_AVALID;
   12197 	ifmr->ifm_active = IFM_ETHER;
   12198 
   12199 	status = CSR_READ(sc, WMREG_STATUS);
   12200 	if ((status & STATUS_LU) == 0) {
   12201 		ifmr->ifm_active |= IFM_NONE;
   12202 		return;
   12203 	}
   12204 
   12205 	ifmr->ifm_status |= IFM_ACTIVE;
   12206 	/* Only 82545 is LX */
   12207 	if (sc->sc_type == WM_T_82545)
   12208 		ifmr->ifm_active |= IFM_1000_LX;
   12209 	else
   12210 		ifmr->ifm_active |= IFM_1000_SX;
   12211 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12212 		ifmr->ifm_active |= IFM_FDX;
   12213 	else
   12214 		ifmr->ifm_active |= IFM_HDX;
   12215 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12216 	if (ctrl & CTRL_RFCE)
   12217 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12218 	if (ctrl & CTRL_TFCE)
   12219 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12220 }
   12221 
   12222 /* XXX TBI only */
   12223 static int
   12224 wm_check_for_link(struct wm_softc *sc)
   12225 {
   12226 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12227 	uint32_t rxcw;
   12228 	uint32_t ctrl;
   12229 	uint32_t status;
   12230 	bool signal;
   12231 
   12232 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12233 		device_xname(sc->sc_dev), __func__));
   12234 
   12235 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12236 		/* XXX need some work for >= 82571 */
   12237 		if (sc->sc_type >= WM_T_82571) {
   12238 			sc->sc_tbi_linkup = 1;
   12239 			return 0;
   12240 		}
   12241 	}
   12242 
   12243 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12244 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12245 	status = CSR_READ(sc, WMREG_STATUS);
   12246 	signal = wm_tbi_havesignal(sc, ctrl);
   12247 
   12248 	DPRINTF(WM_DEBUG_LINK,
   12249 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12250 		device_xname(sc->sc_dev), __func__, signal,
   12251 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12252 
   12253 	/*
   12254 	 * SWDPIN   LU RXCW
   12255 	 *	0    0	  0
   12256 	 *	0    0	  1	(should not happen)
   12257 	 *	0    1	  0	(should not happen)
   12258 	 *	0    1	  1	(should not happen)
   12259 	 *	1    0	  0	Disable autonego and force linkup
   12260 	 *	1    0	  1	got /C/ but not linkup yet
   12261 	 *	1    1	  0	(linkup)
   12262 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12263 	 *
   12264 	 */
   12265 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12266 		DPRINTF(WM_DEBUG_LINK,
   12267 		    ("%s: %s: force linkup and fullduplex\n",
   12268 			device_xname(sc->sc_dev), __func__));
   12269 		sc->sc_tbi_linkup = 0;
   12270 		/* Disable auto-negotiation in the TXCW register */
   12271 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12272 
   12273 		/*
   12274 		 * Force link-up and also force full-duplex.
   12275 		 *
   12276 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
    12277 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    12278 		 * automatically, so update sc->sc_ctrl from the value just read.
   12279 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12280 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12281 	} else if (((status & STATUS_LU) != 0)
   12282 	    && ((rxcw & RXCW_C) != 0)
   12283 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12284 		sc->sc_tbi_linkup = 1;
   12285 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12286 			device_xname(sc->sc_dev),
   12287 			__func__));
   12288 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12289 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12290 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    12291 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12292 			device_xname(sc->sc_dev), __func__));
   12293 	} else {
   12294 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12295 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12296 			status));
   12297 	}
   12298 
   12299 	return 0;
   12300 }
   12301 
   12302 /*
   12303  * wm_tbi_tick:
   12304  *
   12305  *	Check the link on TBI devices.
   12306  *	This function acts as mii_tick().
   12307  */
   12308 static void
   12309 wm_tbi_tick(struct wm_softc *sc)
   12310 {
   12311 	struct mii_data *mii = &sc->sc_mii;
   12312 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12313 	uint32_t status;
   12314 
   12315 	KASSERT(WM_CORE_LOCKED(sc));
   12316 
   12317 	status = CSR_READ(sc, WMREG_STATUS);
   12318 
   12319 	/* XXX is this needed? */
   12320 	(void)CSR_READ(sc, WMREG_RXCW);
   12321 	(void)CSR_READ(sc, WMREG_CTRL);
   12322 
   12323 	/* set link status */
   12324 	if ((status & STATUS_LU) == 0) {
   12325 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12326 			device_xname(sc->sc_dev)));
   12327 		sc->sc_tbi_linkup = 0;
   12328 	} else if (sc->sc_tbi_linkup == 0) {
   12329 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12330 			device_xname(sc->sc_dev),
   12331 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12332 		sc->sc_tbi_linkup = 1;
   12333 		sc->sc_tbi_serdes_ticks = 0;
   12334 	}
   12335 
   12336 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12337 		goto setled;
   12338 
   12339 	if ((status & STATUS_LU) == 0) {
   12340 		sc->sc_tbi_linkup = 0;
   12341 		/* If the timer expired, retry autonegotiation */
   12342 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12343 		    && (++sc->sc_tbi_serdes_ticks
   12344 			>= sc->sc_tbi_serdes_anegticks)) {
   12345 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12346 				device_xname(sc->sc_dev), __func__));
   12347 			sc->sc_tbi_serdes_ticks = 0;
   12348 			/*
   12349 			 * Reset the link, and let autonegotiation do
   12350 			 * its thing
   12351 			 */
   12352 			sc->sc_ctrl |= CTRL_LRST;
   12353 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12354 			CSR_WRITE_FLUSH(sc);
   12355 			delay(1000);
   12356 			sc->sc_ctrl &= ~CTRL_LRST;
   12357 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12358 			CSR_WRITE_FLUSH(sc);
   12359 			delay(1000);
   12360 			CSR_WRITE(sc, WMREG_TXCW,
   12361 			    sc->sc_txcw & ~TXCW_ANE);
   12362 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12363 		}
   12364 	}
   12365 
   12366 setled:
   12367 	wm_tbi_serdes_set_linkled(sc);
   12368 }
   12369 
   12370 /* SERDES related */
   12371 static void
   12372 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12373 {
   12374 	uint32_t reg;
   12375 
   12376 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12377 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12378 		return;
   12379 
   12380 	/* Enable PCS to turn on link */
   12381 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12382 	reg |= PCS_CFG_PCS_EN;
   12383 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12384 
   12385 	/* Power up the laser */
   12386 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12387 	reg &= ~CTRL_EXT_SWDPIN(3);
   12388 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12389 
   12390 	/* Flush the write to verify completion */
   12391 	CSR_WRITE_FLUSH(sc);
   12392 	delay(1000);
   12393 }
   12394 
   12395 static int
   12396 wm_serdes_mediachange(struct ifnet *ifp)
   12397 {
   12398 	struct wm_softc *sc = ifp->if_softc;
   12399 	bool pcs_autoneg = true; /* XXX */
   12400 	uint32_t ctrl_ext, pcs_lctl, reg;
   12401 
   12402 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12403 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12404 		return 0;
   12405 
   12406 	/* XXX Currently, this function is not called on 8257[12] */
   12407 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12408 	    || (sc->sc_type >= WM_T_82575))
   12409 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12410 
   12411 	/* Power on the sfp cage if present */
   12412 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12413 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12414 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12415 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12416 
   12417 	sc->sc_ctrl |= CTRL_SLU;
   12418 
   12419 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12420 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12421 
   12422 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12423 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12424 	case CTRL_EXT_LINK_MODE_SGMII:
   12425 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12426 		pcs_autoneg = true;
   12427 		/* Autoneg time out should be disabled for SGMII mode */
   12428 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12429 		break;
   12430 	case CTRL_EXT_LINK_MODE_1000KX:
   12431 		pcs_autoneg = false;
   12432 		/* FALLTHROUGH */
   12433 	default:
   12434 		if ((sc->sc_type == WM_T_82575)
   12435 		    || (sc->sc_type == WM_T_82576)) {
   12436 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12437 				pcs_autoneg = false;
   12438 		}
   12439 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12440 		    | CTRL_FRCFDX;
   12441 
   12442 		/* Set speed of 1000/Full if speed/duplex is forced */
   12443 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12444 	}
   12445 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12446 
   12447 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12448 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12449 
   12450 	if (pcs_autoneg) {
   12451 		/* Set PCS register for autoneg */
   12452 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12453 
   12454 		/* Disable force flow control for autoneg */
   12455 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12456 
   12457 		/* Configure flow control advertisement for autoneg */
   12458 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12459 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12460 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12461 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12462 	} else
   12463 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12464 
   12465 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12466 
   12467 	return 0;
   12468 }
   12469 
   12470 static void
   12471 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12472 {
   12473 	struct wm_softc *sc = ifp->if_softc;
   12474 	struct mii_data *mii = &sc->sc_mii;
   12475 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12476 	uint32_t pcs_adv, pcs_lpab, reg;
   12477 
   12478 	ifmr->ifm_status = IFM_AVALID;
   12479 	ifmr->ifm_active = IFM_ETHER;
   12480 
   12481 	/* Check PCS */
   12482 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12483 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12484 		ifmr->ifm_active |= IFM_NONE;
   12485 		sc->sc_tbi_linkup = 0;
   12486 		goto setled;
   12487 	}
   12488 
   12489 	sc->sc_tbi_linkup = 1;
   12490 	ifmr->ifm_status |= IFM_ACTIVE;
   12491 	if (sc->sc_type == WM_T_I354) {
   12492 		uint32_t status;
   12493 
   12494 		status = CSR_READ(sc, WMREG_STATUS);
   12495 		if (((status & STATUS_2P5_SKU) != 0)
   12496 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12497 			ifmr->ifm_active |= IFM_2500_KX;
   12498 		} else
   12499 			ifmr->ifm_active |= IFM_1000_KX;
   12500 	} else {
   12501 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12502 		case PCS_LSTS_SPEED_10:
   12503 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12504 			break;
   12505 		case PCS_LSTS_SPEED_100:
   12506 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12507 			break;
   12508 		case PCS_LSTS_SPEED_1000:
   12509 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12510 			break;
   12511 		default:
   12512 			device_printf(sc->sc_dev, "Unknown speed\n");
   12513 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12514 			break;
   12515 		}
   12516 	}
   12517 	if ((reg & PCS_LSTS_FDX) != 0)
   12518 		ifmr->ifm_active |= IFM_FDX;
   12519 	else
   12520 		ifmr->ifm_active |= IFM_HDX;
   12521 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12522 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12523 		/* Check flow */
   12524 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12525 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12526 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12527 			goto setled;
   12528 		}
   12529 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12530 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12531 		DPRINTF(WM_DEBUG_LINK,
   12532 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12533 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12534 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12535 			mii->mii_media_active |= IFM_FLOW
   12536 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12537 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12538 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12539 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12540 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12541 			mii->mii_media_active |= IFM_FLOW
   12542 			    | IFM_ETH_TXPAUSE;
   12543 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12544 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12545 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12546 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12547 			mii->mii_media_active |= IFM_FLOW
   12548 			    | IFM_ETH_RXPAUSE;
   12549 		}
   12550 	}
   12551 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12552 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12553 setled:
   12554 	wm_tbi_serdes_set_linkled(sc);
   12555 }
   12556 
   12557 /*
   12558  * wm_serdes_tick:
   12559  *
   12560  *	Check the link on serdes devices.
   12561  */
   12562 static void
   12563 wm_serdes_tick(struct wm_softc *sc)
   12564 {
   12565 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12566 	struct mii_data *mii = &sc->sc_mii;
   12567 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12568 	uint32_t reg;
   12569 
   12570 	KASSERT(WM_CORE_LOCKED(sc));
   12571 
   12572 	mii->mii_media_status = IFM_AVALID;
   12573 	mii->mii_media_active = IFM_ETHER;
   12574 
   12575 	/* Check PCS */
   12576 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12577 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12578 		mii->mii_media_status |= IFM_ACTIVE;
   12579 		sc->sc_tbi_linkup = 1;
   12580 		sc->sc_tbi_serdes_ticks = 0;
   12581 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12582 		if ((reg & PCS_LSTS_FDX) != 0)
   12583 			mii->mii_media_active |= IFM_FDX;
   12584 		else
   12585 			mii->mii_media_active |= IFM_HDX;
   12586 	} else {
    12587 		mii->mii_media_active |= IFM_NONE;
   12588 		sc->sc_tbi_linkup = 0;
   12589 		/* If the timer expired, retry autonegotiation */
   12590 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12591 		    && (++sc->sc_tbi_serdes_ticks
   12592 			>= sc->sc_tbi_serdes_anegticks)) {
   12593 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12594 				device_xname(sc->sc_dev), __func__));
   12595 			sc->sc_tbi_serdes_ticks = 0;
   12596 			/* XXX */
   12597 			wm_serdes_mediachange(ifp);
   12598 		}
   12599 	}
   12600 
   12601 	wm_tbi_serdes_set_linkled(sc);
   12602 }
   12603 
   12604 /* SFP related */
   12605 
   12606 static int
   12607 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12608 {
   12609 	uint32_t i2ccmd;
   12610 	int i;
   12611 
   12612 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12613 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12614 
   12615 	/* Poll the ready bit */
   12616 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12617 		delay(50);
   12618 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12619 		if (i2ccmd & I2CCMD_READY)
   12620 			break;
   12621 	}
   12622 	if ((i2ccmd & I2CCMD_READY) == 0)
   12623 		return -1;
   12624 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12625 		return -1;
   12626 
   12627 	*data = i2ccmd & 0x00ff;
   12628 
   12629 	return 0;
   12630 }
   12631 
   12632 static uint32_t
   12633 wm_sfp_get_media_type(struct wm_softc *sc)
   12634 {
   12635 	uint32_t ctrl_ext;
   12636 	uint8_t val = 0;
   12637 	int timeout = 3;
   12638 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12639 	int rv = -1;
   12640 
   12641 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12642 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12643 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12644 	CSR_WRITE_FLUSH(sc);
   12645 
   12646 	/* Read SFP module data */
   12647 	while (timeout) {
   12648 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12649 		if (rv == 0)
   12650 			break;
   12651 		delay(100*1000); /* XXX too big */
   12652 		timeout--;
   12653 	}
   12654 	if (rv != 0)
   12655 		goto out;
   12656 
   12657 	switch (val) {
   12658 	case SFF_SFP_ID_SFF:
   12659 		aprint_normal_dev(sc->sc_dev,
   12660 		    "Module/Connector soldered to board\n");
   12661 		break;
   12662 	case SFF_SFP_ID_SFP:
   12663 		sc->sc_flags |= WM_F_SFP;
   12664 		break;
   12665 	case SFF_SFP_ID_UNKNOWN:
   12666 		goto out;
   12667 	default:
   12668 		break;
   12669 	}
   12670 
   12671 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12672 	if (rv != 0)
   12673 		goto out;
   12674 
   12675 	sc->sc_sfptype = val;
   12676 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12677 		mediatype = WM_MEDIATYPE_SERDES;
   12678 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12679 		sc->sc_flags |= WM_F_SGMII;
   12680 		mediatype = WM_MEDIATYPE_COPPER;
   12681 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12682 		sc->sc_flags |= WM_F_SGMII;
   12683 		mediatype = WM_MEDIATYPE_SERDES;
   12684 	} else {
   12685 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12686 		    __func__, sc->sc_sfptype);
   12687 		sc->sc_sfptype = 0; /* XXX unknown */
   12688 	}
   12689 
   12690 out:
   12691 	/* Restore I2C interface setting */
   12692 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12693 
   12694 	return mediatype;
   12695 }
   12696 
   12697 /*
   12698  * NVM related.
   12699  * Microwire, SPI (w/wo EERD) and Flash.
   12700  */
   12701 
   12702 /* Both spi and uwire */
   12703 
   12704 /*
   12705  * wm_eeprom_sendbits:
   12706  *
   12707  *	Send a series of bits to the EEPROM.
   12708  */
   12709 static void
   12710 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12711 {
   12712 	uint32_t reg;
   12713 	int x;
   12714 
   12715 	reg = CSR_READ(sc, WMREG_EECD);
   12716 
   12717 	for (x = nbits; x > 0; x--) {
   12718 		if (bits & (1U << (x - 1)))
   12719 			reg |= EECD_DI;
   12720 		else
   12721 			reg &= ~EECD_DI;
   12722 		CSR_WRITE(sc, WMREG_EECD, reg);
   12723 		CSR_WRITE_FLUSH(sc);
   12724 		delay(2);
   12725 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12726 		CSR_WRITE_FLUSH(sc);
   12727 		delay(2);
   12728 		CSR_WRITE(sc, WMREG_EECD, reg);
   12729 		CSR_WRITE_FLUSH(sc);
   12730 		delay(2);
   12731 	}
   12732 }
   12733 
   12734 /*
   12735  * wm_eeprom_recvbits:
   12736  *
   12737  *	Receive a series of bits from the EEPROM.
   12738  */
   12739 static void
   12740 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12741 {
   12742 	uint32_t reg, val;
   12743 	int x;
   12744 
   12745 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12746 
   12747 	val = 0;
   12748 	for (x = nbits; x > 0; x--) {
   12749 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12750 		CSR_WRITE_FLUSH(sc);
   12751 		delay(2);
   12752 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12753 			val |= (1U << (x - 1));
   12754 		CSR_WRITE(sc, WMREG_EECD, reg);
   12755 		CSR_WRITE_FLUSH(sc);
   12756 		delay(2);
   12757 	}
   12758 	*valp = val;
   12759 }
   12760 
   12761 /* Microwire */
   12762 
   12763 /*
   12764  * wm_nvm_read_uwire:
   12765  *
   12766  *	Read a word from the EEPROM using the MicroWire protocol.
   12767  */
   12768 static int
   12769 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12770 {
   12771 	uint32_t reg, val;
   12772 	int i;
   12773 
   12774 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12775 		device_xname(sc->sc_dev), __func__));
   12776 
   12777 	if (sc->nvm.acquire(sc) != 0)
   12778 		return -1;
   12779 
   12780 	for (i = 0; i < wordcnt; i++) {
   12781 		/* Clear SK and DI. */
   12782 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12783 		CSR_WRITE(sc, WMREG_EECD, reg);
   12784 
   12785 		/*
    12786 		 * XXX: workaround for a bug in qemu-0.12.x and prior,
    12787 		 * and in Xen.
    12788 		 *
    12789 		 * We use this workaround only for the 82540 because
    12790 		 * qemu's e1000 acts as an 82540.
   12791 		 */
   12792 		if (sc->sc_type == WM_T_82540) {
   12793 			reg |= EECD_SK;
   12794 			CSR_WRITE(sc, WMREG_EECD, reg);
   12795 			reg &= ~EECD_SK;
   12796 			CSR_WRITE(sc, WMREG_EECD, reg);
   12797 			CSR_WRITE_FLUSH(sc);
   12798 			delay(2);
   12799 		}
   12800 		/* XXX: end of workaround */
   12801 
   12802 		/* Set CHIP SELECT. */
   12803 		reg |= EECD_CS;
   12804 		CSR_WRITE(sc, WMREG_EECD, reg);
   12805 		CSR_WRITE_FLUSH(sc);
   12806 		delay(2);
   12807 
   12808 		/* Shift in the READ command. */
   12809 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12810 
   12811 		/* Shift in address. */
   12812 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12813 
   12814 		/* Shift out the data. */
   12815 		wm_eeprom_recvbits(sc, &val, 16);
   12816 		data[i] = val & 0xffff;
   12817 
   12818 		/* Clear CHIP SELECT. */
   12819 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12820 		CSR_WRITE(sc, WMREG_EECD, reg);
   12821 		CSR_WRITE_FLUSH(sc);
   12822 		delay(2);
   12823 	}
   12824 
   12825 	sc->nvm.release(sc);
   12826 	return 0;
   12827 }
   12828 
   12829 /* SPI */
   12830 
   12831 /*
   12832  * Set SPI and FLASH related information from the EECD register.
   12833  * For 82541 and 82547, the word size is taken from EEPROM.
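           * The resulting word count is 1 << size, where size starts from
           * the EECD EE_SIZE_EX field and is adjusted and clamped per
           * device family below.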
   12834  */
   12835 static int
   12836 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12837 {
   12838 	int size;
   12839 	uint32_t reg;
    12840 	uint16_t data = 0;
   12841 
   12842 	reg = CSR_READ(sc, WMREG_EECD);
   12843 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12844 
   12845 	/* Read the size of NVM from EECD by default */
   12846 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12847 	switch (sc->sc_type) {
   12848 	case WM_T_82541:
   12849 	case WM_T_82541_2:
   12850 	case WM_T_82547:
   12851 	case WM_T_82547_2:
   12852 		/* Set dummy value to access EEPROM */
   12853 		sc->sc_nvm_wordsize = 64;
   12854 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12855 			aprint_error_dev(sc->sc_dev,
   12856 			    "%s: failed to read EEPROM size\n", __func__);
   12857 		}
   12858 		reg = data;
   12859 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12860 		if (size == 0)
    12861 			size = 6; /* 1 << 6 = 64 words */
   12862 		else
   12863 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12864 		break;
   12865 	case WM_T_80003:
   12866 	case WM_T_82571:
   12867 	case WM_T_82572:
   12868 	case WM_T_82573: /* SPI case */
   12869 	case WM_T_82574: /* SPI case */
   12870 	case WM_T_82583: /* SPI case */
   12871 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12872 		if (size > 14)
   12873 			size = 14;
   12874 		break;
   12875 	case WM_T_82575:
   12876 	case WM_T_82576:
   12877 	case WM_T_82580:
   12878 	case WM_T_I350:
   12879 	case WM_T_I354:
   12880 	case WM_T_I210:
   12881 	case WM_T_I211:
   12882 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12883 		if (size > 15)
   12884 			size = 15;
   12885 		break;
   12886 	default:
   12887 		aprint_error_dev(sc->sc_dev,
    12888 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
    12889 		return -1;
   12891 	}
   12892 
   12893 	sc->sc_nvm_wordsize = 1 << size;
   12894 
   12895 	return 0;
   12896 }
   12897 
   12898 /*
   12899  * wm_nvm_ready_spi:
   12900  *
   12901  *	Wait for a SPI EEPROM to be ready for commands.
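           *
           *	This polls the status register (RDSR) until the busy bit
           *	(SPI_SR_RDY) reads clear, waiting at most SPI_MAX_RETRIES
           *	microseconds.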
   12902  */
   12903 static int
   12904 wm_nvm_ready_spi(struct wm_softc *sc)
   12905 {
   12906 	uint32_t val;
   12907 	int usec;
   12908 
   12909 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12910 		device_xname(sc->sc_dev), __func__));
   12911 
   12912 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12913 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12914 		wm_eeprom_recvbits(sc, &val, 8);
   12915 		if ((val & SPI_SR_RDY) == 0)
   12916 			break;
   12917 	}
   12918 	if (usec >= SPI_MAX_RETRIES) {
    12919 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   12920 		return -1;
   12921 	}
   12922 	return 0;
   12923 }
   12924 
   12925 /*
   12926  * wm_nvm_read_spi:
   12927  *
    12928  *	Read word(s) from the EEPROM using the SPI protocol.
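           *
           *	Parts with 8-bit addressing encode the ninth address bit
           *	(for words at 0x80 and above) via SPI_OPC_A8 in the opcode;
           *	the 16-bit data comes back byte-swapped and is converted to
           *	host order here.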
   12929  */
   12930 static int
   12931 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12932 {
   12933 	uint32_t reg, val;
   12934 	int i;
   12935 	uint8_t opc;
   12936 	int rv = 0;
   12937 
   12938 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12939 		device_xname(sc->sc_dev), __func__));
   12940 
   12941 	if (sc->nvm.acquire(sc) != 0)
   12942 		return -1;
   12943 
   12944 	/* Clear SK and CS. */
   12945 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12946 	CSR_WRITE(sc, WMREG_EECD, reg);
   12947 	CSR_WRITE_FLUSH(sc);
   12948 	delay(2);
   12949 
   12950 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12951 		goto out;
   12952 
   12953 	/* Toggle CS to flush commands. */
   12954 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12955 	CSR_WRITE_FLUSH(sc);
   12956 	delay(2);
   12957 	CSR_WRITE(sc, WMREG_EECD, reg);
   12958 	CSR_WRITE_FLUSH(sc);
   12959 	delay(2);
   12960 
   12961 	opc = SPI_OPC_READ;
   12962 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12963 		opc |= SPI_OPC_A8;
   12964 
   12965 	wm_eeprom_sendbits(sc, opc, 8);
   12966 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12967 
   12968 	for (i = 0; i < wordcnt; i++) {
   12969 		wm_eeprom_recvbits(sc, &val, 16);
   12970 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12971 	}
   12972 
   12973 	/* Raise CS and clear SK. */
   12974 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12975 	CSR_WRITE(sc, WMREG_EECD, reg);
   12976 	CSR_WRITE_FLUSH(sc);
   12977 	delay(2);
   12978 
   12979 out:
   12980 	sc->nvm.release(sc);
   12981 	return rv;
   12982 }
   12983 
    12984 /* Reading with the EERD register */
   12985 
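          /*
           * wm_poll_eerd_eewr_done:
           *
           *	Poll the EERD (read) or EEWR (write) register until its DONE
           *	bit is set, or fail after the attempt limit is exhausted.
           */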
   12986 static int
   12987 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12988 {
   12989 	uint32_t attempts = 100000;
   12990 	uint32_t i, reg = 0;
   12991 	int32_t done = -1;
   12992 
   12993 	for (i = 0; i < attempts; i++) {
   12994 		reg = CSR_READ(sc, rw);
   12995 
   12996 		if (reg & EERD_DONE) {
   12997 			done = 0;
   12998 			break;
   12999 		}
   13000 		delay(5);
   13001 	}
   13002 
   13003 	return done;
   13004 }
   13005 
   13006 static int
   13007 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13008 {
   13009 	int i, eerd = 0;
   13010 	int rv = 0;
   13011 
   13012 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13013 		device_xname(sc->sc_dev), __func__));
   13014 
   13015 	if (sc->nvm.acquire(sc) != 0)
   13016 		return -1;
   13017 
   13018 	for (i = 0; i < wordcnt; i++) {
   13019 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13020 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13021 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13022 		if (rv != 0) {
   13023 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13024 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13025 			break;
   13026 		}
   13027 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13028 	}
   13029 
   13030 	sc->nvm.release(sc);
   13031 	return rv;
   13032 }
   13033 
   13034 /* Flash */
   13035 
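          /*
           * wm_nvm_valid_bank_detect_ich8lan:
           *
           *	Determine which of the two NVM flash banks holds a valid
           *	signature. Each bank carries a signature in the high byte of
           *	its ICH_NVM_SIG_WORD word; bank 0 is checked first and wins
           *	if both are valid.
           */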
   13036 static int
   13037 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13038 {
   13039 	uint32_t eecd;
   13040 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13041 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13042 	uint32_t nvm_dword = 0;
   13043 	uint8_t sig_byte = 0;
   13044 	int rv;
   13045 
   13046 	switch (sc->sc_type) {
   13047 	case WM_T_PCH_SPT:
   13048 	case WM_T_PCH_CNP:
   13049 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13050 		act_offset = ICH_NVM_SIG_WORD * 2;
   13051 
   13052 		/* Set bank to 0 in case flash read fails. */
   13053 		*bank = 0;
   13054 
   13055 		/* Check bank 0 */
   13056 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13057 		if (rv != 0)
   13058 			return rv;
   13059 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13060 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13061 			*bank = 0;
   13062 			return 0;
   13063 		}
   13064 
   13065 		/* Check bank 1 */
    13066 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13067 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    13068 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13069 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13070 			*bank = 1;
   13071 			return 0;
   13072 		}
   13073 		aprint_error_dev(sc->sc_dev,
   13074 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13075 		return -1;
   13076 	case WM_T_ICH8:
   13077 	case WM_T_ICH9:
   13078 		eecd = CSR_READ(sc, WMREG_EECD);
   13079 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13080 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13081 			return 0;
   13082 		}
   13083 		/* FALLTHROUGH */
   13084 	default:
   13085 		/* Default to 0 */
   13086 		*bank = 0;
   13087 
   13088 		/* Check bank 0 */
   13089 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13090 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13091 			*bank = 0;
   13092 			return 0;
   13093 		}
   13094 
   13095 		/* Check bank 1 */
   13096 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13097 		    &sig_byte);
   13098 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13099 			*bank = 1;
   13100 			return 0;
   13101 		}
   13102 	}
   13103 
   13104 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13105 		device_xname(sc->sc_dev)));
   13106 	return -1;
   13107 }
   13108 
   13109 /******************************************************************************
   13110  * This function does initial flash setup so that a new read/write/erase cycle
   13111  * can be started.
   13112  *
   13113  * sc - The pointer to the hw structure
   13114  ****************************************************************************/
   13115 static int32_t
   13116 wm_ich8_cycle_init(struct wm_softc *sc)
   13117 {
   13118 	uint16_t hsfsts;
   13119 	int32_t error = 1;
   13120 	int32_t i     = 0;
   13121 
   13122 	if (sc->sc_type >= WM_T_PCH_SPT)
   13123 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13124 	else
   13125 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13126 
    13127 	/* Bail out if the Flash Descriptor Valid bit is not set in HW status. */
   13128 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13129 		return error;
   13130 
    13131 	/* Clear FCERR and DAEL in HW status by writing 1s. */
   13133 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13134 
   13135 	if (sc->sc_type >= WM_T_PCH_SPT)
   13136 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13137 	else
   13138 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13139 
   13140 	/*
    13141 	 * Ideally the hardware would provide either a cycle-in-progress
    13142 	 * bit to check before starting a new cycle, or an FDONE bit that
    13143 	 * comes up as 1 after a hardware reset so it could indicate
    13144 	 * whether a cycle is in progress or has completed. We should also
    13145 	 * have a software semaphore mechanism guarding FDONE or the
    13146 	 * cycle-in-progress bit so that accesses by two threads are
    13147 	 * serialized and two threads cannot start a cycle at the same
    13148 	 * time.
   13149 	 */
   13150 
   13151 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13152 		/*
   13153 		 * There is no cycle running at present, so we can start a
   13154 		 * cycle
   13155 		 */
   13156 
   13157 		/* Begin by setting Flash Cycle Done. */
   13158 		hsfsts |= HSFSTS_DONE;
   13159 		if (sc->sc_type >= WM_T_PCH_SPT)
   13160 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13161 			    hsfsts & 0xffffUL);
   13162 		else
   13163 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13164 		error = 0;
   13165 	} else {
   13166 		/*
    13167 		 * Otherwise poll for some time so the current cycle has a
    13168 		 * chance to end before giving up.
   13169 		 */
   13170 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13171 			if (sc->sc_type >= WM_T_PCH_SPT)
   13172 				hsfsts = ICH8_FLASH_READ32(sc,
   13173 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13174 			else
   13175 				hsfsts = ICH8_FLASH_READ16(sc,
   13176 				    ICH_FLASH_HSFSTS);
   13177 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13178 				error = 0;
   13179 				break;
   13180 			}
   13181 			delay(1);
   13182 		}
   13183 		if (error == 0) {
   13184 			/*
    13185 			 * The previous cycle ended before we timed out; now
    13186 			 * set the Flash Cycle Done bit.
   13187 			 */
   13188 			hsfsts |= HSFSTS_DONE;
   13189 			if (sc->sc_type >= WM_T_PCH_SPT)
   13190 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13191 				    hsfsts & 0xffffUL);
   13192 			else
   13193 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13194 				    hsfsts);
   13195 		}
   13196 	}
   13197 	return error;
   13198 }
   13199 
   13200 /******************************************************************************
   13201  * This function starts a flash cycle and waits for its completion
   13202  *
   13203  * sc - The pointer to the hw structure
   13204  ****************************************************************************/
   13205 static int32_t
   13206 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13207 {
   13208 	uint16_t hsflctl;
   13209 	uint16_t hsfsts;
   13210 	int32_t error = 1;
   13211 	uint32_t i = 0;
   13212 
   13213 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13214 	if (sc->sc_type >= WM_T_PCH_SPT)
   13215 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13216 	else
   13217 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13218 	hsflctl |= HSFCTL_GO;
   13219 	if (sc->sc_type >= WM_T_PCH_SPT)
   13220 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13221 		    (uint32_t)hsflctl << 16);
   13222 	else
   13223 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13224 
   13225 	/* Wait till FDONE bit is set to 1 */
   13226 	do {
   13227 		if (sc->sc_type >= WM_T_PCH_SPT)
   13228 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13229 			    & 0xffffUL;
   13230 		else
   13231 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13232 		if (hsfsts & HSFSTS_DONE)
   13233 			break;
   13234 		delay(1);
   13235 		i++;
   13236 	} while (i < timeout);
    13237 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13238 		error = 0;
   13239 
   13240 	return error;
   13241 }
   13242 
   13243 /******************************************************************************
   13244  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13245  *
   13246  * sc - The pointer to the hw structure
   13247  * index - The index of the byte or word to read.
   13248  * size - Size of data to read, 1=byte 2=word, 4=dword
   13249  * data - Pointer to the word to store the value read.
   13250  *****************************************************************************/
   13251 static int32_t
   13252 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13253     uint32_t size, uint32_t *data)
   13254 {
   13255 	uint16_t hsfsts;
   13256 	uint16_t hsflctl;
   13257 	uint32_t flash_linear_address;
   13258 	uint32_t flash_data = 0;
   13259 	int32_t error = 1;
   13260 	int32_t count = 0;
   13261 
    13262 	if (size < 1 || size > 4 || data == NULL ||
   13263 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13264 		return error;
   13265 
   13266 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13267 	    sc->sc_ich8_flash_base;
   13268 
   13269 	do {
   13270 		delay(1);
   13271 		/* Steps */
   13272 		error = wm_ich8_cycle_init(sc);
   13273 		if (error)
   13274 			break;
   13275 
   13276 		if (sc->sc_type >= WM_T_PCH_SPT)
   13277 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13278 			    >> 16;
   13279 		else
   13280 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13281 		/* Byte count field = size - 1: 0, 1 or 3 for 1, 2 or 4 bytes. */
   13282 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13283 		    & HSFCTL_BCOUNT_MASK;
   13284 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13285 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13286 			/*
    13287 			 * On SPT, this register is in the LAN memory space,
    13288 			 * not in flash, so only 32-bit access is supported.
   13289 			 */
   13290 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13291 			    (uint32_t)hsflctl << 16);
   13292 		} else
   13293 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13294 
   13295 		/*
   13296 		 * Write the last 24 bits of index into Flash Linear address
   13297 		 * field in Flash Address
   13298 		 */
    13299 		/* TODO: maybe check the index against the size of the flash */
   13300 
   13301 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13302 
   13303 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13304 
   13305 		/*
    13306 		 * If FCERR is set, clear it and retry the whole sequence a
    13307 		 * few more times; otherwise read the data out of Flash
    13308 		 * Data0, least significant byte first.
   13310 		 */
   13311 		if (error == 0) {
   13312 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13313 			if (size == 1)
   13314 				*data = (uint8_t)(flash_data & 0x000000FF);
   13315 			else if (size == 2)
   13316 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13317 			else if (size == 4)
   13318 				*data = (uint32_t)flash_data;
   13319 			break;
   13320 		} else {
   13321 			/*
   13322 			 * If we've gotten here, then things are probably
   13323 			 * completely hosed, but if the error condition is
   13324 			 * detected, it won't hurt to give it another try...
   13325 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13326 			 */
   13327 			if (sc->sc_type >= WM_T_PCH_SPT)
   13328 				hsfsts = ICH8_FLASH_READ32(sc,
   13329 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13330 			else
   13331 				hsfsts = ICH8_FLASH_READ16(sc,
   13332 				    ICH_FLASH_HSFSTS);
   13333 
   13334 			if (hsfsts & HSFSTS_ERR) {
   13335 				/* Repeat for some time before giving up. */
   13336 				continue;
   13337 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13338 				break;
   13339 		}
   13340 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13341 
   13342 	return error;
   13343 }
   13344 
   13345 /******************************************************************************
   13346  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13347  *
    13348  * sc - pointer to the wm_softc structure
   13349  * index - The index of the byte to read.
   13350  * data - Pointer to a byte to store the value read.
   13351  *****************************************************************************/
   13352 static int32_t
   13353 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13354 {
   13355 	int32_t status;
   13356 	uint32_t word = 0;
   13357 
   13358 	status = wm_read_ich8_data(sc, index, 1, &word);
   13359 	if (status == 0)
   13360 		*data = (uint8_t)word;
   13361 	else
   13362 		*data = 0;
   13363 
   13364 	return status;
   13365 }
   13366 
   13367 /******************************************************************************
   13368  * Reads a word from the NVM using the ICH8 flash access registers.
   13369  *
    13370  * sc - pointer to the wm_softc structure
   13371  * index - The starting byte index of the word to read.
   13372  * data - Pointer to a word to store the value read.
   13373  *****************************************************************************/
   13374 static int32_t
   13375 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13376 {
   13377 	int32_t status;
   13378 	uint32_t word = 0;
   13379 
   13380 	status = wm_read_ich8_data(sc, index, 2, &word);
   13381 	if (status == 0)
   13382 		*data = (uint16_t)word;
   13383 	else
   13384 		*data = 0;
   13385 
   13386 	return status;
   13387 }
   13388 
   13389 /******************************************************************************
   13390  * Reads a dword from the NVM using the ICH8 flash access registers.
   13391  *
    13392  * sc - pointer to the wm_softc structure
    13393  * index - The starting byte index of the dword to read.
    13394  * data - Pointer to a dword to store the value read.
   13395  *****************************************************************************/
   13396 static int32_t
   13397 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13398 {
   13399 	int32_t status;
   13400 
   13401 	status = wm_read_ich8_data(sc, index, 4, data);
   13402 	return status;
   13403 }
   13404 
   13405 /******************************************************************************
   13406  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13407  * register.
   13408  *
   13409  * sc - Struct containing variables accessed by shared code
   13410  * offset - offset of word in the EEPROM to read
   13411  * data - word read from the EEPROM
   13412  * words - number of words to read
   13413  *****************************************************************************/
   13414 static int
   13415 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13416 {
   13417 	int32_t	 rv = 0;
   13418 	uint32_t flash_bank = 0;
   13419 	uint32_t act_offset = 0;
   13420 	uint32_t bank_offset = 0;
   13421 	uint16_t word = 0;
   13422 	uint16_t i = 0;
   13423 
   13424 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13425 		device_xname(sc->sc_dev), __func__));
   13426 
   13427 	if (sc->nvm.acquire(sc) != 0)
   13428 		return -1;
   13429 
   13430 	/*
   13431 	 * We need to know which is the valid flash bank.  In the event
   13432 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13433 	 * managing flash_bank. So it cannot be trusted and needs
   13434 	 * to be updated with each read.
   13435 	 */
   13436 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13437 	if (rv) {
   13438 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13439 			device_xname(sc->sc_dev)));
   13440 		flash_bank = 0;
   13441 	}
   13442 
   13443 	/*
   13444 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13445 	 * size
   13446 	 */
   13447 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13448 
   13449 	for (i = 0; i < words; i++) {
   13450 		/* The NVM part needs a byte offset, hence * 2 */
   13451 		act_offset = bank_offset + ((offset + i) * 2);
   13452 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13453 		if (rv) {
   13454 			aprint_error_dev(sc->sc_dev,
   13455 			    "%s: failed to read NVM\n", __func__);
   13456 			break;
   13457 		}
   13458 		data[i] = word;
   13459 	}
   13460 
   13461 	sc->nvm.release(sc);
   13462 	return rv;
   13463 }
   13464 
   13465 /******************************************************************************
   13466  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13467  * register.
   13468  *
   13469  * sc - Struct containing variables accessed by shared code
   13470  * offset - offset of word in the EEPROM to read
   13471  * data - word read from the EEPROM
   13472  * words - number of words to read
   13473  *****************************************************************************/
   13474 static int
   13475 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13476 {
   13477 	int32_t	 rv = 0;
   13478 	uint32_t flash_bank = 0;
   13479 	uint32_t act_offset = 0;
   13480 	uint32_t bank_offset = 0;
   13481 	uint32_t dword = 0;
   13482 	uint16_t i = 0;
   13483 
   13484 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13485 		device_xname(sc->sc_dev), __func__));
   13486 
   13487 	if (sc->nvm.acquire(sc) != 0)
   13488 		return -1;
   13489 
   13490 	/*
   13491 	 * We need to know which is the valid flash bank.  In the event
   13492 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13493 	 * managing flash_bank. So it cannot be trusted and needs
   13494 	 * to be updated with each read.
   13495 	 */
   13496 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13497 	if (rv) {
   13498 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13499 			device_xname(sc->sc_dev)));
   13500 		flash_bank = 0;
   13501 	}
   13502 
   13503 	/*
   13504 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13505 	 * size
   13506 	 */
   13507 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13508 
   13509 	for (i = 0; i < words; i++) {
   13510 		/* The NVM part needs a byte offset, hence * 2 */
   13511 		act_offset = bank_offset + ((offset + i) * 2);
   13512 		/* but we must read dword aligned, so mask ... */
   13513 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13514 		if (rv) {
   13515 			aprint_error_dev(sc->sc_dev,
   13516 			    "%s: failed to read NVM\n", __func__);
   13517 			break;
   13518 		}
   13519 		/* ... and pick out low or high word */
   13520 		if ((act_offset & 0x2) == 0)
   13521 			data[i] = (uint16_t)(dword & 0xFFFF);
   13522 		else
   13523 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13524 	}
   13525 
   13526 	sc->nvm.release(sc);
   13527 	return rv;
   13528 }
   13529 
   13530 /* iNVM */
   13531 
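          /*
           * The I210/I211 iNVM (integrated NVM) is read through the
           * WM_INVM_DATA_REG() registers. Records are scanned sequentially;
           * CSR-autoload and RSA-key records are skipped, and word-autoload
           * records are matched against the requested word address.
           */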
   13532 static int
   13533 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13534 {
    13535 	int32_t	 rv = -1;	/* -1 = word not found */
   13536 	uint32_t invm_dword;
   13537 	uint16_t i;
   13538 	uint8_t record_type, word_address;
   13539 
   13540 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13541 		device_xname(sc->sc_dev), __func__));
   13542 
   13543 	for (i = 0; i < INVM_SIZE; i++) {
   13544 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13545 		/* Get record type */
   13546 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13547 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13548 			break;
   13549 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13550 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13551 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13552 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13553 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13554 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13555 			if (word_address == address) {
   13556 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13557 				rv = 0;
   13558 				break;
   13559 			}
   13560 		}
   13561 	}
   13562 
   13563 	return rv;
   13564 }
   13565 
   13566 static int
   13567 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13568 {
   13569 	int rv = 0;
   13570 	int i;
   13571 
   13572 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13573 		device_xname(sc->sc_dev), __func__));
   13574 
   13575 	if (sc->nvm.acquire(sc) != 0)
   13576 		return -1;
   13577 
   13578 	for (i = 0; i < words; i++) {
   13579 		switch (offset + i) {
   13580 		case NVM_OFF_MACADDR:
   13581 		case NVM_OFF_MACADDR1:
   13582 		case NVM_OFF_MACADDR2:
   13583 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13584 			if (rv != 0) {
   13585 				data[i] = 0xffff;
   13586 				rv = -1;
   13587 			}
   13588 			break;
   13589 		case NVM_OFF_CFG2:
   13590 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13591 			if (rv != 0) {
   13592 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13593 				rv = 0;
   13594 			}
   13595 			break;
   13596 		case NVM_OFF_CFG4:
   13597 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13598 			if (rv != 0) {
   13599 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13600 				rv = 0;
   13601 			}
   13602 			break;
   13603 		case NVM_OFF_LED_1_CFG:
   13604 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13605 			if (rv != 0) {
   13606 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13607 				rv = 0;
   13608 			}
   13609 			break;
   13610 		case NVM_OFF_LED_0_2_CFG:
   13611 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13612 			if (rv != 0) {
   13613 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13614 				rv = 0;
   13615 			}
   13616 			break;
   13617 		case NVM_OFF_ID_LED_SETTINGS:
   13618 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13619 			if (rv != 0) {
   13620 				*data = ID_LED_RESERVED_FFFF;
   13621 				rv = 0;
   13622 			}
   13623 			break;
   13624 		default:
   13625 			DPRINTF(WM_DEBUG_NVM,
   13626 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13627 			*data = NVM_RESERVED_WORD;
   13628 			break;
   13629 		}
   13630 	}
   13631 
   13632 	sc->nvm.release(sc);
   13633 	return rv;
   13634 }
   13635 
    13636 /* Locking, NVM type detection, checksum validation, version and read */
   13637 
   13638 static int
   13639 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13640 {
   13641 	uint32_t eecd = 0;
   13642 
   13643 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13644 	    || sc->sc_type == WM_T_82583) {
   13645 		eecd = CSR_READ(sc, WMREG_EECD);
   13646 
   13647 		/* Isolate bits 15 & 16 */
   13648 		eecd = ((eecd >> 15) & 0x03);
   13649 
   13650 		/* If both bits are set, device is Flash type */
   13651 		if (eecd == 0x03)
   13652 			return 0;
   13653 	}
   13654 	return 1;
   13655 }
   13656 
   13657 static int
   13658 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13659 {
   13660 	uint32_t eec;
   13661 
   13662 	eec = CSR_READ(sc, WMREG_EEC);
   13663 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13664 		return 1;
   13665 
   13666 	return 0;
   13667 }
   13668 
   13669 /*
   13670  * wm_nvm_validate_checksum
   13671  *
    13672  * The checksum is defined as the sum of the first 64 16-bit words.
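           * The sum, including the checksum word itself, must equal
           * NVM_CHECKSUM. Note that on a mismatch this function only logs
           * under WM_DEBUG and still returns 0.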
   13673  */
   13674 static int
   13675 wm_nvm_validate_checksum(struct wm_softc *sc)
   13676 {
   13677 	uint16_t checksum;
   13678 	uint16_t eeprom_data;
   13679 #ifdef WM_DEBUG
   13680 	uint16_t csum_wordaddr, valid_checksum;
   13681 #endif
   13682 	int i;
   13683 
   13684 	checksum = 0;
   13685 
   13686 	/* Don't check for I211 */
   13687 	if (sc->sc_type == WM_T_I211)
   13688 		return 0;
   13689 
   13690 #ifdef WM_DEBUG
   13691 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13692 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13693 		csum_wordaddr = NVM_OFF_COMPAT;
   13694 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13695 	} else {
   13696 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13697 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13698 	}
   13699 
   13700 	/* Dump EEPROM image for debug */
   13701 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13702 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13703 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13704 		/* XXX PCH_SPT? */
   13705 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13706 		if ((eeprom_data & valid_checksum) == 0)
   13707 			DPRINTF(WM_DEBUG_NVM,
   13708 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13709 				device_xname(sc->sc_dev), eeprom_data,
   13710 				    valid_checksum));
   13711 	}
   13712 
   13713 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13714 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13715 		for (i = 0; i < NVM_SIZE; i++) {
   13716 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13717 				printf("XXXX ");
   13718 			else
   13719 				printf("%04hx ", eeprom_data);
   13720 			if (i % 8 == 7)
   13721 				printf("\n");
   13722 		}
   13723 	}
   13724 
   13725 #endif /* WM_DEBUG */
   13726 
   13727 	for (i = 0; i < NVM_SIZE; i++) {
   13728 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13729 			return 1;
   13730 		checksum += eeprom_data;
   13731 	}
   13732 
   13733 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13734 #ifdef WM_DEBUG
   13735 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13736 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13737 #endif
   13738 	}
   13739 
   13740 	return 0;
   13741 }
   13742 
   13743 static void
   13744 wm_nvm_version_invm(struct wm_softc *sc)
   13745 {
   13746 	uint32_t dword;
   13747 
   13748 	/*
    13749 	 * Linux's code to decode the version is very strange, so we
    13750 	 * don't follow that algorithm and just use word 61 as the
    13751 	 * documentation describes. Perhaps it's not perfect, though...
   13752 	 *
   13753 	 * Example:
   13754 	 *
   13755 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13756 	 */
   13757 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13758 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13759 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13760 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13761 }
   13762 
   13763 static void
   13764 wm_nvm_version(struct wm_softc *sc)
   13765 {
   13766 	uint16_t major, minor, build, patch;
   13767 	uint16_t uid0, uid1;
   13768 	uint16_t nvm_data;
   13769 	uint16_t off;
   13770 	bool check_version = false;
   13771 	bool check_optionrom = false;
   13772 	bool have_build = false;
   13773 	bool have_uid = true;
   13774 
   13775 	/*
   13776 	 * Version format:
   13777 	 *
   13778 	 * XYYZ
   13779 	 * X0YZ
   13780 	 * X0YY
   13781 	 *
   13782 	 * Example:
   13783 	 *
   13784 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13785 	 *	82571	0x50a6	5.10.6?
   13786 	 *	82572	0x506a	5.6.10?
   13787 	 *	82572EI	0x5069	5.6.9?
   13788 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13789 	 *		0x2013	2.1.3?
   13790 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13791 	 * ICH8+82567	0x0040	0.4.0?
   13792 	 * ICH9+82566	0x1040	1.4.0?
   13793 	 *ICH10+82567	0x0043	0.4.3?
   13794 	 *  PCH+82577	0x00c1	0.12.1?
   13795 	 * PCH2+82579	0x00d3	0.13.3?
   13796 	 *		0x00d4	0.13.4?
   13797 	 *  LPT+I218	0x0023	0.2.3?
   13798 	 *  SPT+I219	0x0084	0.8.4?
   13799 	 *  CNP+I219	0x0054	0.5.4?
   13800 	 */
   13801 
   13802 	/*
   13803 	 * XXX
    13804 	 * Qemu's e1000e (82574L) emulation has an SPI ROM of only 64 words.
    13805 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13806 	 */
   13807 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13808 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13809 		have_uid = false;
   13810 
   13811 	switch (sc->sc_type) {
   13812 	case WM_T_82571:
   13813 	case WM_T_82572:
   13814 	case WM_T_82574:
   13815 	case WM_T_82583:
   13816 		check_version = true;
   13817 		check_optionrom = true;
   13818 		have_build = true;
   13819 		break;
   13820 	case WM_T_ICH8:
   13821 	case WM_T_ICH9:
   13822 	case WM_T_ICH10:
   13823 	case WM_T_PCH:
   13824 	case WM_T_PCH2:
   13825 	case WM_T_PCH_LPT:
   13826 	case WM_T_PCH_SPT:
   13827 	case WM_T_PCH_CNP:
   13828 		check_version = true;
   13829 		have_build = true;
   13830 		have_uid = false;
   13831 		break;
   13832 	case WM_T_82575:
   13833 	case WM_T_82576:
   13834 	case WM_T_82580:
   13835 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13836 			check_version = true;
   13837 		break;
   13838 	case WM_T_I211:
   13839 		wm_nvm_version_invm(sc);
   13840 		have_uid = false;
   13841 		goto printver;
   13842 	case WM_T_I210:
   13843 		if (!wm_nvm_flash_presence_i210(sc)) {
   13844 			wm_nvm_version_invm(sc);
   13845 			have_uid = false;
   13846 			goto printver;
   13847 		}
   13848 		/* FALLTHROUGH */
   13849 	case WM_T_I350:
   13850 	case WM_T_I354:
   13851 		check_version = true;
   13852 		check_optionrom = true;
   13853 		break;
   13854 	default:
   13855 		return;
   13856 	}
   13857 	if (check_version
   13858 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13859 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13860 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13861 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13862 			build = nvm_data & NVM_BUILD_MASK;
   13863 			have_build = true;
   13864 		} else
   13865 			minor = nvm_data & 0x00ff;
   13866 
    13867 		/* Convert the BCD-coded minor number to decimal. */
   13868 		minor = (minor / 16) * 10 + (minor % 16);
   13869 		sc->sc_nvm_ver_major = major;
   13870 		sc->sc_nvm_ver_minor = minor;
   13871 
   13872 printver:
   13873 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13874 		    sc->sc_nvm_ver_minor);
   13875 		if (have_build) {
   13876 			sc->sc_nvm_ver_build = build;
   13877 			aprint_verbose(".%d", build);
   13878 		}
   13879 	}
   13880 
    13881 	/* Assume the Option ROM area is above NVM_SIZE */
   13882 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13883 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13884 		/* Option ROM Version */
   13885 		if ((off != 0x0000) && (off != 0xffff)) {
   13886 			int rv;
   13887 
   13888 			off += NVM_COMBO_VER_OFF;
   13889 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13890 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13891 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13892 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13893 				/* 16bits */
   13894 				major = uid0 >> 8;
   13895 				build = (uid0 << 8) | (uid1 >> 8);
   13896 				patch = uid1 & 0x00ff;
   13897 				aprint_verbose(", option ROM Version %d.%d.%d",
   13898 				    major, build, patch);
   13899 			}
   13900 		}
   13901 	}
   13902 
   13903 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13904 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13905 }
   13906 
   13907 /*
   13908  * wm_nvm_read:
   13909  *
   13910  *	Read data from the serial EEPROM.
   13911  */
   13912 static int
   13913 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13914 {
   13915 	int rv;
   13916 
   13917 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13918 		device_xname(sc->sc_dev), __func__));
   13919 
   13920 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13921 		return -1;
   13922 
   13923 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13924 
   13925 	return rv;
   13926 }
   13927 
   13928 /*
   13929  * Hardware semaphores.
    13930  * Very complex...
   13931  */
   13932 
   13933 static int
   13934 wm_get_null(struct wm_softc *sc)
   13935 {
   13936 
   13937 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13938 		device_xname(sc->sc_dev), __func__));
   13939 	return 0;
   13940 }
   13941 
   13942 static void
   13943 wm_put_null(struct wm_softc *sc)
   13944 {
   13945 
   13946 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13947 		device_xname(sc->sc_dev), __func__));
   13948 	return;
   13949 }
   13950 
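          /*
           * wm_get_eecd:
           *
           *	Request direct software access to the EEPROM by setting
           *	EECD.EE_REQ, then wait for the hardware to grant it via
           *	EECD.EE_GNT.
           */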
   13951 static int
   13952 wm_get_eecd(struct wm_softc *sc)
   13953 {
   13954 	uint32_t reg;
   13955 	int x;
   13956 
   13957 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13958 		device_xname(sc->sc_dev), __func__));
   13959 
   13960 	reg = CSR_READ(sc, WMREG_EECD);
   13961 
   13962 	/* Request EEPROM access. */
   13963 	reg |= EECD_EE_REQ;
   13964 	CSR_WRITE(sc, WMREG_EECD, reg);
   13965 
    13966 	/* ... and wait for it to be granted. */
   13967 	for (x = 0; x < 1000; x++) {
   13968 		reg = CSR_READ(sc, WMREG_EECD);
   13969 		if (reg & EECD_EE_GNT)
   13970 			break;
   13971 		delay(5);
   13972 	}
   13973 	if ((reg & EECD_EE_GNT) == 0) {
   13974 		aprint_error_dev(sc->sc_dev,
   13975 		    "could not acquire EEPROM GNT\n");
   13976 		reg &= ~EECD_EE_REQ;
   13977 		CSR_WRITE(sc, WMREG_EECD, reg);
   13978 		return -1;
   13979 	}
   13980 
   13981 	return 0;
   13982 }
   13983 
   13984 static void
   13985 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13986 {
   13987 
   13988 	*eecd |= EECD_SK;
   13989 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13990 	CSR_WRITE_FLUSH(sc);
   13991 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13992 		delay(1);
   13993 	else
   13994 		delay(50);
   13995 }
   13996 
   13997 static void
   13998 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13999 {
   14000 
   14001 	*eecd &= ~EECD_SK;
   14002 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14003 	CSR_WRITE_FLUSH(sc);
   14004 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14005 		delay(1);
   14006 	else
   14007 		delay(50);
   14008 }
   14009 
   14010 static void
   14011 wm_put_eecd(struct wm_softc *sc)
   14012 {
   14013 	uint32_t reg;
   14014 
   14015 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14016 		device_xname(sc->sc_dev), __func__));
   14017 
   14018 	/* Stop nvm */
   14019 	reg = CSR_READ(sc, WMREG_EECD);
   14020 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14021 		/* Pull CS high */
   14022 		reg |= EECD_CS;
   14023 		wm_nvm_eec_clock_lower(sc, &reg);
   14024 	} else {
   14025 		/* CS on Microwire is active-high */
   14026 		reg &= ~(EECD_CS | EECD_DI);
   14027 		CSR_WRITE(sc, WMREG_EECD, reg);
   14028 		wm_nvm_eec_clock_raise(sc, &reg);
   14029 		wm_nvm_eec_clock_lower(sc, &reg);
   14030 	}
   14031 
   14032 	reg = CSR_READ(sc, WMREG_EECD);
   14033 	reg &= ~EECD_EE_REQ;
   14034 	CSR_WRITE(sc, WMREG_EECD, reg);
   14035 
   14036 	return;
   14037 }
   14038 
   14039 /*
   14040  * Get hardware semaphore.
   14041  * Same as e1000_get_hw_semaphore_generic()
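           *
           * SWSM_SMBI arbitrates among software agents; SWSM_SWESMBI
           * arbitrates between software and firmware and is confirmed by
           * reading it back after setting it.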
   14042  */
   14043 static int
   14044 wm_get_swsm_semaphore(struct wm_softc *sc)
   14045 {
   14046 	int32_t timeout;
   14047 	uint32_t swsm;
   14048 
   14049 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14050 		device_xname(sc->sc_dev), __func__));
   14051 	KASSERT(sc->sc_nvm_wordsize > 0);
   14052 
   14053 retry:
   14054 	/* Get the SW semaphore. */
   14055 	timeout = sc->sc_nvm_wordsize + 1;
   14056 	while (timeout) {
   14057 		swsm = CSR_READ(sc, WMREG_SWSM);
   14058 
   14059 		if ((swsm & SWSM_SMBI) == 0)
   14060 			break;
   14061 
   14062 		delay(50);
   14063 		timeout--;
   14064 	}
   14065 
   14066 	if (timeout == 0) {
   14067 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14068 			/*
   14069 			 * In rare circumstances, the SW semaphore may already
   14070 			 * be held unintentionally. Clear the semaphore once
   14071 			 * before giving up.
   14072 			 */
   14073 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14074 			wm_put_swsm_semaphore(sc);
   14075 			goto retry;
   14076 		}
   14077 		aprint_error_dev(sc->sc_dev,
   14078 		    "could not acquire SWSM SMBI\n");
   14079 		return 1;
   14080 	}
   14081 
   14082 	/* Get the FW semaphore. */
   14083 	timeout = sc->sc_nvm_wordsize + 1;
   14084 	while (timeout) {
   14085 		swsm = CSR_READ(sc, WMREG_SWSM);
   14086 		swsm |= SWSM_SWESMBI;
   14087 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14088 		/* If we managed to set the bit we got the semaphore. */
   14089 		swsm = CSR_READ(sc, WMREG_SWSM);
   14090 		if (swsm & SWSM_SWESMBI)
   14091 			break;
   14092 
   14093 		delay(50);
   14094 		timeout--;
   14095 	}
   14096 
   14097 	if (timeout == 0) {
   14098 		aprint_error_dev(sc->sc_dev,
   14099 		    "could not acquire SWSM SWESMBI\n");
   14100 		/* Release semaphores */
   14101 		wm_put_swsm_semaphore(sc);
   14102 		return 1;
   14103 	}
   14104 	return 0;
   14105 }
   14106 
   14107 /*
   14108  * Put hardware semaphore.
   14109  * Same as e1000_put_hw_semaphore_generic()
   14110  */
   14111 static void
   14112 wm_put_swsm_semaphore(struct wm_softc *sc)
   14113 {
   14114 	uint32_t swsm;
   14115 
   14116 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14117 		device_xname(sc->sc_dev), __func__));
   14118 
   14119 	swsm = CSR_READ(sc, WMREG_SWSM);
   14120 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14121 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14122 }
   14123 
   14124 /*
   14125  * Get SW/FW semaphore.
   14126  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
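           *
           * The mask names the shared resource; its software bit is taken
           * only while neither the corresponding software nor firmware bit
           * is set in SW_FW_SYNC, all under the SWSM hardware semaphore.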
   14127  */
   14128 static int
   14129 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14130 {
   14131 	uint32_t swfw_sync;
   14132 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14133 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14134 	int timeout;
   14135 
   14136 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14137 		device_xname(sc->sc_dev), __func__));
   14138 
   14139 	if (sc->sc_type == WM_T_80003)
   14140 		timeout = 50;
   14141 	else
   14142 		timeout = 200;
   14143 
   14144 	while (timeout) {
   14145 		if (wm_get_swsm_semaphore(sc)) {
   14146 			aprint_error_dev(sc->sc_dev,
   14147 			    "%s: failed to get semaphore\n",
   14148 			    __func__);
   14149 			return 1;
   14150 		}
   14151 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14152 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14153 			swfw_sync |= swmask;
   14154 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14155 			wm_put_swsm_semaphore(sc);
   14156 			return 0;
   14157 		}
   14158 		wm_put_swsm_semaphore(sc);
   14159 		delay(5000);
   14160 		timeout--;
   14161 	}
   14162 	device_printf(sc->sc_dev,
   14163 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14164 	    mask, swfw_sync);
   14165 	return 1;
   14166 }
   14167 
   14168 static void
   14169 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14170 {
   14171 	uint32_t swfw_sync;
   14172 
   14173 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14174 		device_xname(sc->sc_dev), __func__));
   14175 
   14176 	while (wm_get_swsm_semaphore(sc) != 0)
   14177 		continue;
   14178 
   14179 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14180 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14181 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14182 
   14183 	wm_put_swsm_semaphore(sc);
   14184 }
   14185 
   14186 static int
   14187 wm_get_nvm_80003(struct wm_softc *sc)
   14188 {
   14189 	int rv;
   14190 
   14191 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14192 		device_xname(sc->sc_dev), __func__));
   14193 
   14194 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14195 		aprint_error_dev(sc->sc_dev,
   14196 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14197 		return rv;
   14198 	}
   14199 
   14200 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14201 	    && (rv = wm_get_eecd(sc)) != 0) {
   14202 		aprint_error_dev(sc->sc_dev,
   14203 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14204 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14205 		return rv;
   14206 	}
   14207 
   14208 	return 0;
   14209 }
   14210 
   14211 static void
   14212 wm_put_nvm_80003(struct wm_softc *sc)
   14213 {
   14214 
   14215 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14216 		device_xname(sc->sc_dev), __func__));
   14217 
   14218 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14219 		wm_put_eecd(sc);
   14220 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14221 }
   14222 
   14223 static int
   14224 wm_get_nvm_82571(struct wm_softc *sc)
   14225 {
   14226 	int rv;
   14227 
   14228 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14229 		device_xname(sc->sc_dev), __func__));
   14230 
   14231 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14232 		return rv;
   14233 
   14234 	switch (sc->sc_type) {
   14235 	case WM_T_82573:
   14236 		break;
   14237 	default:
   14238 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14239 			rv = wm_get_eecd(sc);
   14240 		break;
   14241 	}
   14242 
   14243 	if (rv != 0) {
   14244 		aprint_error_dev(sc->sc_dev,
   14245 		    "%s: failed to get semaphore\n",
   14246 		    __func__);
   14247 		wm_put_swsm_semaphore(sc);
   14248 	}
   14249 
   14250 	return rv;
   14251 }
   14252 
   14253 static void
   14254 wm_put_nvm_82571(struct wm_softc *sc)
   14255 {
   14256 
   14257 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14258 		device_xname(sc->sc_dev), __func__));
   14259 
   14260 	switch (sc->sc_type) {
   14261 	case WM_T_82573:
   14262 		break;
   14263 	default:
   14264 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14265 			wm_put_eecd(sc);
   14266 		break;
   14267 	}
   14268 
   14269 	wm_put_swsm_semaphore(sc);
   14270 }
   14271 
   14272 static int
   14273 wm_get_phy_82575(struct wm_softc *sc)
   14274 {
   14275 
   14276 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14277 		device_xname(sc->sc_dev), __func__));
   14278 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14279 }
   14280 
   14281 static void
   14282 wm_put_phy_82575(struct wm_softc *sc)
   14283 {
   14284 
   14285 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14286 		device_xname(sc->sc_dev), __func__));
    14287 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14288 }
   14289 
   14290 static int
   14291 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14292 {
   14293 	uint32_t ext_ctrl;
   14294 	int timeout = 200;
   14295 
   14296 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14297 		device_xname(sc->sc_dev), __func__));
   14298 
   14299 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14300 	for (timeout = 0; timeout < 200; timeout++) {
   14301 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14302 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14303 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14304 
   14305 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14306 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14307 			return 0;
   14308 		delay(5000);
   14309 	}
   14310 	device_printf(sc->sc_dev,
   14311 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14312 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14313 	return 1;
   14314 }
   14315 
   14316 static void
   14317 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14318 {
   14319 	uint32_t ext_ctrl;
   14320 
   14321 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14322 		device_xname(sc->sc_dev), __func__));
   14323 
   14324 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14325 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14326 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14327 
   14328 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14329 }
   14330 
   14331 static int
   14332 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14333 {
   14334 	uint32_t ext_ctrl;
   14335 	int timeout;
   14336 
   14337 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14338 		device_xname(sc->sc_dev), __func__));
   14339 	mutex_enter(sc->sc_ich_phymtx);
   14340 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14341 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14342 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14343 			break;
   14344 		delay(1000);
   14345 	}
   14346 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14347 		device_printf(sc->sc_dev,
   14348 		    "SW has already locked the resource\n");
   14349 		goto out;
   14350 	}
   14351 
   14352 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14353 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14354 	for (timeout = 0; timeout < 1000; timeout++) {
   14355 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14356 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14357 			break;
   14358 		delay(1000);
   14359 	}
   14360 	if (timeout >= 1000) {
   14361 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14362 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14363 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14364 		goto out;
   14365 	}
   14366 	return 0;
   14367 
   14368 out:
   14369 	mutex_exit(sc->sc_ich_phymtx);
   14370 	return 1;
   14371 }
   14372 
   14373 static void
   14374 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14375 {
   14376 	uint32_t ext_ctrl;
   14377 
   14378 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14379 		device_xname(sc->sc_dev), __func__));
   14380 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14381 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14382 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14383 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14384 	} else {
   14385 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14386 	}
   14387 
   14388 	mutex_exit(sc->sc_ich_phymtx);
   14389 }
   14390 
   14391 static int
   14392 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14393 {
   14394 
   14395 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14396 		device_xname(sc->sc_dev), __func__));
   14397 	mutex_enter(sc->sc_ich_nvmmtx);
   14398 
   14399 	return 0;
   14400 }
   14401 
   14402 static void
   14403 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14404 {
   14405 
   14406 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14407 		device_xname(sc->sc_dev), __func__));
   14408 	mutex_exit(sc->sc_ich_nvmmtx);
   14409 }
   14410 
   14411 static int
   14412 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14413 {
   14414 	int i = 0;
   14415 	uint32_t reg;
   14416 
   14417 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14418 		device_xname(sc->sc_dev), __func__));
   14419 
   14420 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14421 	do {
   14422 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14423 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14424 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14425 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14426 			break;
   14427 		delay(2*1000);
   14428 		i++;
   14429 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14430 
   14431 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14432 		wm_put_hw_semaphore_82573(sc);
   14433 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14434 		    device_xname(sc->sc_dev));
   14435 		return -1;
   14436 	}
   14437 
   14438 	return 0;
   14439 }
   14440 
   14441 static void
   14442 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14443 {
   14444 	uint32_t reg;
   14445 
   14446 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14447 		device_xname(sc->sc_dev), __func__));
   14448 
   14449 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14450 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14451 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14452 }
   14453 
   14454 /*
   14455  * Management mode and power management related subroutines.
   14456  * BMC, AMT, suspend/resume and EEE.
   14457  */
   14458 
   14459 #ifdef WM_WOL
   14460 static int
   14461 wm_check_mng_mode(struct wm_softc *sc)
   14462 {
   14463 	int rv;
   14464 
   14465 	switch (sc->sc_type) {
   14466 	case WM_T_ICH8:
   14467 	case WM_T_ICH9:
   14468 	case WM_T_ICH10:
   14469 	case WM_T_PCH:
   14470 	case WM_T_PCH2:
   14471 	case WM_T_PCH_LPT:
   14472 	case WM_T_PCH_SPT:
   14473 	case WM_T_PCH_CNP:
   14474 		rv = wm_check_mng_mode_ich8lan(sc);
   14475 		break;
   14476 	case WM_T_82574:
   14477 	case WM_T_82583:
   14478 		rv = wm_check_mng_mode_82574(sc);
   14479 		break;
   14480 	case WM_T_82571:
   14481 	case WM_T_82572:
   14482 	case WM_T_82573:
   14483 	case WM_T_80003:
   14484 		rv = wm_check_mng_mode_generic(sc);
   14485 		break;
   14486 	default:
    14487 		/* Nothing to do */
   14488 		rv = 0;
   14489 		break;
   14490 	}
   14491 
   14492 	return rv;
   14493 }
   14494 
   14495 static int
   14496 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14497 {
   14498 	uint32_t fwsm;
   14499 
   14500 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14501 
   14502 	if (((fwsm & FWSM_FW_VALID) != 0)
   14503 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14504 		return 1;
   14505 
   14506 	return 0;
   14507 }
   14508 
   14509 static int
   14510 wm_check_mng_mode_82574(struct wm_softc *sc)
   14511 {
   14512 	uint16_t data;
   14513 
   14514 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14515 
   14516 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14517 		return 1;
   14518 
   14519 	return 0;
   14520 }
   14521 
   14522 static int
   14523 wm_check_mng_mode_generic(struct wm_softc *sc)
   14524 {
   14525 	uint32_t fwsm;
   14526 
   14527 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14528 
   14529 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14530 		return 1;
   14531 
   14532 	return 0;
   14533 }
   14534 #endif /* WM_WOL */
   14535 
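          /*
           * wm_enable_mng_pass_thru:
           *
           *	Return 1 if management pass-through (e.g. a BMC sharing this
           *	port) is enabled: ASF firmware must be present, TCO receive
           *	enabled, and the management mode bits must indicate
           *	pass-through rather than plain ASF.
           */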
   14536 static int
   14537 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14538 {
   14539 	uint32_t manc, fwsm, factps;
   14540 
   14541 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14542 		return 0;
   14543 
   14544 	manc = CSR_READ(sc, WMREG_MANC);
   14545 
   14546 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14547 		device_xname(sc->sc_dev), manc));
   14548 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14549 		return 0;
   14550 
   14551 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14552 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14553 		factps = CSR_READ(sc, WMREG_FACTPS);
   14554 		if (((factps & FACTPS_MNGCG) == 0)
   14555 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14556 			return 1;
   14557 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14558 		uint16_t data;
   14559 
   14560 		factps = CSR_READ(sc, WMREG_FACTPS);
   14561 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14562 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14563 			device_xname(sc->sc_dev), factps, data));
   14564 		if (((factps & FACTPS_MNGCG) == 0)
   14565 		    && ((data & NVM_CFG2_MNGM_MASK)
   14566 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14567 			return 1;
   14568 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14569 	    && ((manc & MANC_ASF_EN) == 0))
   14570 		return 1;
   14571 
   14572 	return 0;
   14573 }
   14574 
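          /*
           * Check whether a PHY reset is currently blocked, typically by
           * manageability firmware.  ICH/PCH parts poll FWSM_RSPCIPHY for up
           * to ~300ms; 8257[1-4]/82583/80003 parts test
           * MANC_BLK_PHY_RST_ON_IDE.
           */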
   14575 static bool
   14576 wm_phy_resetisblocked(struct wm_softc *sc)
   14577 {
   14578 	bool blocked = false;
   14579 	uint32_t reg;
   14580 	int i = 0;
   14581 
   14582 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14583 		device_xname(sc->sc_dev), __func__));
   14584 
   14585 	switch (sc->sc_type) {
   14586 	case WM_T_ICH8:
   14587 	case WM_T_ICH9:
   14588 	case WM_T_ICH10:
   14589 	case WM_T_PCH:
   14590 	case WM_T_PCH2:
   14591 	case WM_T_PCH_LPT:
   14592 	case WM_T_PCH_SPT:
   14593 	case WM_T_PCH_CNP:
   14594 		do {
   14595 			reg = CSR_READ(sc, WMREG_FWSM);
   14596 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14597 				blocked = true;
   14598 				delay(10*1000);
   14599 				continue;
   14600 			}
   14601 			blocked = false;
   14602 		} while (blocked && (i++ < 30));
   14603 		return blocked;
   14605 	case WM_T_82571:
   14606 	case WM_T_82572:
   14607 	case WM_T_82573:
   14608 	case WM_T_82574:
   14609 	case WM_T_82583:
   14610 	case WM_T_80003:
   14611 		reg = CSR_READ(sc, WMREG_MANC);
   14612 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14613 			return true;
   14614 		else
   14615 			return false;
   14617 	default:
   14618 		/* No problem */
   14619 		break;
   14620 	}
   14621 
   14622 	return false;
   14623 }
   14624 
   14625 static void
   14626 wm_get_hw_control(struct wm_softc *sc)
   14627 {
   14628 	uint32_t reg;
   14629 
   14630 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14631 		device_xname(sc->sc_dev), __func__));
   14632 
   14633 	if (sc->sc_type == WM_T_82573) {
   14634 		reg = CSR_READ(sc, WMREG_SWSM);
   14635 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14636 	} else if (sc->sc_type >= WM_T_82571) {
   14637 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14638 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14639 	}
   14640 }
   14641 
   14642 static void
   14643 wm_release_hw_control(struct wm_softc *sc)
   14644 {
   14645 	uint32_t reg;
   14646 
   14647 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14648 		device_xname(sc->sc_dev), __func__));
   14649 
   14650 	if (sc->sc_type == WM_T_82573) {
   14651 		reg = CSR_READ(sc, WMREG_SWSM);
   14652 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14653 	} else if (sc->sc_type >= WM_T_82571) {
   14654 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14655 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14656 	}
   14657 }
   14658 
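          /*
           * Gate or ungate automatic PHY configuration by hardware.  PCH2
           * (82579) and newer only; while gated, the hardware will not start
           * its own PHY configuration cycle.
           */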
   14659 static void
   14660 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14661 {
   14662 	uint32_t reg;
   14663 
   14664 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14665 		device_xname(sc->sc_dev), __func__));
   14666 
   14667 	if (sc->sc_type < WM_T_PCH2)
   14668 		return;
   14669 
   14670 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14671 
   14672 	if (gate)
   14673 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14674 	else
   14675 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14676 
   14677 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14678 }
   14679 
   14680 static int
   14681 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14682 {
   14683 	uint32_t fwsm, reg;
   14684 	int rv = 0;
   14685 
   14686 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14687 		device_xname(sc->sc_dev), __func__));
   14688 
   14689 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14690 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14691 
   14692 	/* Disable ULP */
   14693 	wm_ulp_disable(sc);
   14694 
   14695 	/* Acquire PHY semaphore */
   14696 	rv = sc->phy.acquire(sc);
   14697 	if (rv != 0) {
   14698 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14699 		device_xname(sc->sc_dev), __func__));
   14700 		return -1;
   14701 	}
   14702 
   14703 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14704 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14705 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14706 	 */
   14707 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14708 	switch (sc->sc_type) {
   14709 	case WM_T_PCH_LPT:
   14710 	case WM_T_PCH_SPT:
   14711 	case WM_T_PCH_CNP:
   14712 		if (wm_phy_is_accessible_pchlan(sc))
   14713 			break;
   14714 
   14715 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14716 		 * forcing MAC to SMBus mode first.
   14717 		 */
   14718 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14719 		reg |= CTRL_EXT_FORCE_SMBUS;
   14720 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14721 #if 0
   14722 		/* XXX Isn't this required??? */
   14723 		CSR_WRITE_FLUSH(sc);
   14724 #endif
   14725 		/* Wait 50 milliseconds for MAC to finish any retries
   14726 		 * that it might be trying to perform from previous
   14727 		 * attempts to acknowledge any phy read requests.
   14728 		 */
   14729 		delay(50 * 1000);
   14730 		/* FALLTHROUGH */
   14731 	case WM_T_PCH2:
   14732 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14733 			break;
   14734 		/* FALLTHROUGH */
   14735 	case WM_T_PCH:
   14736 		if (sc->sc_type == WM_T_PCH)
   14737 			if ((fwsm & FWSM_FW_VALID) != 0)
   14738 				break;
   14739 
   14740 		if (wm_phy_resetisblocked(sc) == true) {
   14741 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14742 			break;
   14743 		}
   14744 
   14745 		/* Toggle LANPHYPC Value bit */
   14746 		wm_toggle_lanphypc_pch_lpt(sc);
   14747 
   14748 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14749 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14750 				break;
   14751 
    14752 			/* Toggling LANPHYPC brings the PHY out of SMBus mode,
    14753 			 * so ensure that the MAC is also out of SMBus mode.
   14754 			 */
   14755 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14756 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14757 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14758 
   14759 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14760 				break;
   14761 			rv = -1;
   14762 		}
   14763 		break;
   14764 	default:
   14765 		break;
   14766 	}
   14767 
   14768 	/* Release semaphore */
   14769 	sc->phy.release(sc);
   14770 
   14771 	if (rv == 0) {
   14772 		/* Check to see if able to reset PHY.  Print error if not */
   14773 		if (wm_phy_resetisblocked(sc)) {
   14774 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14775 			goto out;
   14776 		}
   14777 
    14778 		/* Reset the PHY before any access to it.  Doing so ensures
   14779 		 * that the PHY is in a known good state before we read/write
   14780 		 * PHY registers.  The generic reset is sufficient here,
   14781 		 * because we haven't determined the PHY type yet.
   14782 		 */
   14783 		if (wm_reset_phy(sc) != 0)
   14784 			goto out;
   14785 
   14786 		/* On a successful reset, possibly need to wait for the PHY
   14787 		 * to quiesce to an accessible state before returning control
   14788 		 * to the calling function.  If the PHY does not quiesce, then
    14789 		 * report that the reset is blocked, as this is the condition
    14790 		 * that the PHY is in.
   14791 		 */
   14792 		if (wm_phy_resetisblocked(sc))
   14793 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14794 	}
   14795 
   14796 out:
   14797 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14798 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14799 		delay(10*1000);
   14800 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14801 	}
   14802 
    14803 	return rv;
   14804 }
   14805 
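          /*
           * Set up manageability filtering: keep the firmware from
           * intercepting ARP, and, on 82571 and newer, also deliver
           * management packets on the MANC2H 623/624 ports to the host.
           */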
   14806 static void
   14807 wm_init_manageability(struct wm_softc *sc)
   14808 {
   14809 
   14810 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14811 		device_xname(sc->sc_dev), __func__));
   14812 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14813 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14814 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14815 
   14816 		/* Disable hardware interception of ARP */
   14817 		manc &= ~MANC_ARP_EN;
   14818 
   14819 		/* Enable receiving management packets to the host */
   14820 		if (sc->sc_type >= WM_T_82571) {
   14821 			manc |= MANC_EN_MNG2HOST;
   14822 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14823 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14824 		}
   14825 
   14826 		CSR_WRITE(sc, WMREG_MANC, manc);
   14827 	}
   14828 }
   14829 
   14830 static void
   14831 wm_release_manageability(struct wm_softc *sc)
   14832 {
   14833 
   14834 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14835 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14836 
   14837 		manc |= MANC_ARP_EN;
   14838 		if (sc->sc_type >= WM_T_82571)
   14839 			manc &= ~MANC_EN_MNG2HOST;
   14840 
   14841 		CSR_WRITE(sc, WMREG_MANC, manc);
   14842 	}
   14843 }
   14844 
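          /*
           * Work out which wakeup/manageability features this MAC has and
           * record them in sc_flags: step 0 derives HAS_AMT,
           * ARC_SUBSYS_VALID and ASF_FIRMWARE_PRES from the MAC type, step 1
           * derives HAS_MANAGE from the management pass-through check.
           */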
   14845 static void
   14846 wm_get_wakeup(struct wm_softc *sc)
   14847 {
   14848 
   14849 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14850 	switch (sc->sc_type) {
   14851 	case WM_T_82573:
   14852 	case WM_T_82583:
   14853 		sc->sc_flags |= WM_F_HAS_AMT;
   14854 		/* FALLTHROUGH */
   14855 	case WM_T_80003:
   14856 	case WM_T_82575:
   14857 	case WM_T_82576:
   14858 	case WM_T_82580:
   14859 	case WM_T_I350:
   14860 	case WM_T_I354:
   14861 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14862 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14863 		/* FALLTHROUGH */
   14864 	case WM_T_82541:
   14865 	case WM_T_82541_2:
   14866 	case WM_T_82547:
   14867 	case WM_T_82547_2:
   14868 	case WM_T_82571:
   14869 	case WM_T_82572:
   14870 	case WM_T_82574:
   14871 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14872 		break;
   14873 	case WM_T_ICH8:
   14874 	case WM_T_ICH9:
   14875 	case WM_T_ICH10:
   14876 	case WM_T_PCH:
   14877 	case WM_T_PCH2:
   14878 	case WM_T_PCH_LPT:
   14879 	case WM_T_PCH_SPT:
   14880 	case WM_T_PCH_CNP:
   14881 		sc->sc_flags |= WM_F_HAS_AMT;
   14882 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14883 		break;
   14884 	default:
   14885 		break;
   14886 	}
   14887 
   14888 	/* 1: HAS_MANAGE */
   14889 	if (wm_enable_mng_pass_thru(sc) != 0)
   14890 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14891 
   14892 	/*
    14893 	 * Note that the WOL flag is set after the EEPROM-related reset
    14894 	 * is done.
   14895 	 */
   14896 }
   14897 
   14898 /*
    14899  * Unconfigure Ultra Low Power (ULP) mode.
    14900  * Only for PCH_LPT and newer, minus a few I217/I218 devices (see below).
   14901  */
   14902 static int
   14903 wm_ulp_disable(struct wm_softc *sc)
   14904 {
   14905 	uint32_t reg;
   14906 	uint16_t phyreg;
   14907 	int i = 0, rv = 0;
   14908 
   14909 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14910 		device_xname(sc->sc_dev), __func__));
   14911 	/* Exclude old devices */
   14912 	if ((sc->sc_type < WM_T_PCH_LPT)
   14913 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14914 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14915 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14916 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14917 		return 0;
   14918 
   14919 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14920 		/* Request ME un-configure ULP mode in the PHY */
   14921 		reg = CSR_READ(sc, WMREG_H2ME);
   14922 		reg &= ~H2ME_ULP;
   14923 		reg |= H2ME_ENFORCE_SETTINGS;
   14924 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14925 
   14926 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14927 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14928 			if (i++ == 30) {
   14929 				device_printf(sc->sc_dev, "%s timed out\n",
   14930 				    __func__);
   14931 				return -1;
   14932 			}
   14933 			delay(10 * 1000);
   14934 		}
   14935 		reg = CSR_READ(sc, WMREG_H2ME);
   14936 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14937 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14938 
   14939 		return 0;
   14940 	}
   14941 
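          	/*
          	 * No valid manageability firmware, so the driver has to take
          	 * the PHY out of ULP by hand: toggle LANPHYPC, drop the forced
          	 * SMBus mode in both PHY and MAC, re-enable K1 and
          	 * clear/restart I218_ULP_CONFIG1.
          	 */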
   14942 	/* Acquire semaphore */
   14943 	rv = sc->phy.acquire(sc);
   14944 	if (rv != 0) {
   14945 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14946 		device_xname(sc->sc_dev), __func__));
   14947 		return -1;
   14948 	}
   14949 
   14950 	/* Toggle LANPHYPC */
   14951 	wm_toggle_lanphypc_pch_lpt(sc);
   14952 
   14953 	/* Unforce SMBus mode in PHY */
   14954 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14955 	if (rv != 0) {
   14956 		uint32_t reg2;
   14957 
   14958 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14959 			__func__);
   14960 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14961 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14962 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14963 		delay(50 * 1000);
   14964 
   14965 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14966 		    &phyreg);
   14967 		if (rv != 0)
   14968 			goto release;
   14969 	}
   14970 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14971 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14972 
   14973 	/* Unforce SMBus mode in MAC */
   14974 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14975 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14976 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14977 
   14978 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14979 	if (rv != 0)
   14980 		goto release;
   14981 	phyreg |= HV_PM_CTRL_K1_ENA;
   14982 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14983 
   14984 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14985 		&phyreg);
   14986 	if (rv != 0)
   14987 		goto release;
   14988 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14989 	    | I218_ULP_CONFIG1_STICKY_ULP
   14990 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14991 	    | I218_ULP_CONFIG1_WOL_HOST
   14992 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14993 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14994 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14995 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14996 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14997 	phyreg |= I218_ULP_CONFIG1_START;
   14998 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14999 
   15000 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15001 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15002 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15003 
   15004 release:
   15005 	/* Release semaphore */
   15006 	sc->phy.release(sc);
   15007 	wm_gmii_reset(sc);
   15008 	delay(50 * 1000);
   15009 
   15010 	return rv;
   15011 }
   15012 
   15013 /* WOL in the newer chipset interfaces (pchlan) */
   15014 static int
   15015 wm_enable_phy_wakeup(struct wm_softc *sc)
   15016 {
   15017 	device_t dev = sc->sc_dev;
   15018 	uint32_t mreg, moff;
   15019 	uint16_t wuce, wuc, wufc, preg;
   15020 	int i, rv;
   15021 
   15022 	KASSERT(sc->sc_type >= WM_T_PCH);
   15023 
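          	/*
          	 * With PHY-based wakeup it is the PHY, not the MAC, that
          	 * filters packets while the host sleeps, so the MAC's receive
          	 * address and multicast tables and its Rx control settings are
          	 * mirrored into the PHY's BM_WUC_PAGE registers below.
          	 */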
   15024 	/* Copy MAC RARs to PHY RARs */
   15025 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15026 
   15027 	/* Activate PHY wakeup */
   15028 	rv = sc->phy.acquire(sc);
   15029 	if (rv != 0) {
   15030 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15031 		    __func__);
   15032 		return rv;
   15033 	}
   15034 
   15035 	/*
   15036 	 * Enable access to PHY wakeup registers.
   15037 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15038 	 */
   15039 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15040 	if (rv != 0) {
   15041 		device_printf(dev,
   15042 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15043 		goto release;
   15044 	}
   15045 
   15046 	/* Copy MAC MTA to PHY MTA */
   15047 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15048 		uint16_t lo, hi;
   15049 
   15050 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15051 		lo = (uint16_t)(mreg & 0xffff);
   15052 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15053 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15054 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15055 	}
   15056 
   15057 	/* Configure PHY Rx Control register */
   15058 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15059 	mreg = CSR_READ(sc, WMREG_RCTL);
   15060 	if (mreg & RCTL_UPE)
   15061 		preg |= BM_RCTL_UPE;
   15062 	if (mreg & RCTL_MPE)
   15063 		preg |= BM_RCTL_MPE;
   15064 	preg &= ~(BM_RCTL_MO_MASK);
   15065 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15066 	if (moff != 0)
   15067 		preg |= moff << BM_RCTL_MO_SHIFT;
   15068 	if (mreg & RCTL_BAM)
   15069 		preg |= BM_RCTL_BAM;
   15070 	if (mreg & RCTL_PMCF)
   15071 		preg |= BM_RCTL_PMCF;
   15072 	mreg = CSR_READ(sc, WMREG_CTRL);
   15073 	if (mreg & CTRL_RFCE)
   15074 		preg |= BM_RCTL_RFCE;
   15075 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15076 
   15077 	wuc = WUC_APME | WUC_PME_EN;
   15078 	wufc = WUFC_MAG;
   15079 	/* Enable PHY wakeup in MAC register */
   15080 	CSR_WRITE(sc, WMREG_WUC,
   15081 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15082 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15083 
   15084 	/* Configure and enable PHY wakeup in PHY registers */
   15085 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15086 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15087 
   15088 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15089 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15090 
   15091 release:
   15092 	sc->phy.release(sc);
   15093 
    15094 	return rv;
   15095 }
   15096 
   15097 /* Power down workaround on D3 */
   15098 static void
   15099 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15100 {
   15101 	uint32_t reg;
   15102 	uint16_t phyreg;
   15103 	int i;
   15104 
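          	/*
          	 * Try at most twice: if the VR shutdown mode does not stick on
          	 * the first pass, reset the PHY and repeat once.
          	 */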
   15105 	for (i = 0; i < 2; i++) {
   15106 		/* Disable link */
   15107 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15108 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15109 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15110 
   15111 		/*
   15112 		 * Call gig speed drop workaround on Gig disable before
   15113 		 * accessing any PHY registers
   15114 		 */
   15115 		if (sc->sc_type == WM_T_ICH8)
   15116 			wm_gig_downshift_workaround_ich8lan(sc);
   15117 
   15118 		/* Write VR power-down enable */
   15119 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15120 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15121 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15122 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15123 
   15124 		/* Read it back and test */
   15125 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15126 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15127 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15128 			break;
   15129 
   15130 		/* Issue PHY reset and repeat at most one more time */
   15131 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15132 	}
   15133 }
   15134 
   15135 /*
   15136  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15137  *  @sc: pointer to the HW structure
   15138  *
   15139  *  During S0 to Sx transition, it is possible the link remains at gig
   15140  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15141  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15142  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15143  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15144  *  needs to be written.
    15145  *  Parts that support (and are linked to a partner which supports) EEE in
   15146  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15147  *  than 10Mbps w/o EEE.
   15148  */
   15149 static void
   15150 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15151 {
   15152 	device_t dev = sc->sc_dev;
   15153 	struct ethercom *ec = &sc->sc_ethercom;
   15154 	uint32_t phy_ctrl;
   15155 	int rv;
   15156 
   15157 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15158 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15159 
   15160 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15161 
   15162 	if (sc->sc_phytype == WMPHY_I217) {
   15163 		uint16_t devid = sc->sc_pcidevid;
   15164 
   15165 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15166 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15167 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15168 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15169 		    (sc->sc_type >= WM_T_PCH_SPT))
   15170 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15171 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15172 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15173 
   15174 		if (sc->phy.acquire(sc) != 0)
   15175 			goto out;
   15176 
   15177 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15178 			uint16_t eee_advert;
   15179 
   15180 			rv = wm_read_emi_reg_locked(dev,
   15181 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15182 			if (rv)
   15183 				goto release;
   15184 
   15185 			/*
   15186 			 * Disable LPLU if both link partners support 100BaseT
   15187 			 * EEE and 100Full is advertised on both ends of the
   15188 			 * link, and enable Auto Enable LPI since there will
   15189 			 * be no driver to enable LPI while in Sx.
   15190 			 */
   15191 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15192 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15193 				uint16_t anar, phy_reg;
   15194 
   15195 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15196 				    &anar);
   15197 				if (anar & ANAR_TX_FD) {
   15198 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15199 					    PHY_CTRL_NOND0A_LPLU);
   15200 
   15201 					/* Set Auto Enable LPI after link up */
   15202 					sc->phy.readreg_locked(dev, 2,
   15203 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15204 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15205 					sc->phy.writereg_locked(dev, 2,
   15206 					    I217_LPI_GPIO_CTRL, phy_reg);
   15207 				}
   15208 			}
   15209 		}
   15210 
   15211 		/*
   15212 		 * For i217 Intel Rapid Start Technology support,
   15213 		 * when the system is going into Sx and no manageability engine
   15214 		 * is present, the driver must configure proxy to reset only on
   15215 		 * power good.	LPI (Low Power Idle) state must also reset only
   15216 		 * on power good, as well as the MTA (Multicast table array).
   15217 		 * The SMBus release must also be disabled on LCD reset.
   15218 		 */
   15219 
   15220 		/*
   15221 		 * Enable MTA to reset for Intel Rapid Start Technology
   15222 		 * Support
   15223 		 */
   15224 
   15225 release:
   15226 		sc->phy.release(sc);
   15227 	}
   15228 out:
   15229 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15230 
   15231 	if (sc->sc_type == WM_T_ICH8)
   15232 		wm_gig_downshift_workaround_ich8lan(sc);
   15233 
   15234 	if (sc->sc_type >= WM_T_PCH) {
   15235 		wm_oem_bits_config_ich8lan(sc, false);
   15236 
   15237 		/* Reset PHY to activate OEM bits on 82577/8 */
   15238 		if (sc->sc_type == WM_T_PCH)
   15239 			wm_reset_phy(sc);
   15240 
   15241 		if (sc->phy.acquire(sc) != 0)
   15242 			return;
   15243 		wm_write_smbus_addr(sc);
   15244 		sc->phy.release(sc);
   15245 	}
   15246 }
   15247 
   15248 /*
   15249  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15250  *  @sc: pointer to the HW structure
   15251  *
   15252  *  During Sx to S0 transitions on non-managed devices or managed devices
   15253  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15254  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15255  *  the PHY.
   15256  *  On i217, setup Intel Rapid Start Technology.
   15257  */
   15258 static int
   15259 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15260 {
   15261 	device_t dev = sc->sc_dev;
   15262 	int rv;
   15263 
   15264 	if (sc->sc_type < WM_T_PCH2)
   15265 		return 0;
   15266 
   15267 	rv = wm_init_phy_workarounds_pchlan(sc);
   15268 	if (rv != 0)
   15269 		return -1;
   15270 
    15271 	/* For i217 Intel Rapid Start Technology support, when the system
    15272 	 * is transitioning from Sx and no manageability engine is present,
   15273 	 * configure SMBus to restore on reset, disable proxy, and enable
   15274 	 * the reset on MTA (Multicast table array).
   15275 	 */
   15276 	if (sc->sc_phytype == WMPHY_I217) {
   15277 		uint16_t phy_reg;
   15278 
   15279 		if (sc->phy.acquire(sc) != 0)
   15280 			return -1;
   15281 
   15282 		/* Clear Auto Enable LPI after link up */
   15283 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15284 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15285 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15286 
   15287 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15288 			/* Restore clear on SMB if no manageability engine
   15289 			 * is present
   15290 			 */
   15291 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15292 			    &phy_reg);
   15293 			if (rv != 0)
   15294 				goto release;
   15295 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15296 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15297 
   15298 			/* Disable Proxy */
   15299 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15300 		}
   15301 		/* Enable reset on MTA */
    15302 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15303 		if (rv != 0)
   15304 			goto release;
   15305 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15306 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15307 
   15308 release:
   15309 		sc->phy.release(sc);
   15310 		return rv;
   15311 	}
   15312 
   15313 	return 0;
   15314 }
   15315 
   15316 static void
   15317 wm_enable_wakeup(struct wm_softc *sc)
   15318 {
   15319 	uint32_t reg, pmreg;
   15320 	pcireg_t pmode;
   15321 	int rv = 0;
   15322 
   15323 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15324 		device_xname(sc->sc_dev), __func__));
   15325 
   15326 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15327 	    &pmreg, NULL) == 0)
   15328 		return;
   15329 
   15330 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15331 		goto pme;
   15332 
   15333 	/* Advertise the wakeup capability */
   15334 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15335 	    | CTRL_SWDPIN(3));
   15336 
   15337 	/* Keep the laser running on fiber adapters */
   15338 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15339 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15340 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15341 		reg |= CTRL_EXT_SWDPIN(3);
   15342 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15343 	}
   15344 
   15345 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15346 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15347 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15348 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15349 		wm_suspend_workarounds_ich8lan(sc);
   15350 
   15351 #if 0	/* For the multicast packet */
   15352 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15353 	reg |= WUFC_MC;
   15354 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15355 #endif
   15356 
   15357 	if (sc->sc_type >= WM_T_PCH) {
   15358 		rv = wm_enable_phy_wakeup(sc);
   15359 		if (rv != 0)
   15360 			goto pme;
   15361 	} else {
   15362 		/* Enable wakeup by the MAC */
   15363 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15364 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15365 	}
   15366 
   15367 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15368 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15369 		|| (sc->sc_type == WM_T_PCH2))
   15370 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15371 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15372 
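          	/*
          	 * Whether or not WOL is used, clear any stale PME status (the
          	 * bit is write-1-to-clear) and then enable or disable PME
          	 * assertion in the PCI power management capability accordingly.
          	 */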
   15373 pme:
   15374 	/* Request PME */
   15375 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15376 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15377 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15378 		/* For WOL */
   15379 		pmode |= PCI_PMCSR_PME_EN;
   15380 	} else {
   15381 		/* Disable WOL */
   15382 		pmode &= ~PCI_PMCSR_PME_EN;
   15383 	}
   15384 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15385 }
   15386 
   15387 /* Disable ASPM L0s and/or L1 for workaround */
   15388 static void
   15389 wm_disable_aspm(struct wm_softc *sc)
   15390 {
   15391 	pcireg_t reg, mask = 0;
    15392 	const char *str = "";
   15393 
   15394 	/*
    15395 	 * Only for PCIe devices that have the PCIe capability in the PCI
    15396 	 * config space.
   15397 	 */
   15398 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15399 		return;
   15400 
   15401 	switch (sc->sc_type) {
   15402 	case WM_T_82571:
   15403 	case WM_T_82572:
   15404 		/*
   15405 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15406 		 * State Power management L1 State (ASPM L1).
   15407 		 */
   15408 		mask = PCIE_LCSR_ASPM_L1;
   15409 		str = "L1 is";
   15410 		break;
   15411 	case WM_T_82573:
   15412 	case WM_T_82574:
   15413 	case WM_T_82583:
   15414 		/*
   15415 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15416 		 *
    15417 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15418 		 * some chipsets.  The 82574 and 82583 documentation says that
    15419 		 * disabling L0s on those specific chipsets is sufficient,
    15420 		 * but we follow what the Intel em driver does.
   15421 		 *
   15422 		 * References:
   15423 		 * Errata 8 of the Specification Update of i82573.
   15424 		 * Errata 20 of the Specification Update of i82574.
   15425 		 * Errata 9 of the Specification Update of i82583.
   15426 		 */
   15427 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15428 		str = "L0s and L1 are";
   15429 		break;
   15430 	default:
   15431 		return;
   15432 	}
   15433 
   15434 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15435 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15436 	reg &= ~mask;
   15437 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15438 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15439 
   15440 	/* Print only in wm_attach() */
   15441 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15442 		aprint_verbose_dev(sc->sc_dev,
   15443 		    "ASPM %s disabled to workaround the errata.\n", str);
   15444 }
   15445 
   15446 /* LPLU */
   15447 
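          /*
           * Disable D0 Low Power Link Up.  Where LPLU is controlled differs
           * by family: an IGP PHY register, the PHPM or PHY_CTRL MAC
           * registers, or the HV OEM bits on PCH variants.
           */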
   15448 static void
   15449 wm_lplu_d0_disable(struct wm_softc *sc)
   15450 {
   15451 	struct mii_data *mii = &sc->sc_mii;
   15452 	uint32_t reg;
   15453 	uint16_t phyval;
   15454 
   15455 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15456 		device_xname(sc->sc_dev), __func__));
   15457 
   15458 	if (sc->sc_phytype == WMPHY_IFE)
   15459 		return;
   15460 
   15461 	switch (sc->sc_type) {
   15462 	case WM_T_82571:
   15463 	case WM_T_82572:
   15464 	case WM_T_82573:
   15465 	case WM_T_82575:
   15466 	case WM_T_82576:
   15467 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15468 		phyval &= ~PMR_D0_LPLU;
   15469 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15470 		break;
   15471 	case WM_T_82580:
   15472 	case WM_T_I350:
   15473 	case WM_T_I210:
   15474 	case WM_T_I211:
   15475 		reg = CSR_READ(sc, WMREG_PHPM);
   15476 		reg &= ~PHPM_D0A_LPLU;
   15477 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15478 		break;
   15479 	case WM_T_82574:
   15480 	case WM_T_82583:
   15481 	case WM_T_ICH8:
   15482 	case WM_T_ICH9:
   15483 	case WM_T_ICH10:
   15484 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15485 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15486 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15487 		CSR_WRITE_FLUSH(sc);
   15488 		break;
   15489 	case WM_T_PCH:
   15490 	case WM_T_PCH2:
   15491 	case WM_T_PCH_LPT:
   15492 	case WM_T_PCH_SPT:
   15493 	case WM_T_PCH_CNP:
   15494 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15495 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15496 		if (wm_phy_resetisblocked(sc) == false)
   15497 			phyval |= HV_OEM_BITS_ANEGNOW;
   15498 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15499 		break;
   15500 	default:
   15501 		break;
   15502 	}
   15503 }
   15504 
   15505 /* EEE */
   15506 
   15507 static int
   15508 wm_set_eee_i350(struct wm_softc *sc)
   15509 {
   15510 	struct ethercom *ec = &sc->sc_ethercom;
   15511 	uint32_t ipcnfg, eeer;
   15512 	uint32_t ipcnfg_mask
   15513 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15514 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15515 
   15516 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15517 
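          	/*
          	 * On these MACs, EEE is controlled entirely from the MAC side:
          	 * IPCNFG selects the speeds that may negotiate EEE and EEER
          	 * enables LPI in the Tx/Rx paths plus LPI flow control.
          	 */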
   15518 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15519 	eeer = CSR_READ(sc, WMREG_EEER);
   15520 
   15521 	/* Enable or disable per user setting */
   15522 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15523 		ipcnfg |= ipcnfg_mask;
   15524 		eeer |= eeer_mask;
   15525 	} else {
   15526 		ipcnfg &= ~ipcnfg_mask;
   15527 		eeer &= ~eeer_mask;
   15528 	}
   15529 
   15530 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15531 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15532 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15533 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15534 
   15535 	return 0;
   15536 }
   15537 
   15538 static int
   15539 wm_set_eee_pchlan(struct wm_softc *sc)
   15540 {
   15541 	device_t dev = sc->sc_dev;
   15542 	struct ethercom *ec = &sc->sc_ethercom;
   15543 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15544 	int rv = 0;
   15545 
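          	/*
          	 * The EEE registers of the 82579 and I217 PHYs are reached
          	 * through the EMI interface; pick the per-PHY register
          	 * addresses first.
          	 */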
   15546 	switch (sc->sc_phytype) {
   15547 	case WMPHY_82579:
   15548 		lpa = I82579_EEE_LP_ABILITY;
   15549 		pcs_status = I82579_EEE_PCS_STATUS;
   15550 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15551 		break;
   15552 	case WMPHY_I217:
   15553 		lpa = I217_EEE_LP_ABILITY;
   15554 		pcs_status = I217_EEE_PCS_STATUS;
   15555 		adv_addr = I217_EEE_ADVERTISEMENT;
   15556 		break;
    15557 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   15558 		return 0;
   15559 	}
   15560 
   15561 	if (sc->phy.acquire(sc)) {
   15562 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15563 		return 0;
   15564 	}
   15565 
   15566 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15567 	if (rv != 0)
   15568 		goto release;
   15569 
   15570 	/* Clear bits that enable EEE in various speeds */
   15571 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15572 
   15573 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15574 		/* Save off link partner's EEE ability */
   15575 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15576 		if (rv != 0)
   15577 			goto release;
   15578 
   15579 		/* Read EEE advertisement */
   15580 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15581 			goto release;
   15582 
   15583 		/*
   15584 		 * Enable EEE only for speeds in which the link partner is
   15585 		 * EEE capable and for which we advertise EEE.
   15586 		 */
   15587 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15588 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15589 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15590 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15591 			if ((data & ANLPAR_TX_FD) != 0)
   15592 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15593 			else {
   15594 				/*
   15595 				 * EEE is not supported in 100Half, so ignore
   15596 				 * partner's EEE in 100 ability if full-duplex
   15597 				 * is not advertised.
   15598 				 */
   15599 				sc->eee_lp_ability
   15600 				    &= ~AN_EEEADVERT_100_TX;
   15601 			}
   15602 		}
   15603 	}
   15604 
   15605 	if (sc->sc_phytype == WMPHY_82579) {
   15606 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15607 		if (rv != 0)
   15608 			goto release;
   15609 
   15610 		data &= ~I82579_LPI_PLL_SHUT_100;
   15611 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15612 	}
   15613 
   15614 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15615 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15616 		goto release;
   15617 
   15618 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15619 release:
   15620 	sc->phy.release(sc);
   15621 
   15622 	return rv;
   15623 }
   15624 
   15625 static int
   15626 wm_set_eee(struct wm_softc *sc)
   15627 {
   15628 	struct ethercom *ec = &sc->sc_ethercom;
   15629 
   15630 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15631 		return 0;
   15632 
   15633 	if (sc->sc_type == WM_T_I354) {
   15634 		/* I354 uses an external PHY */
   15635 		return 0; /* not yet */
   15636 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15637 		return wm_set_eee_i350(sc);
   15638 	else if (sc->sc_type >= WM_T_PCH2)
   15639 		return wm_set_eee_pchlan(sc);
   15640 
   15641 	return 0;
   15642 }
   15643 
   15644 /*
   15645  * Workarounds (mainly PHY related).
   15646  * Basically, PHY's workarounds are in the PHY drivers.
   15647  */
   15648 
   15649 /* Work-around for 82566 Kumeran PCS lock loss */
   15650 static int
   15651 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15652 {
   15653 	struct mii_data *mii = &sc->sc_mii;
   15654 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15655 	int i, reg, rv;
   15656 	uint16_t phyreg;
   15657 
   15658 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15659 		device_xname(sc->sc_dev), __func__));
   15660 
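          	/*
          	 * If the IGP3 PHY reports Kumeran PCS lock loss while linked
          	 * at 1Gbps, reset the PHY up to 10 times; if the lock loss
          	 * persists, give up and disable gigabit negotiation instead.
          	 */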
   15661 	/* If the link is not up, do nothing */
   15662 	if ((status & STATUS_LU) == 0)
   15663 		return 0;
   15664 
   15665 	/* Nothing to do if the link is other than 1Gbps */
   15666 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15667 		return 0;
   15668 
   15669 	for (i = 0; i < 10; i++) {
   15670 		/* read twice */
   15671 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15672 		if (rv != 0)
   15673 			return rv;
   15674 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15675 		if (rv != 0)
   15676 			return rv;
   15677 
   15678 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15679 			goto out;	/* GOOD! */
   15680 
   15681 		/* Reset the PHY */
   15682 		wm_reset_phy(sc);
   15683 		delay(5*1000);
   15684 	}
   15685 
   15686 	/* Disable GigE link negotiation */
   15687 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15688 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15689 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15690 
   15691 	/*
   15692 	 * Call gig speed drop workaround on Gig disable before accessing
   15693 	 * any PHY registers.
   15694 	 */
   15695 	wm_gig_downshift_workaround_ich8lan(sc);
   15696 
   15697 out:
   15698 	return 0;
   15699 }
   15700 
   15701 /*
   15702  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15703  *  @sc: pointer to the HW structure
   15704  *
    15705  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15706  *  LPLU, Gig disable, MDIC PHY reset):
   15707  *    1) Set Kumeran Near-end loopback
   15708  *    2) Clear Kumeran Near-end loopback
   15709  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15710  */
   15711 static void
   15712 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15713 {
   15714 	uint16_t kmreg;
   15715 
   15716 	/* Only for igp3 */
   15717 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15718 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15719 			return;
   15720 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15721 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15722 			return;
   15723 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15724 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15725 	}
   15726 }
   15727 
   15728 /*
   15729  * Workaround for pch's PHYs
   15730  * XXX should be moved to new PHY driver?
   15731  */
   15732 static int
   15733 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15734 {
   15735 	device_t dev = sc->sc_dev;
   15736 	struct mii_data *mii = &sc->sc_mii;
   15737 	struct mii_softc *child;
   15738 	uint16_t phy_data, phyrev = 0;
   15739 	int phytype = sc->sc_phytype;
   15740 	int rv;
   15741 
   15742 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15743 		device_xname(dev), __func__));
   15744 	KASSERT(sc->sc_type == WM_T_PCH);
   15745 
   15746 	/* Set MDIO slow mode before any other MDIO access */
   15747 	if (phytype == WMPHY_82577)
   15748 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15749 			return rv;
   15750 
   15751 	child = LIST_FIRST(&mii->mii_phys);
   15752 	if (child != NULL)
   15753 		phyrev = child->mii_mpd_rev;
   15754 
    15755 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15756 	if ((child != NULL) &&
   15757 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15758 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15759 		/* Disable generation of early preamble (0x4431) */
   15760 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15761 		    &phy_data);
   15762 		if (rv != 0)
   15763 			return rv;
   15764 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15765 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15766 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15767 		    phy_data);
   15768 		if (rv != 0)
   15769 			return rv;
   15770 
   15771 		/* Preamble tuning for SSC */
   15772 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15773 		if (rv != 0)
   15774 			return rv;
   15775 	}
   15776 
   15777 	/* 82578 */
   15778 	if (phytype == WMPHY_82578) {
   15779 		/*
   15780 		 * Return registers to default by doing a soft reset then
   15781 		 * writing 0x3140 to the control register
   15782 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15783 		 */
   15784 		if ((child != NULL) && (phyrev < 2)) {
   15785 			PHY_RESET(child);
   15786 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15787 			if (rv != 0)
   15788 				return rv;
   15789 		}
   15790 	}
   15791 
   15792 	/* Select page 0 */
   15793 	if ((rv = sc->phy.acquire(sc)) != 0)
   15794 		return rv;
   15795 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15796 	sc->phy.release(sc);
   15797 	if (rv != 0)
   15798 		return rv;
   15799 
   15800 	/*
   15801 	 * Configure the K1 Si workaround during phy reset assuming there is
   15802 	 * link so that it disables K1 if link is in 1Gbps.
   15803 	 */
   15804 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15805 		return rv;
   15806 
   15807 	/* Workaround for link disconnects on a busy hub in half duplex */
   15808 	rv = sc->phy.acquire(sc);
   15809 	if (rv)
   15810 		return rv;
   15811 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15812 	if (rv)
   15813 		goto release;
   15814 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15815 	    phy_data & 0x00ff);
   15816 	if (rv)
   15817 		goto release;
   15818 
   15819 	/* Set MSE higher to enable link to stay up when noise is high */
   15820 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15821 release:
   15822 	sc->phy.release(sc);
   15823 
   15824 	return rv;
   15825 }
   15826 
   15827 /*
   15828  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15829  *  @sc:   pointer to the HW structure
   15830  */
   15831 static void
   15832 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15833 {
   15834 	device_t dev = sc->sc_dev;
   15835 	uint32_t mac_reg;
   15836 	uint16_t i, wuce;
   15837 	int count;
   15838 
   15839 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15840 		device_xname(sc->sc_dev), __func__));
   15841 
   15842 	if (sc->phy.acquire(sc) != 0)
   15843 		return;
   15844 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15845 		goto release;
   15846 
   15847 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15848 	count = wm_rar_count(sc);
   15849 	for (i = 0; i < count; i++) {
   15850 		uint16_t lo, hi;
   15851 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15852 		lo = (uint16_t)(mac_reg & 0xffff);
   15853 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15854 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15855 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15856 
   15857 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15858 		lo = (uint16_t)(mac_reg & 0xffff);
   15859 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15860 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15861 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15862 	}
   15863 
   15864 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15865 
   15866 release:
   15867 	sc->phy.release(sc);
   15868 }
   15869 
   15870 /*
   15871  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15872  *  done after every PHY reset.
   15873  */
   15874 static int
   15875 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15876 {
   15877 	device_t dev = sc->sc_dev;
   15878 	int rv;
   15879 
   15880 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15881 		device_xname(dev), __func__));
   15882 	KASSERT(sc->sc_type == WM_T_PCH2);
   15883 
   15884 	/* Set MDIO slow mode before any other MDIO access */
   15885 	rv = wm_set_mdio_slow_mode_hv(sc);
   15886 	if (rv != 0)
   15887 		return rv;
   15888 
   15889 	rv = sc->phy.acquire(sc);
   15890 	if (rv != 0)
   15891 		return rv;
   15892 	/* Set MSE higher to enable link to stay up when noise is high */
   15893 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15894 	if (rv != 0)
   15895 		goto release;
   15896 	/* Drop link after 5 times MSE threshold was reached */
   15897 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15898 release:
   15899 	sc->phy.release(sc);
   15900 
   15901 	return rv;
   15902 }
   15903 
   15904 /**
   15905  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
    15906  *  @sc:   pointer to the HW structure
           *  @link: link up bool flag
   15907  *
   15908  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15909  *  preventing further DMA write requests.  Workaround the issue by disabling
    15910  *  the de-assertion of the clock request when in 1Gbps mode.
   15911  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15912  *  speeds in order to avoid Tx hangs.
   15913  **/
   15914 static int
   15915 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15916 {
   15917 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15918 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15919 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15920 	uint16_t phyreg;
   15921 
   15922 	if (link && (speed == STATUS_SPEED_1000)) {
    15923 		int rv = sc->phy.acquire(sc);
          		if (rv != 0)
          			return rv;
    15924 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15925 		    &phyreg);
   15926 		if (rv != 0)
   15927 			goto release;
   15928 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15929 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15930 		if (rv != 0)
   15931 			goto release;
   15932 		delay(20);
   15933 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15934 
   15935 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15936 		    &phyreg);
   15937 release:
   15938 		sc->phy.release(sc);
   15939 		return rv;
   15940 	}
   15941 
   15942 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15943 
   15944 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15945 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15946 	    || !link
   15947 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15948 		goto update_fextnvm6;
   15949 
   15950 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15951 
   15952 	/* Clear link status transmit timeout */
   15953 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15954 	if (speed == STATUS_SPEED_100) {
   15955 		/* Set inband Tx timeout to 5x10us for 100Half */
   15956 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15957 
   15958 		/* Do not extend the K1 entry latency for 100Half */
   15959 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15960 	} else {
   15961 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15962 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15963 
   15964 		/* Extend the K1 entry latency for 10 Mbps */
   15965 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15966 	}
   15967 
   15968 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15969 
   15970 update_fextnvm6:
   15971 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15972 	return 0;
   15973 }
   15974 
   15975 /*
   15976  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15977  *  @sc:   pointer to the HW structure
   15978  *  @link: link up bool flag
   15979  *
   15980  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   15981  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   15982  *  If link is down, the function will restore the default K1 setting located
   15983  *  in the NVM.
   15984  */
   15985 static int
   15986 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15987 {
   15988 	int k1_enable = sc->sc_nvm_k1_enabled;
   15989 
   15990 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15991 		device_xname(sc->sc_dev), __func__));
   15992 
   15993 	if (sc->phy.acquire(sc) != 0)
   15994 		return -1;
   15995 
   15996 	if (link) {
   15997 		k1_enable = 0;
   15998 
   15999 		/* Link stall fix for link up */
   16000 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16001 		    0x0100);
   16002 	} else {
   16003 		/* Link stall fix for link down */
   16004 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16005 		    0x4100);
   16006 	}
   16007 
   16008 	wm_configure_k1_ich8lan(sc, k1_enable);
   16009 	sc->phy.release(sc);
   16010 
   16011 	return 0;
   16012 }
   16013 
   16014 /*
   16015  *  wm_k1_workaround_lv - K1 Si workaround
   16016  *  @sc:   pointer to the HW structure
   16017  *
   16018  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   16019  *  Disable K1 for 1000 and 100 speeds
   16020  */
   16021 static int
   16022 wm_k1_workaround_lv(struct wm_softc *sc)
   16023 {
   16024 	uint32_t reg;
   16025 	uint16_t phyreg;
   16026 	int rv;
   16027 
   16028 	if (sc->sc_type != WM_T_PCH2)
   16029 		return 0;
   16030 
   16031 	/* Set K1 beacon duration based on 10Mbps speed */
   16032 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16033 	if (rv != 0)
   16034 		return rv;
   16035 
   16036 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16037 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16038 		if (phyreg &
   16039 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    16040 			/* LV 1G/100 packet drop issue workaround */
   16041 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16042 			    &phyreg);
   16043 			if (rv != 0)
   16044 				return rv;
   16045 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16046 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16047 			    phyreg);
   16048 			if (rv != 0)
   16049 				return rv;
   16050 		} else {
   16051 			/* For 10Mbps */
   16052 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16053 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16054 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16055 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16056 		}
   16057 	}
   16058 
   16059 	return 0;
   16060 }
   16061 
   16062 /*
   16063  *  wm_link_stall_workaround_hv - Si workaround
   16064  *  @sc: pointer to the HW structure
   16065  *
   16066  *  This function works around a Si bug where the link partner can get
   16067  *  a link up indication before the PHY does. If small packets are sent
   16068  *  by the link partner they can be placed in the packet buffer without
   16069  *  being properly accounted for by the PHY and will stall preventing
   16070  *  further packets from being received.  The workaround is to clear the
   16071  *  packet buffer after the PHY detects link up.
   16072  */
   16073 static int
   16074 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16075 {
   16076 	uint16_t phyreg;
   16077 
   16078 	if (sc->sc_phytype != WMPHY_82578)
   16079 		return 0;
   16080 
    16081 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   16082 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16083 	if ((phyreg & BMCR_LOOP) != 0)
   16084 		return 0;
   16085 
   16086 	/* Check if link is up and at 1Gbps */
   16087 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16088 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16089 	    | BM_CS_STATUS_SPEED_MASK;
   16090 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16091 		| BM_CS_STATUS_SPEED_1000))
   16092 		return 0;
   16093 
   16094 	delay(200 * 1000);	/* XXX too big */
   16095 
   16096 	/* Flush the packets in the fifo buffer */
   16097 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16098 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16099 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16100 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16101 
   16102 	return 0;
   16103 }
   16104 
   16105 static int
   16106 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16107 {
   16108 	int rv;
   16109 	uint16_t reg;
   16110 
   16111 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16112 	if (rv != 0)
   16113 		return rv;
   16114 
   16115 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16116 	    reg | HV_KMRN_MDIO_SLOW);
   16117 }
   16118 
   16119 /*
   16120  *  wm_configure_k1_ich8lan - Configure K1 power state
   16121  *  @sc: pointer to the HW structure
   16122  *  @enable: K1 state to configure
   16123  *
   16124  *  Configure the K1 power state based on the provided parameter.
   16125  *  Assumes semaphore already acquired.
   16126  */
   16127 static void
   16128 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16129 {
   16130 	uint32_t ctrl, ctrl_ext, tmp;
   16131 	uint16_t kmreg;
   16132 	int rv;
   16133 
   16134 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16135 
   16136 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16137 	if (rv != 0)
   16138 		return;
   16139 
   16140 	if (k1_enable)
   16141 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16142 	else
   16143 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16144 
   16145 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16146 	if (rv != 0)
   16147 		return;
   16148 
   16149 	delay(20);
   16150 
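          	/*
          	 * Briefly force the MAC speed (CTRL_FRCSPD with the speed bits
          	 * cleared, CTRL_EXT_SPD_BYPS set) and then restore it so that
          	 * the new K1 setting takes effect.
          	 */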
   16151 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16152 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16153 
   16154 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16155 	tmp |= CTRL_FRCSPD;
   16156 
   16157 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16158 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16159 	CSR_WRITE_FLUSH(sc);
   16160 	delay(20);
   16161 
   16162 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16163 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16164 	CSR_WRITE_FLUSH(sc);
   16165 	delay(20);
   16166 
   16167 	return;
   16168 }
   16169 
   16170 /* special case - for 82575 - need to do manual init ... */
   16171 static void
   16172 wm_reset_init_script_82575(struct wm_softc *sc)
   16173 {
   16174 	/*
    16175 	 * Remark: this is untested code - we have no board without EEPROM.
    16176 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   16177 	 */
   16178 
   16179 	/* SerDes configuration via SERDESCTRL */
   16180 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16181 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16182 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16183 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16184 
   16185 	/* CCM configuration via CCMCTL register */
   16186 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16187 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16188 
   16189 	/* PCIe lanes configuration */
   16190 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16191 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16192 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16193 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16194 
   16195 	/* PCIe PLL Configuration */
   16196 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16197 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16198 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16199 }
   16200 
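/*
 * Re-derive the MDICNFG external/shared MDIO destination bits from the
 * per-port NVM configuration word; only 82580 parts in SGMII mode need
 * this.
 */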
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

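/*
 *  wm_phy_is_accessible_pchlan - Check if the PHY registers are accessible
 *  @sc: pointer to the HW structure
 *
 *  Try to read the PHY ID registers, switching the PHY into MDIO slow
 *  mode and/or unforcing SMBus mode along the way if required.
 *  Assumes the semaphore is already acquired.
 */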
static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in MDIO slow mode,
	 * set slow mode and try to get the PHY ID again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
		sc->phy.acquire(sc);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, phyreg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

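/*
 *  wm_toggle_lanphypc_pch_lpt - Toggle the LANPHYPC pin value
 *  @sc: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value power-cycles the PHY; afterwards,
 *  wait for the PHY configuration to complete (poll CTRL_EXT_LPCD on
 *  PCH_LPT and newer, a fixed 50ms delay on older parts).
 */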
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

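/*
 *  wm_platform_pm_pch_lpt - Set platform power management values
 *  @sc: pointer to the HW structure
 *  @link: whether the link is currently up
 *
 *  Program the LTR (Latency Tolerance Reporting) values and the OBFF
 *  high water mark from the link speed and the Rx packet buffer
 *  allocation, then enable OBFF.
 */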
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25 * (2^10 - 1) ns.  The scale is encoded as
		 * 0 = 2^0 ns, 1 = 2^5 ns, 2 = 2^10 ns, ..., 5 = 2^25 ns.
		 */
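		/*
		 * Drain time: the Rx buffer (rxa is in KB) less roughly two
		 * maximum-sized frames, converted to bits (* 8) and then to
		 * ns at the current link speed (* 1000 / speed in Mb/s).
		 */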
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
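		/*
		 * Illustrative example (assuming LTRV_VALUE is the 10-bit
		 * value mask): lat_ns = 100000 encodes as value = 98 with
		 * scale = 2, i.e. 98 * 2^10 ns ~= 100 us, rounding up at
		 * each division.
		 */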
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
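		/*
		 * The capability word holds the max snoop latency in its
		 * low 16 bits and the max no-snoop latency in its high
		 * 16 bits, both in the same scale/value encoding.
		 */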
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
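		/*
		 * lat_ns is now roughly the number of KB the link can move
		 * during the tolerated latency; the OBFF high water mark
		 * is the Rx buffer space that remains.
		 */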
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies are set to the same value */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

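		/*
		 * If the PLL frequency field no longer reads back as
		 * unconfigured, the PHY clock is stable and the
		 * workaround loop can stop retrying.
		 */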
		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

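/*
 * Quirk for legacy (INTx) interrupts on PCH_SPT/PCH_CNP: keep the side
 * clock ungated and disable IOSF sideband clock gating and clock
 * requests so that legacy interrupts are delivered reliably.
 */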
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}