      1 /*	$NetBSD: if_wm.c,v 1.597 2018/11/14 03:41:20 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.597 2018/11/14 03:41:20 msaitoh Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <net/rss_config.h>
    120 
    121 #include <netinet/in.h>			/* XXX for struct ip */
    122 #include <netinet/in_systm.h>		/* XXX for struct ip */
    123 #include <netinet/ip.h>			/* XXX for struct ip */
    124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    126 
    127 #include <sys/bus.h>
    128 #include <sys/intr.h>
    129 #include <machine/endian.h>
    130 
    131 #include <dev/mii/mii.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
    160 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    161 #else
    162 #define	DPRINTF(x, y)	/* nothing */
    163 #endif /* WM_DEBUG */
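        /*
         * Illustrative DPRINTF() usage (a sketch, not part of the driver):
         * the second argument is a fully parenthesized printf() argument
         * list, so the macro can pass it through verbatim, e.g.
         *
         *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
         *	    device_xname(sc->sc_dev)));
         */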
    164 
    165 #ifdef NET_MPSAFE
    166 #define WM_MPSAFE	1
    167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    168 #else
    169 #define CALLOUT_FLAGS	0
    170 #endif
    171 
    172 /*
    173  * The maximum number of interrupts used by this device driver.
    174  */
    175 #define WM_MAX_NQUEUEINTR	16
    176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    177 
    178 #ifndef WM_DISABLE_MSI
    179 #define	WM_DISABLE_MSI 0
    180 #endif
    181 #ifndef WM_DISABLE_MSIX
    182 #define	WM_DISABLE_MSIX 0
    183 #endif
    184 
    185 int wm_disable_msi = WM_DISABLE_MSI;
    186 int wm_disable_msix = WM_DISABLE_MSIX;
    187 
    188 #ifndef WM_WATCHDOG_TIMEOUT
    189 #define WM_WATCHDOG_TIMEOUT 5
    190 #endif
    191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    192 
    193 /*
    194  * Transmit descriptor list size.  Due to errata, we can only have
    195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    196  * on >= 82544. We tell the upper layers that they can queue a lot
    197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    198  * of them at a time.
    199  *
    200  * We allow up to 64 DMA segments per packet.  Pathological packet
    201  * chains containing many small mbufs have been observed in zero-copy
    202  * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
    203  * segments, m_defrag() is called to reduce it.
    204  */
    205 #define	WM_NTXSEGS		64
    206 #define	WM_IFQUEUELEN		256
    207 #define	WM_TXQUEUELEN_MAX	64
    208 #define	WM_TXQUEUELEN_MAX_82547	16
    209 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    210 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    211 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    212 #define	WM_NTXDESC_82542	256
    213 #define	WM_NTXDESC_82544	4096
    214 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    215 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    216 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    217 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    218 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
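        /*
         * Because the ring sizes above are powers of two, advancing a ring
         * index reduces to a bitwise AND instead of a modulo.  A worked
         * example of the WM_NEXTTX() arithmetic: with WM_NTXDESC(txq) ==
         * 4096, WM_NTXDESC_MASK(txq) == 0xfff, so WM_NEXTTX(txq, 4095) ==
         * (4095 + 1) & 0xfff == 0, wrapping back to the start of the ring.
         */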
    219 
    220 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    221 
    222 #define	WM_TXINTERQSIZE		256
    223 
    224 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    225 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    226 #endif
    227 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    228 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    229 #endif
    230 
    231 /*
    232  * Receive descriptor list size.  We have one Rx buffer for normal
    233  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    234  * packet.  We allocate 256 receive descriptors, each with a 2k
    235  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    236  */
    237 #define	WM_NRXDESC		256
    238 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    239 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    240 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
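        /*
         * Worked example of the sizing comment above: a ~9k jumbo frame in
         * 2k (MCLBYTES) buffers needs ceil(9018 / 2048) = 5 descriptors, so
         * 256 descriptors can hold floor(256 / 5) = 51, i.e. roughly the 50
         * jumbo packets mentioned above.
         */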
    241 
    242 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    243 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    244 #endif
    245 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    246 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    247 #endif
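        /*
         * How the *_PROCESS_LIMIT knobs are intended to interact (a hedged
         * sketch, not a normative description): the hardware interrupt
         * handler processes at most the *_INTR_PROCESS_LIMIT descriptors and
         * defers the remainder to a softint, which in turn processes at most
         * *_PROCESS_LIMIT descriptors per invocation before rescheduling
         * itself.  With the default interrupt-context limits of 0,
         * essentially all descriptor processing is deferred to the softint.
         */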
    248 
    249 typedef union txdescs {
    250 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    251 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    252 } txdescs_t;
    253 
    254 typedef union rxdescs {
    255 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    256 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    257 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    258 } rxdescs_t;
    259 
    260 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    261 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    262 
    263 /*
    264  * Software state for transmit jobs.
    265  */
    266 struct wm_txsoft {
    267 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    268 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    269 	int txs_firstdesc;		/* first descriptor in packet */
    270 	int txs_lastdesc;		/* last descriptor in packet */
    271 	int txs_ndesc;			/* # of descriptors used */
    272 };
    273 
    274 /*
    275  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    276  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    277  * them together.
    278  */
    279 struct wm_rxsoft {
    280 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    281 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    282 };
    283 
    284 #define WM_LINKUP_TIMEOUT	50
    285 
    286 static uint16_t swfwphysem[] = {
    287 	SWFW_PHY0_SM,
    288 	SWFW_PHY1_SM,
    289 	SWFW_PHY2_SM,
    290 	SWFW_PHY3_SM
    291 };
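        /*
         * The masks above are indexed by MAC function number, e.g.
         * swfwphysem[sc->sc_funcid] selects the SWFW semaphore bit for the
         * PHY belonging to this port.
         */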
    292 
    293 static const uint32_t wm_82580_rxpbs_table[] = {
    294 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    295 };
    296 
    297 struct wm_softc;
    298 
    299 #ifdef WM_EVENT_COUNTERS
    300 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    301 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    302 	struct evcnt qname##_ev_##evname;
    303 
    304 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    305 	do {								\
    306 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    307 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    308 		    "%s%02d%s", #qname, (qnum), #evname);		\
    309 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    310 		    (evtype), NULL, (xname),				\
    311 		    (q)->qname##_##evname##_evcnt_name);		\
    312 	} while (0)
    313 
    314 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    315 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    316 
    317 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    318 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    319 
    320 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    321 	evcnt_detach(&(q)->qname##_ev_##evname);
    322 #endif /* WM_EVENT_COUNTERS */
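        /*
         * For reference, WM_Q_EVCNT_DEFINE(txq, txdw) expands to the members
         *
         *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
         *	struct evcnt txq_ev_txdw;
         *
         * (the ## operators are not expanded inside the string literal, so
         * the name buffer is a fixed 18 bytes), and WM_Q_EVCNT_ATTACH(txq,
         * txdw, q, 0, xname, EVCNT_TYPE_INTR) snprintf()s "txq00txdw" into
         * that buffer and attaches the counter under that name.
         */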
    323 
    324 struct wm_txqueue {
    325 	kmutex_t *txq_lock;		/* lock for tx operations */
    326 
    327 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    328 
    329 	/* Software state for the transmit descriptors. */
    330 	int txq_num;			/* must be a power of two */
    331 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    332 
    333 	/* TX control data structures. */
    334 	int txq_ndesc;			/* must be a power of two */
    335 	size_t txq_descsize;		/* size of a Tx descriptor */
    336 	txdescs_t *txq_descs_u;
    337 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    338 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    339 	int txq_desc_rseg;		/* real number of control segments */
    340 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    341 #define	txq_descs	txq_descs_u->sctxu_txdescs
    342 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    343 
    344 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    345 
    346 	int txq_free;			/* number of free Tx descriptors */
    347 	int txq_next;			/* next ready Tx descriptor */
    348 
    349 	int txq_sfree;			/* number of free Tx jobs */
    350 	int txq_snext;			/* next free Tx job */
    351 	int txq_sdirty;			/* dirty Tx jobs */
    352 
    353 	/* These 4 variables are used only on the 82547. */
    354 	int txq_fifo_size;		/* Tx FIFO size */
    355 	int txq_fifo_head;		/* current head of FIFO */
    356 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    357 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    358 
    359 	/*
    360 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    361 	 * CPUs.  This pcq mediates them without blocking; see the
    362 	 * illustrative sketch after this struct.  */
    363 	pcq_t *txq_interq;
    364 
    365 	/*
    366 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    367 	 * to manage the Tx H/W queue's busy flag.
    368 	 */
    369 	int txq_flags;			/* flags for H/W queue, see below */
    370 #define	WM_TXQ_NO_SPACE	0x1
    371 
    372 	bool txq_stopping;
    373 
    374 	bool txq_sending;
    375 	time_t txq_lastsent;
    376 
    377 	uint32_t txq_packets;		/* for AIM */
    378 	uint32_t txq_bytes;		/* for AIM */
    379 #ifdef WM_EVENT_COUNTERS
    380 	/* TX event counters */
    381 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    382 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    383 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    384 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    385 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    386 					    /* XXX not used? */
    387 
    388 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    389 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    390 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    391 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    392 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    393 	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
    394 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    395 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    396 					    /* other than toomanyseg */
    397 
    398 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
    399 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    400 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    401 
    402 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    403 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    404 #endif /* WM_EVENT_COUNTERS */
    405 };
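        /*
         * Illustrative sketch of the txq_interq hand-off mentioned above
         * (not part of the driver; "m" and the error handling are
         * placeholders).  A sending CPU enqueues an mbuf with pcq_put() and
         * the queue's transmit context later drains it with pcq_get():
         */
        #if 0
        	/* Producer side, e.g. in the if_transmit path: */
        	if (!pcq_put(txq->txq_interq, m)) {
        		m_freem(m);	/* queue full; drop the packet */
        		return ENOBUFS;
        	}

        	/* Consumer side, e.g. in deferred start, with txq_lock held: */
        	while ((m = pcq_get(txq->txq_interq)) != NULL) {
        		/* ... load the DMA map and fill Tx descriptors ... */
        	}
        #endif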
    406 
    407 struct wm_rxqueue {
    408 	kmutex_t *rxq_lock;		/* lock for rx operations */
    409 
    410 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    411 
    412 	/* Software state for the receive descriptors. */
    413 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    414 
    415 	/* RX control data structures. */
    416 	int rxq_ndesc;			/* must be a power of two */
    417 	size_t rxq_descsize;		/* size of an Rx descriptor */
    418 	rxdescs_t *rxq_descs_u;
    419 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    420 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    421 	int rxq_desc_rseg;		/* real number of control segments */
    422 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    423 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    424 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    425 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    426 
    427 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    428 
    429 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    430 	int rxq_discard;
    431 	int rxq_len;
    432 	struct mbuf *rxq_head;
    433 	struct mbuf *rxq_tail;
    434 	struct mbuf **rxq_tailp;
    435 
    436 	bool rxq_stopping;
    437 
    438 	uint32_t rxq_packets;		/* for AIM */
    439 	uint32_t rxq_bytes;		/* for AIM */
    440 #ifdef WM_EVENT_COUNTERS
    441 	/* RX event counters */
    442 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    443 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    444 
    445 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    446 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    447 #endif
    448 };
    449 
    450 struct wm_queue {
    451 	int wmq_id;			/* index of TX/RX queues */
    452 	int wmq_intr_idx;		/* index of MSI-X tables */
    453 
    454 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    455 	bool wmq_set_itr;
    456 
    457 	struct wm_txqueue wmq_txq;
    458 	struct wm_rxqueue wmq_rxq;
    459 
    460 	void *wmq_si;
    461 };
    462 
    463 struct wm_phyop {
    464 	int (*acquire)(struct wm_softc *);
    465 	void (*release)(struct wm_softc *);
    466 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    467 	int (*writereg_locked)(device_t, int, int, uint16_t);
    468 	int reset_delay_us;
    469 };
    470 
    471 struct wm_nvmop {
    472 	int (*acquire)(struct wm_softc *);
    473 	void (*release)(struct wm_softc *);
    474 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    475 };
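        /*
         * These op vectors are filled in once at attach time with the
         * chip-specific routines.  A hedged sketch of the calling convention
         * ("phyaddr", "reg" and the error handling are placeholders):
         */
        #if 0
        	uint16_t val;
        	int rv;

        	if ((rv = sc->phy.acquire(sc)) != 0)
        		return rv;
        	rv = sc->phy.readreg_locked(sc->sc_dev, phyaddr, reg, &val);
        	sc->phy.release(sc);
        #endif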
    476 
    477 /*
    478  * Software state per device.
    479  */
    480 struct wm_softc {
    481 	device_t sc_dev;		/* generic device information */
    482 	bus_space_tag_t sc_st;		/* bus space tag */
    483 	bus_space_handle_t sc_sh;	/* bus space handle */
    484 	bus_size_t sc_ss;		/* bus space size */
    485 	bus_space_tag_t sc_iot;		/* I/O space tag */
    486 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    487 	bus_size_t sc_ios;		/* I/O space size */
    488 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    489 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    490 	bus_size_t sc_flashs;		/* flash registers space size */
    491 	off_t sc_flashreg_offset;	/*
    492 					 * offset to flash registers from
    493 					 * start of BAR
    494 					 */
    495 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    496 
    497 	struct ethercom sc_ethercom;	/* ethernet common data */
    498 	struct mii_data sc_mii;		/* MII/media information */
    499 
    500 	pci_chipset_tag_t sc_pc;
    501 	pcitag_t sc_pcitag;
    502 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    503 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    504 
    505 	uint16_t sc_pcidevid;		/* PCI device ID */
    506 	wm_chip_type sc_type;		/* MAC type */
    507 	int sc_rev;			/* MAC revision */
    508 	wm_phy_type sc_phytype;		/* PHY type */
    509 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    510 #define	WM_MEDIATYPE_UNKNOWN		0x00
    511 #define	WM_MEDIATYPE_FIBER		0x01
    512 #define	WM_MEDIATYPE_COPPER		0x02
    513 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    514 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    515 	int sc_flags;			/* flags; see below */
    516 	int sc_if_flags;		/* last if_flags */
    517 	int sc_flowflags;		/* 802.3x flow control flags */
    518 	int sc_align_tweak;
    519 
    520 	void *sc_ihs[WM_MAX_NINTR];	/*
    521 					 * interrupt cookie.
    522 					 * - legacy and msi use sc_ihs[0] only
    523 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    524 					 */
    525 	pci_intr_handle_t *sc_intrs;	/*
    526 					 * legacy and msi use sc_intrs[0] only
    527 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    528 					 */
    529 	int sc_nintrs;			/* number of interrupts */
    530 
    531 	int sc_link_intr_idx;		/* index of MSI-X tables */
    532 
    533 	callout_t sc_tick_ch;		/* tick callout */
    534 	bool sc_core_stopping;
    535 
    536 	int sc_nvm_ver_major;
    537 	int sc_nvm_ver_minor;
    538 	int sc_nvm_ver_build;
    539 	int sc_nvm_addrbits;		/* NVM address bits */
    540 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    541 	int sc_ich8_flash_base;
    542 	int sc_ich8_flash_bank_size;
    543 	int sc_nvm_k1_enabled;
    544 
    545 	int sc_nqueues;
    546 	struct wm_queue *sc_queue;
    547 	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
    548 	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
    549 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    550 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    551 
    552 	int sc_affinity_offset;
    553 
    554 #ifdef WM_EVENT_COUNTERS
    555 	/* Event counters. */
    556 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    557 
    558 	/* WM_T_82542_2_1 only */
    559 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    560 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    561 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    562 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    563 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    564 #endif /* WM_EVENT_COUNTERS */
    565 
    566 	/* This variable is used only on the 82547. */
    567 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    568 
    569 	uint32_t sc_ctrl;		/* prototype CTRL register */
    570 #if 0
    571 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    572 #endif
    573 	uint32_t sc_icr;		/* prototype interrupt bits */
    574 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    575 	uint32_t sc_tctl;		/* prototype TCTL register */
    576 	uint32_t sc_rctl;		/* prototype RCTL register */
    577 	uint32_t sc_txcw;		/* prototype TXCW register */
    578 	uint32_t sc_tipg;		/* prototype TIPG register */
    579 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    580 	uint32_t sc_pba;		/* prototype PBA register */
    581 
    582 	int sc_tbi_linkup;		/* TBI link status */
    583 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    584 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    585 
    586 	int sc_mchash_type;		/* multicast filter offset */
    587 
    588 	krndsource_t rnd_source;	/* random source */
    589 
    590 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    591 
    592 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    593 	kmutex_t *sc_ich_phymtx;	/*
    594 					 * 82574/82583/ICH/PCH specific PHY
    595 					 * mutex. For 82574/82583, the mutex
    596 					 * is used for both PHY and NVM.
    597 					 */
    598 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    599 
    600 	struct wm_phyop phy;
    601 	struct wm_nvmop nvm;
    602 };
    603 
    604 #define WM_CORE_LOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (0)
    605 #define WM_CORE_UNLOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (0)
    606 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    607 
    608 #define	WM_RXCHAIN_RESET(rxq)						\
    609 do {									\
    610 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    611 	*(rxq)->rxq_tailp = NULL;					\
    612 	(rxq)->rxq_len = 0;						\
    613 } while (/*CONSTCOND*/0)
    614 
    615 #define	WM_RXCHAIN_LINK(rxq, m)						\
    616 do {									\
    617 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    618 	(rxq)->rxq_tailp = &(m)->m_next;				\
    619 } while (/*CONSTCOND*/0)
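        /*
         * The rxq_tailp pointer-to-pointer keeps appends O(1): RESET aims it
         * at rxq_head, and each LINK stores the new mbuf through it and then
         * advances it to the new tail's m_next.  A minimal sketch of
         * chaining a two-buffer packet:
         *
         *	WM_RXCHAIN_RESET(rxq);
         *	WM_RXCHAIN_LINK(rxq, m0);	(now rxq_head == m0)
         *	WM_RXCHAIN_LINK(rxq, m1);	(now m0->m_next == m1)
         */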
    620 
    621 #ifdef WM_EVENT_COUNTERS
    622 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    623 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    624 
    625 #define WM_Q_EVCNT_INCR(qname, evname)			\
    626 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    627 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    628 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    629 #else /* !WM_EVENT_COUNTERS */
    630 #define	WM_EVCNT_INCR(ev)	/* nothing */
    631 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    632 
    633 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    634 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    635 #endif /* !WM_EVENT_COUNTERS */
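        /*
         * Note that WM_Q_EVCNT_INCR()/_ADD() paste their first argument both
         * as the pointer expression and as the member-name prefix, so the
         * local variable must literally be named after the queue:
         *
         *	struct wm_txqueue *txq = ...;
         *	WM_Q_EVCNT_INCR(txq, txdw);	expands to
         *	    (&(txq)->txq_ev_txdw)->ev_count++;
         */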
    636 
    637 #define	CSR_READ(sc, reg)						\
    638 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    639 #define	CSR_WRITE(sc, reg, val)						\
    640 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    641 #define	CSR_WRITE_FLUSH(sc)						\
    642 	(void) CSR_READ((sc), WMREG_STATUS)
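        /*
         * CSR_WRITE_FLUSH() pushes posted PCI writes out to the device by
         * doing a harmless read of the STATUS register.  Typical pattern
         * (a sketch; the register, bit and delay are illustrative):
         *
         *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
         *	CSR_WRITE_FLUSH(sc);
         *	delay(10000);
         */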
    643 
    644 #define ICH8_FLASH_READ32(sc, reg)					\
    645 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    646 	    (reg) + sc->sc_flashreg_offset)
    647 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    648 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    649 	    (reg) + sc->sc_flashreg_offset, (data))
    650 
    651 #define ICH8_FLASH_READ16(sc, reg)					\
    652 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    653 	    (reg) + sc->sc_flashreg_offset)
    654 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    655 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    656 	    (reg) + sc->sc_flashreg_offset, (data))
    657 
    658 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    659 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    660 
    661 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    662 #define	WM_CDTXADDR_HI(txq, x)						\
    663 	(sizeof(bus_addr_t) == 8 ?					\
    664 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    665 
    666 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    667 #define	WM_CDRXADDR_HI(rxq, x)						\
    668 	(sizeof(bus_addr_t) == 8 ?					\
    669 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
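        /*
         * Descriptor base addresses are programmed into the chip as two
         * 32-bit halves.  A worked example of the LO/HI split above,
         * assuming a 64-bit bus_addr_t of 0x123456000:
         *
         *	WM_CDTXADDR_LO() == 0x23456000
         *	WM_CDTXADDR_HI() == 0x00000001
         *
         * On ports with a 32-bit bus_addr_t the HI half is always 0.
         */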
    670 
    671 /*
    672  * Register read/write functions.
    673  * Other than CSR_{READ|WRITE}().
    674  */
    675 #if 0
    676 static inline uint32_t wm_io_read(struct wm_softc *, int);
    677 #endif
    678 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    679 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    680     uint32_t, uint32_t);
    681 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    682 
    683 /*
    684  * Descriptor sync/init functions.
    685  */
    686 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    687 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    688 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    689 
    690 /*
    691  * Device driver interface functions and commonly used functions.
    692  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    693  */
    694 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    695 static int	wm_match(device_t, cfdata_t, void *);
    696 static void	wm_attach(device_t, device_t, void *);
    697 static int	wm_detach(device_t, int);
    698 static bool	wm_suspend(device_t, const pmf_qual_t *);
    699 static bool	wm_resume(device_t, const pmf_qual_t *);
    700 static void	wm_watchdog(struct ifnet *);
    701 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    702     uint16_t *);
    703 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    704     uint16_t *);
    705 static void	wm_tick(void *);
    706 static int	wm_ifflags_cb(struct ethercom *);
    707 static int	wm_ioctl(struct ifnet *, u_long, void *);
    708 /* MAC address related */
    709 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    710 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    711 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    712 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    713 static void	wm_set_filter(struct wm_softc *);
    714 /* Reset and init related */
    715 static void	wm_set_vlan(struct wm_softc *);
    716 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    717 static void	wm_get_auto_rd_done(struct wm_softc *);
    718 static void	wm_lan_init_done(struct wm_softc *);
    719 static void	wm_get_cfg_done(struct wm_softc *);
    720 static void	wm_phy_post_reset(struct wm_softc *);
    721 static int	wm_write_smbus_addr(struct wm_softc *);
    722 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    723 static void	wm_initialize_hardware_bits(struct wm_softc *);
    724 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    725 static void	wm_reset_phy(struct wm_softc *);
    726 static void	wm_flush_desc_rings(struct wm_softc *);
    727 static void	wm_reset(struct wm_softc *);
    728 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    729 static void	wm_rxdrain(struct wm_rxqueue *);
    730 static void	wm_init_rss(struct wm_softc *);
    731 static void	wm_adjust_qnum(struct wm_softc *, int);
    732 static inline bool	wm_is_using_msix(struct wm_softc *);
    733 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    734 static int	wm_softint_establish(struct wm_softc *, int, int);
    735 static int	wm_setup_legacy(struct wm_softc *);
    736 static int	wm_setup_msix(struct wm_softc *);
    737 static int	wm_init(struct ifnet *);
    738 static int	wm_init_locked(struct ifnet *);
    739 static void	wm_unset_stopping_flags(struct wm_softc *);
    740 static void	wm_set_stopping_flags(struct wm_softc *);
    741 static void	wm_stop(struct ifnet *, int);
    742 static void	wm_stop_locked(struct ifnet *, int);
    743 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    744 static void	wm_82547_txfifo_stall(void *);
    745 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    746 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    747 /* DMA related */
    748 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    749 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    750 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    751 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    752     struct wm_txqueue *);
    753 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    754 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    755 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    756     struct wm_rxqueue *);
    757 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    758 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    759 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    760 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    761 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    762 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    763 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    764     struct wm_txqueue *);
    765 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    766     struct wm_rxqueue *);
    767 static int	wm_alloc_txrx_queues(struct wm_softc *);
    768 static void	wm_free_txrx_queues(struct wm_softc *);
    769 static int	wm_init_txrx_queues(struct wm_softc *);
    770 /* Start */
    771 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    772     struct wm_txsoft *, uint32_t *, uint8_t *);
    773 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    774 static void	wm_start(struct ifnet *);
    775 static void	wm_start_locked(struct ifnet *);
    776 static int	wm_transmit(struct ifnet *, struct mbuf *);
    777 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    778 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    779     bool);
    780 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    781     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    782 static void	wm_nq_start(struct ifnet *);
    783 static void	wm_nq_start_locked(struct ifnet *);
    784 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    785 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    786 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    787     bool);
    788 static void	wm_deferred_start_locked(struct wm_txqueue *);
    789 static void	wm_handle_queue(void *);
    790 /* Interrupt */
    791 static bool	wm_txeof(struct wm_txqueue *, u_int);
    792 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    793 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    794 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    795 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    796 static void	wm_linkintr(struct wm_softc *, uint32_t);
    797 static int	wm_intr_legacy(void *);
    798 static inline void	wm_txrxintr_disable(struct wm_queue *);
    799 static inline void	wm_txrxintr_enable(struct wm_queue *);
    800 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    801 static int	wm_txrxintr_msix(void *);
    802 static int	wm_linkintr_msix(void *);
    803 
    804 /*
    805  * Media related.
    806  * GMII, SGMII, TBI, SERDES and SFP.
    807  */
    808 /* Common */
    809 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    810 /* GMII related */
    811 static void	wm_gmii_reset(struct wm_softc *);
    812 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    813 static int	wm_get_phy_id_82575(struct wm_softc *);
    814 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    815 static int	wm_gmii_mediachange(struct ifnet *);
    816 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    817 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    818 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    819 static int	wm_gmii_i82543_readreg(device_t, int, int);
    820 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    821 static int	wm_gmii_mdic_readreg(device_t, int, int);
    822 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    823 static int	wm_gmii_i82544_readreg(device_t, int, int);
    824 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    825 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    826 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    827 static int	wm_gmii_i80003_readreg(device_t, int, int);
    828 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    829 static int	wm_gmii_bm_readreg(device_t, int, int);
    830 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    831 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    832 static int	wm_gmii_hv_readreg(device_t, int, int);
    833 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    834 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    835 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    836 static int	wm_gmii_82580_readreg(device_t, int, int);
    837 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    838 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    839 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    840 static void	wm_gmii_statchg(struct ifnet *);
    841 /*
    842  * Kumeran related (80003, ICH* and PCH*).
    843  * These functions are not for accessing MII registers but for accessing
    844  * Kumeran-specific registers.
    845  */
    846 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    847 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    848 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    849 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    850 /* SGMII */
    851 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    852 static int	wm_sgmii_readreg(device_t, int, int);
    853 static void	wm_sgmii_writereg(device_t, int, int, int);
    854 /* TBI related */
    855 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    856 static void	wm_tbi_mediainit(struct wm_softc *);
    857 static int	wm_tbi_mediachange(struct ifnet *);
    858 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    859 static int	wm_check_for_link(struct wm_softc *);
    860 static void	wm_tbi_tick(struct wm_softc *);
    861 /* SERDES related */
    862 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    863 static int	wm_serdes_mediachange(struct ifnet *);
    864 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    865 static void	wm_serdes_tick(struct wm_softc *);
    866 /* SFP related */
    867 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    868 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    869 
    870 /*
    871  * NVM related.
    872  * Microwire, SPI (with or without EERD) and Flash.
    873  */
    874 /* Misc functions */
    875 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    876 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    877 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    878 /* Microwire */
    879 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    880 /* SPI */
    881 static int	wm_nvm_ready_spi(struct wm_softc *);
    882 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    883 /* Read using EERD */
    884 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    885 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    886 /* Flash */
    887 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    888     unsigned int *);
    889 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    890 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    891 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    892     uint32_t *);
    893 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    894 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    895 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    896 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    897 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    898 /* iNVM */
    899 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    900 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    901 /* Lock, detecting NVM type, validate checksum and read */
    902 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    903 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    904 static int	wm_nvm_validate_checksum(struct wm_softc *);
    905 static void	wm_nvm_version_invm(struct wm_softc *);
    906 static void	wm_nvm_version(struct wm_softc *);
    907 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    908 
    909 /*
    910  * Hardware semaphores.
    911  * Very complex...
    912  */
    913 static int	wm_get_null(struct wm_softc *);
    914 static void	wm_put_null(struct wm_softc *);
    915 static int	wm_get_eecd(struct wm_softc *);
    916 static void	wm_put_eecd(struct wm_softc *);
    917 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    918 static void	wm_put_swsm_semaphore(struct wm_softc *);
    919 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    920 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    921 static int	wm_get_nvm_80003(struct wm_softc *);
    922 static void	wm_put_nvm_80003(struct wm_softc *);
    923 static int	wm_get_nvm_82571(struct wm_softc *);
    924 static void	wm_put_nvm_82571(struct wm_softc *);
    925 static int	wm_get_phy_82575(struct wm_softc *);
    926 static void	wm_put_phy_82575(struct wm_softc *);
    927 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    928 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    929 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    930 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    931 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    932 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    933 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    934 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    935 
    936 /*
    937  * Management mode and power management related subroutines.
    938  * BMC, AMT, suspend/resume and EEE.
    939  */
    940 #if 0
    941 static int	wm_check_mng_mode(struct wm_softc *);
    942 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    943 static int	wm_check_mng_mode_82574(struct wm_softc *);
    944 static int	wm_check_mng_mode_generic(struct wm_softc *);
    945 #endif
    946 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    947 static bool	wm_phy_resetisblocked(struct wm_softc *);
    948 static void	wm_get_hw_control(struct wm_softc *);
    949 static void	wm_release_hw_control(struct wm_softc *);
    950 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    951 static void	wm_smbustopci(struct wm_softc *);
    952 static void	wm_init_manageability(struct wm_softc *);
    953 static void	wm_release_manageability(struct wm_softc *);
    954 static void	wm_get_wakeup(struct wm_softc *);
    955 static int	wm_ulp_disable(struct wm_softc *);
    956 static void	wm_enable_phy_wakeup(struct wm_softc *);
    957 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    958 static void	wm_enable_wakeup(struct wm_softc *);
    959 static void	wm_disable_aspm(struct wm_softc *);
    960 /* LPLU (Low Power Link Up) */
    961 static void	wm_lplu_d0_disable(struct wm_softc *);
    962 /* EEE */
    963 static void	wm_set_eee_i350(struct wm_softc *);
    964 
    965 /*
    966  * Workarounds (mainly PHY related).
    967  * Basically, the PHY workarounds live in the PHY drivers.
    968  */
    969 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    970 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    971 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    972 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    973 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
    974 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    975 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    976 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    977 static void	wm_reset_init_script_82575(struct wm_softc *);
    978 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    979 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    980 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    981 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    982 static void	wm_pll_workaround_i210(struct wm_softc *);
    983 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    984 
    985 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    986     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    987 
    988 /*
    989  * Devices supported by this driver.
    990  */
    991 static const struct wm_product {
    992 	pci_vendor_id_t		wmp_vendor;
    993 	pci_product_id_t	wmp_product;
    994 	const char		*wmp_name;
    995 	wm_chip_type		wmp_type;
    996 	uint32_t		wmp_flags;
    997 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    998 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    999 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1000 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1001 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1002 } wm_products[] = {
   1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1004 	  "Intel i82542 1000BASE-X Ethernet",
   1005 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1006 
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1008 	  "Intel i82543GC 1000BASE-X Ethernet",
   1009 	  WM_T_82543,		WMP_F_FIBER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1012 	  "Intel i82543GC 1000BASE-T Ethernet",
   1013 	  WM_T_82543,		WMP_F_COPPER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1016 	  "Intel i82544EI 1000BASE-T Ethernet",
   1017 	  WM_T_82544,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1020 	  "Intel i82544EI 1000BASE-X Ethernet",
   1021 	  WM_T_82544,		WMP_F_FIBER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1024 	  "Intel i82544GC 1000BASE-T Ethernet",
   1025 	  WM_T_82544,		WMP_F_COPPER },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1028 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1029 	  WM_T_82544,		WMP_F_COPPER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1032 	  "Intel i82540EM 1000BASE-T Ethernet",
   1033 	  WM_T_82540,		WMP_F_COPPER },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1036 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1037 	  WM_T_82540,		WMP_F_COPPER },
   1038 
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1040 	  "Intel i82540EP 1000BASE-T Ethernet",
   1041 	  WM_T_82540,		WMP_F_COPPER },
   1042 
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1044 	  "Intel i82540EP 1000BASE-T Ethernet",
   1045 	  WM_T_82540,		WMP_F_COPPER },
   1046 
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1048 	  "Intel i82540EP 1000BASE-T Ethernet",
   1049 	  WM_T_82540,		WMP_F_COPPER },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1052 	  "Intel i82545EM 1000BASE-T Ethernet",
   1053 	  WM_T_82545,		WMP_F_COPPER },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1056 	  "Intel i82545GM 1000BASE-T Ethernet",
   1057 	  WM_T_82545_3,		WMP_F_COPPER },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1060 	  "Intel i82545GM 1000BASE-X Ethernet",
   1061 	  WM_T_82545_3,		WMP_F_FIBER },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1064 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1065 	  WM_T_82545_3,		WMP_F_SERDES },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1068 	  "Intel i82546EB 1000BASE-T Ethernet",
   1069 	  WM_T_82546,		WMP_F_COPPER },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1072 	  "Intel i82546EB 1000BASE-T Ethernet",
   1073 	  WM_T_82546,		WMP_F_COPPER },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1076 	  "Intel i82545EM 1000BASE-X Ethernet",
   1077 	  WM_T_82545,		WMP_F_FIBER },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1080 	  "Intel i82546EB 1000BASE-X Ethernet",
   1081 	  WM_T_82546,		WMP_F_FIBER },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1084 	  "Intel i82546GB 1000BASE-T Ethernet",
   1085 	  WM_T_82546_3,		WMP_F_COPPER },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1088 	  "Intel i82546GB 1000BASE-X Ethernet",
   1089 	  WM_T_82546_3,		WMP_F_FIBER },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1092 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1093 	  WM_T_82546_3,		WMP_F_SERDES },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1096 	  "i82546GB quad-port Gigabit Ethernet",
   1097 	  WM_T_82546_3,		WMP_F_COPPER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1100 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1101 	  WM_T_82546_3,		WMP_F_COPPER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1104 	  "Intel PRO/1000MT (82546GB)",
   1105 	  WM_T_82546_3,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1108 	  "Intel i82541EI 1000BASE-T Ethernet",
   1109 	  WM_T_82541,		WMP_F_COPPER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1112 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1113 	  WM_T_82541,		WMP_F_COPPER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1116 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1117 	  WM_T_82541,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1120 	  "Intel i82541ER 1000BASE-T Ethernet",
   1121 	  WM_T_82541_2,		WMP_F_COPPER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1124 	  "Intel i82541GI 1000BASE-T Ethernet",
   1125 	  WM_T_82541_2,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1128 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1129 	  WM_T_82541_2,		WMP_F_COPPER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1132 	  "Intel i82541PI 1000BASE-T Ethernet",
   1133 	  WM_T_82541_2,		WMP_F_COPPER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1136 	  "Intel i82547EI 1000BASE-T Ethernet",
   1137 	  WM_T_82547,		WMP_F_COPPER },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1140 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1141 	  WM_T_82547,		WMP_F_COPPER },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1144 	  "Intel i82547GI 1000BASE-T Ethernet",
   1145 	  WM_T_82547_2,		WMP_F_COPPER },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1148 	  "Intel PRO/1000 PT (82571EB)",
   1149 	  WM_T_82571,		WMP_F_COPPER },
   1150 
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1152 	  "Intel PRO/1000 PF (82571EB)",
   1153 	  WM_T_82571,		WMP_F_FIBER },
   1154 
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1156 	  "Intel PRO/1000 PB (82571EB)",
   1157 	  WM_T_82571,		WMP_F_SERDES },
   1158 
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1160 	  "Intel PRO/1000 QT (82571EB)",
   1161 	  WM_T_82571,		WMP_F_COPPER },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1164 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1165 	  WM_T_82571,		WMP_F_COPPER, },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1168 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1169 	  WM_T_82571,		WMP_F_COPPER, },
   1170 
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1172 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1173 	  WM_T_82571,		WMP_F_SERDES, },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1176 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1177 	  WM_T_82571,		WMP_F_SERDES, },
   1178 
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1180 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1181 	  WM_T_82571,		WMP_F_FIBER, },
   1182 
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1184 	  "Intel i82572EI 1000baseT Ethernet",
   1185 	  WM_T_82572,		WMP_F_COPPER },
   1186 
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1188 	  "Intel i82572EI 1000baseX Ethernet",
   1189 	  WM_T_82572,		WMP_F_FIBER },
   1190 
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1192 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1193 	  WM_T_82572,		WMP_F_SERDES },
   1194 
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1196 	  "Intel i82572EI 1000baseT Ethernet",
   1197 	  WM_T_82572,		WMP_F_COPPER },
   1198 
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1200 	  "Intel i82573E",
   1201 	  WM_T_82573,		WMP_F_COPPER },
   1202 
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1204 	  "Intel i82573E IAMT",
   1205 	  WM_T_82573,		WMP_F_COPPER },
   1206 
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1208 	  "Intel i82573L Gigabit Ethernet",
   1209 	  WM_T_82573,		WMP_F_COPPER },
   1210 
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1212 	  "Intel i82574L",
   1213 	  WM_T_82574,		WMP_F_COPPER },
   1214 
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1216 	  "Intel i82574L",
   1217 	  WM_T_82574,		WMP_F_COPPER },
   1218 
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1220 	  "Intel i82583V",
   1221 	  WM_T_82583,		WMP_F_COPPER },
   1222 
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1224 	  "i80003 dual 1000baseT Ethernet",
   1225 	  WM_T_80003,		WMP_F_COPPER },
   1226 
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1228 	  "i80003 dual 1000baseX Ethernet",
   1229 	  WM_T_80003,		WMP_F_COPPER },
   1230 
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1232 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1233 	  WM_T_80003,		WMP_F_SERDES },
   1234 
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1236 	  "Intel i80003 1000baseT Ethernet",
   1237 	  WM_T_80003,		WMP_F_COPPER },
   1238 
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1240 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1241 	  WM_T_80003,		WMP_F_SERDES },
   1242 
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1244 	  "Intel i82801H (M_AMT) LAN Controller",
   1245 	  WM_T_ICH8,		WMP_F_COPPER },
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1247 	  "Intel i82801H (AMT) LAN Controller",
   1248 	  WM_T_ICH8,		WMP_F_COPPER },
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1250 	  "Intel i82801H LAN Controller",
   1251 	  WM_T_ICH8,		WMP_F_COPPER },
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1253 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1254 	  WM_T_ICH8,		WMP_F_COPPER },
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1256 	  "Intel i82801H (M) LAN Controller",
   1257 	  WM_T_ICH8,		WMP_F_COPPER },
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1259 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1260 	  WM_T_ICH8,		WMP_F_COPPER },
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1262 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1263 	  WM_T_ICH8,		WMP_F_COPPER },
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1265 	  "82567V-3 LAN Controller",
   1266 	  WM_T_ICH8,		WMP_F_COPPER },
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1268 	  "82801I (AMT) LAN Controller",
   1269 	  WM_T_ICH9,		WMP_F_COPPER },
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1271 	  "82801I 10/100 LAN Controller",
   1272 	  WM_T_ICH9,		WMP_F_COPPER },
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1274 	  "82801I (G) 10/100 LAN Controller",
   1275 	  WM_T_ICH9,		WMP_F_COPPER },
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1277 	  "82801I (GT) 10/100 LAN Controller",
   1278 	  WM_T_ICH9,		WMP_F_COPPER },
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1280 	  "82801I (C) LAN Controller",
   1281 	  WM_T_ICH9,		WMP_F_COPPER },
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1283 	  "82801I mobile LAN Controller",
   1284 	  WM_T_ICH9,		WMP_F_COPPER },
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1286 	  "82801I mobile (V) LAN Controller",
   1287 	  WM_T_ICH9,		WMP_F_COPPER },
   1288 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1289 	  "82801I mobile (AMT) LAN Controller",
   1290 	  WM_T_ICH9,		WMP_F_COPPER },
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1292 	  "82567LM-4 LAN Controller",
   1293 	  WM_T_ICH9,		WMP_F_COPPER },
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1295 	  "82567LM-2 LAN Controller",
   1296 	  WM_T_ICH10,		WMP_F_COPPER },
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1298 	  "82567LF-2 LAN Controller",
   1299 	  WM_T_ICH10,		WMP_F_COPPER },
   1300 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1301 	  "82567LM-3 LAN Controller",
   1302 	  WM_T_ICH10,		WMP_F_COPPER },
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1304 	  "82567LF-3 LAN Controller",
   1305 	  WM_T_ICH10,		WMP_F_COPPER },
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1307 	  "82567V-2 LAN Controller",
   1308 	  WM_T_ICH10,		WMP_F_COPPER },
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1310 	  "82567V-3? LAN Controller",
   1311 	  WM_T_ICH10,		WMP_F_COPPER },
   1312 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1313 	  "HANKSVILLE LAN Controller",
   1314 	  WM_T_ICH10,		WMP_F_COPPER },
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1316 	  "PCH LAN (82577LM) Controller",
   1317 	  WM_T_PCH,		WMP_F_COPPER },
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1319 	  "PCH LAN (82577LC) Controller",
   1320 	  WM_T_PCH,		WMP_F_COPPER },
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1322 	  "PCH LAN (82578DM) Controller",
   1323 	  WM_T_PCH,		WMP_F_COPPER },
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1325 	  "PCH LAN (82578DC) Controller",
   1326 	  WM_T_PCH,		WMP_F_COPPER },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1328 	  "PCH2 LAN (82579LM) Controller",
   1329 	  WM_T_PCH2,		WMP_F_COPPER },
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1331 	  "PCH2 LAN (82579V) Controller",
   1332 	  WM_T_PCH2,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1334 	  "82575EB dual-1000baseT Ethernet",
   1335 	  WM_T_82575,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1337 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1338 	  WM_T_82575,		WMP_F_SERDES },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1340 	  "82575GB quad-1000baseT Ethernet",
   1341 	  WM_T_82575,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1343 	  "82575GB quad-1000baseT Ethernet (PM)",
   1344 	  WM_T_82575,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1346 	  "82576 1000BaseT Ethernet",
   1347 	  WM_T_82576,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1349 	  "82576 1000BaseX Ethernet",
   1350 	  WM_T_82576,		WMP_F_FIBER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1353 	  "82576 gigabit Ethernet (SERDES)",
   1354 	  WM_T_82576,		WMP_F_SERDES },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1357 	  "82576 quad-1000BaseT Ethernet",
   1358 	  WM_T_82576,		WMP_F_COPPER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1361 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1362 	  WM_T_82576,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1365 	  "82576 gigabit Ethernet",
   1366 	  WM_T_82576,		WMP_F_COPPER },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1369 	  "82576 gigabit Ethernet (SERDES)",
   1370 	  WM_T_82576,		WMP_F_SERDES },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1372 	  "82576 quad-gigabit Ethernet (SERDES)",
   1373 	  WM_T_82576,		WMP_F_SERDES },
   1374 
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1376 	  "82580 1000BaseT Ethernet",
   1377 	  WM_T_82580,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1379 	  "82580 1000BaseX Ethernet",
   1380 	  WM_T_82580,		WMP_F_FIBER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1383 	  "82580 1000BaseT Ethernet (SERDES)",
   1384 	  WM_T_82580,		WMP_F_SERDES },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1387 	  "82580 gigabit Ethernet (SGMII)",
   1388 	  WM_T_82580,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1390 	  "82580 dual-1000BaseT Ethernet",
   1391 	  WM_T_82580,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1394 	  "82580 quad-1000BaseX Ethernet",
   1395 	  WM_T_82580,		WMP_F_FIBER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1398 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1399 	  WM_T_82580,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1402 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1403 	  WM_T_82580,		WMP_F_SERDES },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1406 	  "DH89XXCC 1000BASE-KX Ethernet",
   1407 	  WM_T_82580,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1410 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1411 	  WM_T_82580,		WMP_F_SERDES },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1414 	  "I350 Gigabit Network Connection",
   1415 	  WM_T_I350,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1418 	  "I350 Gigabit Fiber Network Connection",
   1419 	  WM_T_I350,		WMP_F_FIBER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1422 	  "I350 Gigabit Backplane Connection",
   1423 	  WM_T_I350,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1426 	  "I350 Quad Port Gigabit Ethernet",
   1427 	  WM_T_I350,		WMP_F_SERDES },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1430 	  "I350 Gigabit Connection",
   1431 	  WM_T_I350,		WMP_F_COPPER },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1434 	  "I354 Gigabit Ethernet (KX)",
   1435 	  WM_T_I354,		WMP_F_SERDES },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1438 	  "I354 Gigabit Ethernet (SGMII)",
   1439 	  WM_T_I354,		WMP_F_COPPER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1442 	  "I354 Gigabit Ethernet (2.5G)",
   1443 	  WM_T_I354,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1446 	  "I210-T1 Ethernet Server Adapter",
   1447 	  WM_T_I210,		WMP_F_COPPER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1450 	  "I210 Ethernet (Copper OEM)",
   1451 	  WM_T_I210,		WMP_F_COPPER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1454 	  "I210 Ethernet (Copper IT)",
   1455 	  WM_T_I210,		WMP_F_COPPER },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1458 	  "I210 Ethernet (FLASH less)",
   1459 	  WM_T_I210,		WMP_F_COPPER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1462 	  "I210 Gigabit Ethernet (Fiber)",
   1463 	  WM_T_I210,		WMP_F_FIBER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1466 	  "I210 Gigabit Ethernet (SERDES)",
   1467 	  WM_T_I210,		WMP_F_SERDES },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1470 	  "I210 Gigabit Ethernet (FLASH less)",
   1471 	  WM_T_I210,		WMP_F_SERDES },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1474 	  "I210 Gigabit Ethernet (SGMII)",
   1475 	  WM_T_I210,		WMP_F_COPPER },
   1476 
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1478 	  "I211 Ethernet (COPPER)",
   1479 	  WM_T_I211,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1481 	  "I217 V Ethernet Connection",
   1482 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1484 	  "I217 LM Ethernet Connection",
   1485 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1487 	  "I218 V Ethernet Connection",
   1488 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1490 	  "I218 V Ethernet Connection",
   1491 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1493 	  "I218 V Ethernet Connection",
   1494 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1496 	  "I218 LM Ethernet Connection",
   1497 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1499 	  "I218 LM Ethernet Connection",
   1500 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1502 	  "I218 LM Ethernet Connection",
   1503 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1505 	  "I219 V Ethernet Connection",
   1506 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1508 	  "I219 V Ethernet Connection",
   1509 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1511 	  "I219 V Ethernet Connection",
   1512 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1514 	  "I219 V Ethernet Connection",
   1515 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1517 	  "I219 LM Ethernet Connection",
   1518 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1520 	  "I219 LM Ethernet Connection",
   1521 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1523 	  "I219 LM Ethernet Connection",
   1524 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1526 	  "I219 LM Ethernet Connection",
   1527 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1529 	  "I219 LM Ethernet Connection",
   1530 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1532 	  "I219 V Ethernet Connection",
   1533 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1535 	  "I219 V Ethernet Connection",
   1536 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1538 	  "I219 LM Ethernet Connection",
   1539 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_CNP,		WMP_F_COPPER },
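	/* Sentinel entry: wm_lookup() stops at the NULL product name. */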
   1543 	{ 0,			0,
   1544 	  NULL,
   1545 	  0,			0 },
   1546 };
   1547 
   1548 /*
   1549  * Register read/write functions.
   1550  * Other than CSR_{READ|WRITE}().
   1551  */
   1552 
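/*
 * Indirect register access through the I/O BAR: offset 0 is an address
 * window and offset 4 a data window, so writing a register offset to
 * the former and then accessing the latter reads or writes that
 * register via I/O space.
 */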
   1553 #if 0 /* Not currently used */
   1554 static inline uint32_t
   1555 wm_io_read(struct wm_softc *sc, int reg)
   1556 {
   1557 
   1558 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1559 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1560 }
   1561 #endif
   1562 
   1563 static inline void
   1564 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1565 {
   1566 
   1567 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1568 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1569 }
   1570 
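/*
 * Write one byte to an 8-bit-wide controller register reached through
 * an indirect window (the SCTL_CTL_* constants suggest the SCTL
 * register): the data and register offset are packed into one write,
 * and the READY bit is then polled until the access completes.
 */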
   1571 static inline void
   1572 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1573     uint32_t data)
   1574 {
   1575 	uint32_t regval;
   1576 	int i;
   1577 
   1578 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1579 
   1580 	CSR_WRITE(sc, reg, regval);
   1581 
   1582 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1583 		delay(5);
   1584 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1585 			break;
   1586 	}
   1587 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1588 		aprint_error("%s: WARNING:"
   1589 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1590 		    device_xname(sc->sc_dev), reg);
   1591 	}
   1592 }
   1593 
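/*
 * Store a DMA address as the two little-endian 32-bit halves of a
 * descriptor address field; the high half is zero when bus_addr_t is
 * only 32 bits wide.
 */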
   1594 static inline void
   1595 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1596 {
   1597 	wa->wa_low = htole32(v & 0xffffffffU);
   1598 	if (sizeof(bus_addr_t) == 8)
   1599 		wa->wa_high = htole32((uint64_t) v >> 32);
   1600 	else
   1601 		wa->wa_high = 0;
   1602 }
   1603 
   1604 /*
   1605  * Descriptor sync/init functions.
   1606  */
   1607 static inline void
   1608 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1609 {
   1610 	struct wm_softc *sc = txq->txq_sc;
   1611 
   1612 	/* If it will wrap around, sync to the end of the ring. */
   1613 	if ((start + num) > WM_NTXDESC(txq)) {
   1614 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1615 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1616 		    (WM_NTXDESC(txq) - start), ops);
   1617 		num -= (WM_NTXDESC(txq) - start);
   1618 		start = 0;
   1619 	}
   1620 
   1621 	/* Now sync whatever is left. */
   1622 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1623 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1624 }
   1625 
   1626 static inline void
   1627 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1628 {
   1629 	struct wm_softc *sc = rxq->rxq_sc;
   1630 
   1631 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1632 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1633 }
   1634 
   1635 static inline void
   1636 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1637 {
   1638 	struct wm_softc *sc = rxq->rxq_sc;
   1639 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1640 	struct mbuf *m = rxs->rxs_mbuf;
   1641 
   1642 	/*
   1643 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1644 	 * so that the payload after the Ethernet header is aligned
   1645 	 * to a 4-byte boundary.
	 *
   1647 	 * XXX BRAINDAMAGE ALERT!
   1648 	 * The stupid chip uses the same size for every buffer, which
   1649 	 * is set in the Receive Control register.  We are using the 2K
   1650 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1651 	 * reason, we can't "scoot" packets longer than the standard
   1652 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1653 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1654 	 * the upper layer copy the headers.
   1655 	 */
   1656 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1657 
   1658 	if (sc->sc_type == WM_T_82574) {
   1659 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1660 		rxd->erx_data.erxd_addr =
   1661 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1662 		rxd->erx_data.erxd_dd = 0;
   1663 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1664 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1665 
   1666 		rxd->nqrx_data.nrxd_paddr =
   1667 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1668 		/* Currently, split header is not supported. */
   1669 		rxd->nqrx_data.nrxd_haddr = 0;
   1670 	} else {
   1671 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1672 
   1673 		wm_set_dma_addr(&rxd->wrx_addr,
   1674 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1675 		rxd->wrx_len = 0;
   1676 		rxd->wrx_cksum = 0;
   1677 		rxd->wrx_status = 0;
   1678 		rxd->wrx_errors = 0;
   1679 		rxd->wrx_special = 0;
   1680 	}
   1681 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1682 
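	/* Write the new tail pointer to hand the descriptor back to the chip. */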
   1683 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1684 }
   1685 
   1686 /*
   1687  * Device driver interface functions and commonly used functions.
   1688  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1689  */
   1690 
   1691 /* Lookup supported device table */
   1692 static const struct wm_product *
   1693 wm_lookup(const struct pci_attach_args *pa)
   1694 {
   1695 	const struct wm_product *wmp;
   1696 
   1697 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1698 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1699 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1700 			return wmp;
   1701 	}
   1702 	return NULL;
   1703 }
   1704 
   1705 /* The match function (ca_match) */
   1706 static int
   1707 wm_match(device_t parent, cfdata_t cf, void *aux)
   1708 {
   1709 	struct pci_attach_args *pa = aux;
   1710 
   1711 	if (wm_lookup(pa) != NULL)
   1712 		return 1;
   1713 
   1714 	return 0;
   1715 }
   1716 
   1717 /* The attach function (ca_attach) */
   1718 static void
   1719 wm_attach(device_t parent, device_t self, void *aux)
   1720 {
   1721 	struct wm_softc *sc = device_private(self);
   1722 	struct pci_attach_args *pa = aux;
   1723 	prop_dictionary_t dict;
   1724 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1725 	pci_chipset_tag_t pc = pa->pa_pc;
   1726 	int counts[PCI_INTR_TYPE_SIZE];
   1727 	pci_intr_type_t max_type;
   1728 	const char *eetype, *xname;
   1729 	bus_space_tag_t memt;
   1730 	bus_space_handle_t memh;
   1731 	bus_size_t memsize;
   1732 	int memh_valid;
   1733 	int i, error;
   1734 	const struct wm_product *wmp;
   1735 	prop_data_t ea;
   1736 	prop_number_t pn;
   1737 	uint8_t enaddr[ETHER_ADDR_LEN];
   1738 	char buf[256];
   1739 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1740 	pcireg_t preg, memtype;
   1741 	uint16_t eeprom_data, apme_mask;
   1742 	bool force_clear_smbi;
   1743 	uint32_t link_mode;
   1744 	uint32_t reg;
   1745 
   1746 	sc->sc_dev = self;
   1747 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1748 	sc->sc_core_stopping = false;
   1749 
   1750 	wmp = wm_lookup(pa);
   1751 #ifdef DIAGNOSTIC
   1752 	if (wmp == NULL) {
   1753 		printf("\n");
   1754 		panic("wm_attach: impossible");
   1755 	}
   1756 #endif
   1757 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1758 
   1759 	sc->sc_pc = pa->pa_pc;
   1760 	sc->sc_pcitag = pa->pa_tag;
   1761 
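	/* Prefer the 64-bit DMA tag whenever 64-bit DMA is available. */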
   1762 	if (pci_dma64_available(pa))
   1763 		sc->sc_dmat = pa->pa_dmat64;
   1764 	else
   1765 		sc->sc_dmat = pa->pa_dmat;
   1766 
   1767 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1768 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1769 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1770 
   1771 	sc->sc_type = wmp->wmp_type;
   1772 
   1773 	/* Set default function pointers */
   1774 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1775 	sc->phy.release = sc->nvm.release = wm_put_null;
   1776 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1777 
   1778 	if (sc->sc_type < WM_T_82543) {
   1779 		if (sc->sc_rev < 2) {
   1780 			aprint_error_dev(sc->sc_dev,
   1781 			    "i82542 must be at least rev. 2\n");
   1782 			return;
   1783 		}
   1784 		if (sc->sc_rev < 3)
   1785 			sc->sc_type = WM_T_82542_2_0;
   1786 	}
   1787 
   1788 	/*
   1789 	 * Disable MSI for Errata:
   1790 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1791 	 *
   1792 	 *  82544: Errata 25
   1793 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1794 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1795 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1796 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1797 	 *
   1798 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1799 	 *
   1800 	 *  82571 & 82572: Errata 63
   1801 	 */
   1802 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1803 	    || (sc->sc_type == WM_T_82572))
   1804 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1805 
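	/* 82575 and newer devices use the new-style (advanced) queues. */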
   1806 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1807 	    || (sc->sc_type == WM_T_82580)
   1808 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1809 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1810 		sc->sc_flags |= WM_F_NEWQUEUE;
   1811 
   1812 	/* Set device properties (mactype) */
   1813 	dict = device_properties(sc->sc_dev);
   1814 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1815 
   1816 	/*
	 * Map the device.  All devices support memory-mapped access,
   1818 	 * and it is really required for normal operation.
   1819 	 */
   1820 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1821 	switch (memtype) {
   1822 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1823 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1824 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1825 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1826 		break;
   1827 	default:
   1828 		memh_valid = 0;
   1829 		break;
   1830 	}
   1831 
   1832 	if (memh_valid) {
   1833 		sc->sc_st = memt;
   1834 		sc->sc_sh = memh;
   1835 		sc->sc_ss = memsize;
   1836 	} else {
   1837 		aprint_error_dev(sc->sc_dev,
   1838 		    "unable to map device registers\n");
   1839 		return;
   1840 	}
   1841 
   1842 	/*
   1843 	 * In addition, i82544 and later support I/O mapped indirect
   1844 	 * register access.  It is not desirable (nor supported in
   1845 	 * this driver) to use it for normal operation, though it is
   1846 	 * required to work around bugs in some chip versions.
   1847 	 */
   1848 	if (sc->sc_type >= WM_T_82544) {
   1849 		/* First we have to find the I/O BAR. */
   1850 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1851 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1852 			if (memtype == PCI_MAPREG_TYPE_IO)
   1853 				break;
   1854 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1855 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1856 				i += 4;	/* skip high bits, too */
   1857 		}
   1858 		if (i < PCI_MAPREG_END) {
   1859 			/*
			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because the newer chips
			 * don't have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it hasn't
			 * been configured.
   1868 			 */
   1869 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1870 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1871 				aprint_error_dev(sc->sc_dev,
   1872 				    "WARNING: I/O BAR at zero.\n");
   1873 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1874 					0, &sc->sc_iot, &sc->sc_ioh,
   1875 					NULL, &sc->sc_ios) == 0) {
   1876 				sc->sc_flags |= WM_F_IOH_VALID;
   1877 			} else
   1878 				aprint_error_dev(sc->sc_dev,
   1879 				    "WARNING: unable to map I/O space\n");
   1880 		}
   1881 
   1882 	}
   1883 
   1884 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1885 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1886 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1887 	if (sc->sc_type < WM_T_82542_2_1)
   1888 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1889 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1890 
   1891 	/* power up chip */
   1892 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1893 	    && error != EOPNOTSUPP) {
   1894 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1895 		return;
   1896 	}
   1897 
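	/* Scale the number of Tx/Rx queues to the available MSI-X vectors. */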
   1898 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1899 	/*
	 *  Don't use MSI-X if we can use only one queue, in order to save
	 * interrupt resources.
   1902 	 */
   1903 	if (sc->sc_nqueues > 1) {
   1904 		max_type = PCI_INTR_TYPE_MSIX;
   1905 		/*
		 *  The 82583 has an MSI-X capability in its PCI configuration
		 * space, but the device doesn't support MSI-X. At least the
		 * documentation doesn't say anything about MSI-X.
   1909 		 */
   1910 		counts[PCI_INTR_TYPE_MSIX]
   1911 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1912 	} else {
   1913 		max_type = PCI_INTR_TYPE_MSI;
   1914 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1915 	}
   1916 
   1917 	/* Allocation settings */
   1918 	counts[PCI_INTR_TYPE_MSI] = 1;
   1919 	counts[PCI_INTR_TYPE_INTX] = 1;
   1920 	/* overridden by disable flags */
   1921 	if (wm_disable_msi != 0) {
   1922 		counts[PCI_INTR_TYPE_MSI] = 0;
   1923 		if (wm_disable_msix != 0) {
   1924 			max_type = PCI_INTR_TYPE_INTX;
   1925 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1926 		}
   1927 	} else if (wm_disable_msix != 0) {
   1928 		max_type = PCI_INTR_TYPE_MSI;
   1929 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1930 	}
   1931 
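	/*
	 * Allocate interrupts, falling back from MSI-X to MSI to INTx
	 * whenever allocation or setup of the preferred type fails.
	 */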
   1932 alloc_retry:
   1933 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1934 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1935 		return;
   1936 	}
   1937 
   1938 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1939 		error = wm_setup_msix(sc);
   1940 		if (error) {
   1941 			pci_intr_release(pc, sc->sc_intrs,
   1942 			    counts[PCI_INTR_TYPE_MSIX]);
   1943 
   1944 			/* Setup for MSI: Disable MSI-X */
   1945 			max_type = PCI_INTR_TYPE_MSI;
   1946 			counts[PCI_INTR_TYPE_MSI] = 1;
   1947 			counts[PCI_INTR_TYPE_INTX] = 1;
   1948 			goto alloc_retry;
   1949 		}
   1950 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1951 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1952 		error = wm_setup_legacy(sc);
   1953 		if (error) {
   1954 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1955 			    counts[PCI_INTR_TYPE_MSI]);
   1956 
   1957 			/* The next try is for INTx: Disable MSI */
   1958 			max_type = PCI_INTR_TYPE_INTX;
   1959 			counts[PCI_INTR_TYPE_INTX] = 1;
   1960 			goto alloc_retry;
   1961 		}
   1962 	} else {
   1963 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1964 		error = wm_setup_legacy(sc);
   1965 		if (error) {
   1966 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1967 			    counts[PCI_INTR_TYPE_INTX]);
   1968 			return;
   1969 		}
   1970 	}
   1971 
   1972 	/*
   1973 	 * Check the function ID (unit number of the chip).
   1974 	 */
   1975 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1976 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1977 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1978 	    || (sc->sc_type == WM_T_82580)
   1979 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1980 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1981 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1982 	else
   1983 		sc->sc_funcid = 0;
   1984 
   1985 	/*
   1986 	 * Determine a few things about the bus we're connected to.
   1987 	 */
   1988 	if (sc->sc_type < WM_T_82543) {
   1989 		/* We don't really know the bus characteristics here. */
   1990 		sc->sc_bus_speed = 33;
   1991 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1992 		/*
   1993 		 * CSA (Communication Streaming Architecture) is about as fast
		 * as a 32-bit 66MHz PCI bus.
   1995 		 */
   1996 		sc->sc_flags |= WM_F_CSA;
   1997 		sc->sc_bus_speed = 66;
   1998 		aprint_verbose_dev(sc->sc_dev,
   1999 		    "Communication Streaming Architecture\n");
   2000 		if (sc->sc_type == WM_T_82547) {
   2001 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2002 			callout_setfunc(&sc->sc_txfifo_ch,
   2003 			    wm_82547_txfifo_stall, sc);
   2004 			aprint_verbose_dev(sc->sc_dev,
   2005 			    "using 82547 Tx FIFO stall work-around\n");
   2006 		}
   2007 	} else if (sc->sc_type >= WM_T_82571) {
   2008 		sc->sc_flags |= WM_F_PCIE;
   2009 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2010 		    && (sc->sc_type != WM_T_ICH10)
   2011 		    && (sc->sc_type != WM_T_PCH)
   2012 		    && (sc->sc_type != WM_T_PCH2)
   2013 		    && (sc->sc_type != WM_T_PCH_LPT)
   2014 		    && (sc->sc_type != WM_T_PCH_SPT)
   2015 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2016 			/* ICH* and PCH* have no PCIe capability registers */
   2017 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2018 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2019 				NULL) == 0)
   2020 				aprint_error_dev(sc->sc_dev,
   2021 				    "unable to find PCIe capability\n");
   2022 		}
   2023 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2024 	} else {
   2025 		reg = CSR_READ(sc, WMREG_STATUS);
   2026 		if (reg & STATUS_BUS64)
   2027 			sc->sc_flags |= WM_F_BUS64;
   2028 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2029 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2030 
   2031 			sc->sc_flags |= WM_F_PCIX;
   2032 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2033 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2034 				aprint_error_dev(sc->sc_dev,
   2035 				    "unable to find PCIX capability\n");
   2036 			else if (sc->sc_type != WM_T_82545_3 &&
   2037 				 sc->sc_type != WM_T_82546_3) {
   2038 				/*
   2039 				 * Work around a problem caused by the BIOS
   2040 				 * setting the max memory read byte count
   2041 				 * incorrectly.
   2042 				 */
   2043 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2044 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2045 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2046 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2047 
   2048 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2049 				    PCIX_CMD_BYTECNT_SHIFT;
   2050 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2051 				    PCIX_STATUS_MAXB_SHIFT;
   2052 				if (bytecnt > maxb) {
   2053 					aprint_verbose_dev(sc->sc_dev,
   2054 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2055 					    512 << bytecnt, 512 << maxb);
   2056 					pcix_cmd = (pcix_cmd &
   2057 					    ~PCIX_CMD_BYTECNT_MASK) |
   2058 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2059 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2060 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2061 					    pcix_cmd);
   2062 				}
   2063 			}
   2064 		}
   2065 		/*
   2066 		 * The quad port adapter is special; it has a PCIX-PCIX
   2067 		 * bridge on the board, and can run the secondary bus at
   2068 		 * a higher speed.
   2069 		 */
   2070 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2071 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2072 								      : 66;
   2073 		} else if (sc->sc_flags & WM_F_PCIX) {
   2074 			switch (reg & STATUS_PCIXSPD_MASK) {
   2075 			case STATUS_PCIXSPD_50_66:
   2076 				sc->sc_bus_speed = 66;
   2077 				break;
   2078 			case STATUS_PCIXSPD_66_100:
   2079 				sc->sc_bus_speed = 100;
   2080 				break;
   2081 			case STATUS_PCIXSPD_100_133:
   2082 				sc->sc_bus_speed = 133;
   2083 				break;
   2084 			default:
   2085 				aprint_error_dev(sc->sc_dev,
   2086 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2087 				    reg & STATUS_PCIXSPD_MASK);
   2088 				sc->sc_bus_speed = 66;
   2089 				break;
   2090 			}
   2091 		} else
   2092 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2093 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2094 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2095 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2096 	}
   2097 
   2098 	/* Disable ASPM L0s and/or L1 for workaround */
   2099 	wm_disable_aspm(sc);
   2100 
   2101 	/* clear interesting stat counters */
   2102 	CSR_READ(sc, WMREG_COLC);
   2103 	CSR_READ(sc, WMREG_RXERRC);
   2104 
   2105 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2106 	    || (sc->sc_type >= WM_T_ICH8))
   2107 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2108 	if (sc->sc_type >= WM_T_ICH8)
   2109 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2110 
	/* Set up the NVM access functions and PHY/NVM semaphore functions */
   2112 	switch (sc->sc_type) {
   2113 	case WM_T_82542_2_0:
   2114 	case WM_T_82542_2_1:
   2115 	case WM_T_82543:
   2116 	case WM_T_82544:
   2117 		/* Microwire */
   2118 		sc->nvm.read = wm_nvm_read_uwire;
   2119 		sc->sc_nvm_wordsize = 64;
   2120 		sc->sc_nvm_addrbits = 6;
   2121 		break;
   2122 	case WM_T_82540:
   2123 	case WM_T_82545:
   2124 	case WM_T_82545_3:
   2125 	case WM_T_82546:
   2126 	case WM_T_82546_3:
   2127 		/* Microwire */
   2128 		sc->nvm.read = wm_nvm_read_uwire;
   2129 		reg = CSR_READ(sc, WMREG_EECD);
   2130 		if (reg & EECD_EE_SIZE) {
   2131 			sc->sc_nvm_wordsize = 256;
   2132 			sc->sc_nvm_addrbits = 8;
   2133 		} else {
   2134 			sc->sc_nvm_wordsize = 64;
   2135 			sc->sc_nvm_addrbits = 6;
   2136 		}
   2137 		sc->sc_flags |= WM_F_LOCK_EECD;
   2138 		sc->nvm.acquire = wm_get_eecd;
   2139 		sc->nvm.release = wm_put_eecd;
   2140 		break;
   2141 	case WM_T_82541:
   2142 	case WM_T_82541_2:
   2143 	case WM_T_82547:
   2144 	case WM_T_82547_2:
   2145 		reg = CSR_READ(sc, WMREG_EECD);
   2146 		/*
		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI bus only
		 * on the 8254[17], so set the flags and functions before
		 * calling it.
   2149 		 */
   2150 		sc->sc_flags |= WM_F_LOCK_EECD;
   2151 		sc->nvm.acquire = wm_get_eecd;
   2152 		sc->nvm.release = wm_put_eecd;
   2153 		if (reg & EECD_EE_TYPE) {
   2154 			/* SPI */
   2155 			sc->nvm.read = wm_nvm_read_spi;
   2156 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2157 			wm_nvm_set_addrbits_size_eecd(sc);
   2158 		} else {
   2159 			/* Microwire */
   2160 			sc->nvm.read = wm_nvm_read_uwire;
   2161 			if ((reg & EECD_EE_ABITS) != 0) {
   2162 				sc->sc_nvm_wordsize = 256;
   2163 				sc->sc_nvm_addrbits = 8;
   2164 			} else {
   2165 				sc->sc_nvm_wordsize = 64;
   2166 				sc->sc_nvm_addrbits = 6;
   2167 			}
   2168 		}
   2169 		break;
   2170 	case WM_T_82571:
   2171 	case WM_T_82572:
   2172 		/* SPI */
   2173 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2175 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2176 		wm_nvm_set_addrbits_size_eecd(sc);
   2177 		sc->phy.acquire = wm_get_swsm_semaphore;
   2178 		sc->phy.release = wm_put_swsm_semaphore;
   2179 		sc->nvm.acquire = wm_get_nvm_82571;
   2180 		sc->nvm.release = wm_put_nvm_82571;
   2181 		break;
   2182 	case WM_T_82573:
   2183 	case WM_T_82574:
   2184 	case WM_T_82583:
   2185 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2187 		if (sc->sc_type == WM_T_82573) {
   2188 			sc->phy.acquire = wm_get_swsm_semaphore;
   2189 			sc->phy.release = wm_put_swsm_semaphore;
   2190 			sc->nvm.acquire = wm_get_nvm_82571;
   2191 			sc->nvm.release = wm_put_nvm_82571;
   2192 		} else {
   2193 			/* Both PHY and NVM use the same semaphore. */
   2194 			sc->phy.acquire = sc->nvm.acquire
   2195 			    = wm_get_swfwhw_semaphore;
   2196 			sc->phy.release = sc->nvm.release
   2197 			    = wm_put_swfwhw_semaphore;
   2198 		}
   2199 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2200 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2201 			sc->sc_nvm_wordsize = 2048;
   2202 		} else {
   2203 			/* SPI */
   2204 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2205 			wm_nvm_set_addrbits_size_eecd(sc);
   2206 		}
   2207 		break;
   2208 	case WM_T_82575:
   2209 	case WM_T_82576:
   2210 	case WM_T_82580:
   2211 	case WM_T_I350:
   2212 	case WM_T_I354:
   2213 	case WM_T_80003:
   2214 		/* SPI */
   2215 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2216 		wm_nvm_set_addrbits_size_eecd(sc);
   2217 		if ((sc->sc_type == WM_T_80003)
   2218 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2219 			sc->nvm.read = wm_nvm_read_eerd;
   2220 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2221 		} else {
   2222 			sc->nvm.read = wm_nvm_read_spi;
   2223 			sc->sc_flags |= WM_F_LOCK_EECD;
   2224 		}
   2225 		sc->phy.acquire = wm_get_phy_82575;
   2226 		sc->phy.release = wm_put_phy_82575;
   2227 		sc->nvm.acquire = wm_get_nvm_80003;
   2228 		sc->nvm.release = wm_put_nvm_80003;
   2229 		break;
   2230 	case WM_T_ICH8:
   2231 	case WM_T_ICH9:
   2232 	case WM_T_ICH10:
   2233 	case WM_T_PCH:
   2234 	case WM_T_PCH2:
   2235 	case WM_T_PCH_LPT:
   2236 		sc->nvm.read = wm_nvm_read_ich8;
   2237 		/* FLASH */
   2238 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2239 		sc->sc_nvm_wordsize = 2048;
   2240 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2241 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2242 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2243 			aprint_error_dev(sc->sc_dev,
   2244 			    "can't map FLASH registers\n");
   2245 			goto out;
   2246 		}
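		/*
		 * GFPREG describes the flash region in sector units.
		 * Convert the region size to bytes, then to 16-bit words,
		 * halving it because the region holds two banks.
		 */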
   2247 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2248 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2249 		    ICH_FLASH_SECTOR_SIZE;
   2250 		sc->sc_ich8_flash_bank_size =
   2251 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2252 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2253 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2254 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2255 		sc->sc_flashreg_offset = 0;
   2256 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2257 		sc->phy.release = wm_put_swflag_ich8lan;
   2258 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2259 		sc->nvm.release = wm_put_nvm_ich8lan;
   2260 		break;
   2261 	case WM_T_PCH_SPT:
   2262 	case WM_T_PCH_CNP:
   2263 		sc->nvm.read = wm_nvm_read_spt;
   2264 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2265 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2266 		sc->sc_flasht = sc->sc_st;
   2267 		sc->sc_flashh = sc->sc_sh;
   2268 		sc->sc_ich8_flash_base = 0;
   2269 		sc->sc_nvm_wordsize =
   2270 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2271 		    * NVM_SIZE_MULTIPLIER;
		/* The value is in bytes; we want it in words */
   2273 		sc->sc_nvm_wordsize /= 2;
   2274 		/* assume 2 banks */
   2275 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2276 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2277 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2278 		sc->phy.release = wm_put_swflag_ich8lan;
   2279 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2280 		sc->nvm.release = wm_put_nvm_ich8lan;
   2281 		break;
   2282 	case WM_T_I210:
   2283 	case WM_T_I211:
		/* Allow a single clear of the SW semaphore on I210 and newer */
   2285 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2286 		if (wm_nvm_flash_presence_i210(sc)) {
   2287 			sc->nvm.read = wm_nvm_read_eerd;
   2288 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2289 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2290 			wm_nvm_set_addrbits_size_eecd(sc);
   2291 		} else {
   2292 			sc->nvm.read = wm_nvm_read_invm;
   2293 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2294 			sc->sc_nvm_wordsize = INVM_SIZE;
   2295 		}
   2296 		sc->phy.acquire = wm_get_phy_82575;
   2297 		sc->phy.release = wm_put_phy_82575;
   2298 		sc->nvm.acquire = wm_get_nvm_80003;
   2299 		sc->nvm.release = wm_put_nvm_80003;
   2300 		break;
   2301 	default:
   2302 		break;
   2303 	}
   2304 
   2305 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2306 	switch (sc->sc_type) {
   2307 	case WM_T_82571:
   2308 	case WM_T_82572:
   2309 		reg = CSR_READ(sc, WMREG_SWSM2);
   2310 		if ((reg & SWSM2_LOCK) == 0) {
   2311 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2312 			force_clear_smbi = true;
   2313 		} else
   2314 			force_clear_smbi = false;
   2315 		break;
   2316 	case WM_T_82573:
   2317 	case WM_T_82574:
   2318 	case WM_T_82583:
   2319 		force_clear_smbi = true;
   2320 		break;
   2321 	default:
   2322 		force_clear_smbi = false;
   2323 		break;
   2324 	}
   2325 	if (force_clear_smbi) {
   2326 		reg = CSR_READ(sc, WMREG_SWSM);
   2327 		if ((reg & SWSM_SMBI) != 0)
   2328 			aprint_error_dev(sc->sc_dev,
   2329 			    "Please update the Bootagent\n");
   2330 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2331 	}
   2332 
   2333 	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
   2335 	 * This allows the EEPROM type to be printed correctly in the case
   2336 	 * that no EEPROM is attached.
   2337 	 */
   2338 	/*
   2339 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2340 	 * this for later, so we can fail future reads from the EEPROM.
   2341 	 */
   2342 	if (wm_nvm_validate_checksum(sc)) {
   2343 		/*
		 * Check a second time, because some PCI-e parts fail the
		 * first check due to the link being in a sleep state.
   2346 		 */
   2347 		if (wm_nvm_validate_checksum(sc))
   2348 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2349 	}
   2350 
   2351 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2352 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2353 	else {
   2354 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2355 		    sc->sc_nvm_wordsize);
   2356 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2357 			aprint_verbose("iNVM");
   2358 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2359 			aprint_verbose("FLASH(HW)");
   2360 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2361 			aprint_verbose("FLASH");
   2362 		else {
   2363 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2364 				eetype = "SPI";
   2365 			else
   2366 				eetype = "MicroWire";
   2367 			aprint_verbose("(%d address bits) %s EEPROM",
   2368 			    sc->sc_nvm_addrbits, eetype);
   2369 		}
   2370 	}
   2371 	wm_nvm_version(sc);
   2372 	aprint_verbose("\n");
   2373 
   2374 	/*
   2375 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
	 * XXX This is the first call of wm_gmii_setup_phytype(); the result
	 * might be incorrect.
   2378 	wm_gmii_setup_phytype(sc, 0, 0);
   2379 
   2380 	/* Reset the chip to a known state. */
   2381 	wm_reset(sc);
   2382 
   2383 	/*
   2384 	 * Check for I21[01] PLL workaround.
   2385 	 *
   2386 	 * Three cases:
   2387 	 * a) Chip is I211.
   2388 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2389 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2390 	 */
   2391 	if (sc->sc_type == WM_T_I211)
   2392 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2393 	if (sc->sc_type == WM_T_I210) {
   2394 		if (!wm_nvm_flash_presence_i210(sc))
   2395 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2396 		else if ((sc->sc_nvm_ver_major < 3)
   2397 		    || ((sc->sc_nvm_ver_major == 3)
   2398 			&& (sc->sc_nvm_ver_minor < 25))) {
   2399 			aprint_verbose_dev(sc->sc_dev,
   2400 			    "ROM image version %d.%d is older than 3.25\n",
   2401 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2402 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2403 		}
   2404 	}
   2405 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2406 		wm_pll_workaround_i210(sc);
   2407 
   2408 	wm_get_wakeup(sc);
   2409 
   2410 	/* Non-AMT based hardware can now take control from firmware */
   2411 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2412 		wm_get_hw_control(sc);
   2413 
   2414 	/*
	 * Read the Ethernet address from the EEPROM, unless it was found
	 * first in the device properties.
   2417 	 */
   2418 	ea = prop_dictionary_get(dict, "mac-address");
   2419 	if (ea != NULL) {
   2420 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2421 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2422 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2423 	} else {
   2424 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2425 			aprint_error_dev(sc->sc_dev,
   2426 			    "unable to read Ethernet address\n");
   2427 			goto out;
   2428 		}
   2429 	}
   2430 
   2431 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2432 	    ether_sprintf(enaddr));
   2433 
   2434 	/*
   2435 	 * Read the config info from the EEPROM, and set up various
   2436 	 * bits in the control registers based on their contents.
   2437 	 */
   2438 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2439 	if (pn != NULL) {
   2440 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2441 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2442 	} else {
   2443 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2444 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2445 			goto out;
   2446 		}
   2447 	}
   2448 
   2449 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2450 	if (pn != NULL) {
   2451 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2452 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2453 	} else {
   2454 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2455 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2456 			goto out;
   2457 		}
   2458 	}
   2459 
   2460 	/* check for WM_F_WOL */
   2461 	switch (sc->sc_type) {
   2462 	case WM_T_82542_2_0:
   2463 	case WM_T_82542_2_1:
   2464 	case WM_T_82543:
   2465 		/* dummy? */
   2466 		eeprom_data = 0;
   2467 		apme_mask = NVM_CFG3_APME;
   2468 		break;
   2469 	case WM_T_82544:
   2470 		apme_mask = NVM_CFG2_82544_APM_EN;
   2471 		eeprom_data = cfg2;
   2472 		break;
   2473 	case WM_T_82546:
   2474 	case WM_T_82546_3:
   2475 	case WM_T_82571:
   2476 	case WM_T_82572:
   2477 	case WM_T_82573:
   2478 	case WM_T_82574:
   2479 	case WM_T_82583:
   2480 	case WM_T_80003:
   2481 	default:
   2482 		apme_mask = NVM_CFG3_APME;
   2483 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2484 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2485 		break;
   2486 	case WM_T_82575:
   2487 	case WM_T_82576:
   2488 	case WM_T_82580:
   2489 	case WM_T_I350:
   2490 	case WM_T_I354: /* XXX ok? */
   2491 	case WM_T_ICH8:
   2492 	case WM_T_ICH9:
   2493 	case WM_T_ICH10:
   2494 	case WM_T_PCH:
   2495 	case WM_T_PCH2:
   2496 	case WM_T_PCH_LPT:
   2497 	case WM_T_PCH_SPT:
   2498 	case WM_T_PCH_CNP:
   2499 		/* XXX The funcid should be checked on some devices */
   2500 		apme_mask = WUC_APME;
   2501 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2502 		break;
   2503 	}
   2504 
   2505 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2506 	if ((eeprom_data & apme_mask) != 0)
   2507 		sc->sc_flags |= WM_F_WOL;
   2508 
   2509 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2510 		/* Check NVM for autonegotiation */
   2511 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2512 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2513 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2514 		}
   2515 	}
   2516 
   2517 	/*
	 * XXX Some multiple-port cards need special handling to
	 * disable a particular port.
   2520 	 */
   2521 
   2522 	if (sc->sc_type >= WM_T_82544) {
   2523 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2524 		if (pn != NULL) {
   2525 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2526 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2527 		} else {
   2528 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2529 				aprint_error_dev(sc->sc_dev,
   2530 				    "unable to read SWDPIN\n");
   2531 				goto out;
   2532 			}
   2533 		}
   2534 	}
   2535 
   2536 	if (cfg1 & NVM_CFG1_ILOS)
   2537 		sc->sc_ctrl |= CTRL_ILOS;
   2538 
   2539 	/*
   2540 	 * XXX
	 * This code isn't correct because pins 2 and 3 are located at
	 * different positions on newer chips. Check all the datasheets.
	 *
	 * Until this problem is resolved, only do this on chips up to
	 * and including the 82580.
   2545 	 */
   2546 	if (sc->sc_type <= WM_T_82580) {
   2547 		if (sc->sc_type >= WM_T_82544) {
   2548 			sc->sc_ctrl |=
   2549 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2550 			    CTRL_SWDPIO_SHIFT;
   2551 			sc->sc_ctrl |=
   2552 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2553 			    CTRL_SWDPINS_SHIFT;
   2554 		} else {
   2555 			sc->sc_ctrl |=
   2556 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2557 			    CTRL_SWDPIO_SHIFT;
   2558 		}
   2559 	}
   2560 
   2561 	/* XXX For other than 82580? */
   2562 	if (sc->sc_type == WM_T_82580) {
   2563 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2564 		if (nvmword & __BIT(13))
   2565 			sc->sc_ctrl |= CTRL_ILOS;
   2566 	}
   2567 
   2568 #if 0
   2569 	if (sc->sc_type >= WM_T_82544) {
   2570 		if (cfg1 & NVM_CFG1_IPS0)
   2571 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2572 		if (cfg1 & NVM_CFG1_IPS1)
   2573 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2574 		sc->sc_ctrl_ext |=
   2575 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2576 		    CTRL_EXT_SWDPIO_SHIFT;
   2577 		sc->sc_ctrl_ext |=
   2578 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2579 		    CTRL_EXT_SWDPINS_SHIFT;
   2580 	} else {
   2581 		sc->sc_ctrl_ext |=
   2582 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2583 		    CTRL_EXT_SWDPIO_SHIFT;
   2584 	}
   2585 #endif
   2586 
   2587 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2588 #if 0
   2589 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2590 #endif
   2591 
   2592 	if (sc->sc_type == WM_T_PCH) {
   2593 		uint16_t val;
   2594 
   2595 		/* Save the NVM K1 bit setting */
   2596 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2597 
   2598 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2599 			sc->sc_nvm_k1_enabled = 1;
   2600 		else
   2601 			sc->sc_nvm_k1_enabled = 0;
   2602 	}
   2603 
   2604 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2605 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2606 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2607 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2608 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2609 	    || sc->sc_type == WM_T_82573
   2610 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2611 		/* Copper only */
   2612 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2616 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2617 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2618 		switch (link_mode) {
   2619 		case CTRL_EXT_LINK_MODE_1000KX:
   2620 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2621 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2622 			break;
   2623 		case CTRL_EXT_LINK_MODE_SGMII:
   2624 			if (wm_sgmii_uses_mdio(sc)) {
   2625 				aprint_verbose_dev(sc->sc_dev,
   2626 				    "SGMII(MDIO)\n");
   2627 				sc->sc_flags |= WM_F_SGMII;
   2628 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2629 				break;
   2630 			}
   2631 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2632 			/*FALLTHROUGH*/
   2633 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2634 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2635 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2636 				if (link_mode
   2637 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2638 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2639 					sc->sc_flags |= WM_F_SGMII;
   2640 				} else {
   2641 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2642 					aprint_verbose_dev(sc->sc_dev,
   2643 					    "SERDES\n");
   2644 				}
   2645 				break;
   2646 			}
   2647 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2648 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2649 
   2650 			/* Change current link mode setting */
   2651 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2652 			switch (sc->sc_mediatype) {
   2653 			case WM_MEDIATYPE_COPPER:
   2654 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2655 				break;
   2656 			case WM_MEDIATYPE_SERDES:
   2657 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2658 				break;
   2659 			default:
   2660 				break;
   2661 			}
   2662 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2663 			break;
   2664 		case CTRL_EXT_LINK_MODE_GMII:
   2665 		default:
   2666 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2667 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2668 			break;
   2669 		}
   2670 
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
   2676 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2677 	} else if (sc->sc_type < WM_T_82543 ||
   2678 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2679 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2680 			aprint_error_dev(sc->sc_dev,
   2681 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2682 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2683 		}
   2684 	} else {
   2685 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2686 			aprint_error_dev(sc->sc_dev,
   2687 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2688 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2689 		}
   2690 	}
   2691 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2692 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2693 
   2694 	/* Set device properties (macflags) */
   2695 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2696 
   2697 	/* Initialize the media structures accordingly. */
   2698 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2699 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2700 	else
   2701 		wm_tbi_mediainit(sc); /* All others */
   2702 
   2703 	ifp = &sc->sc_ethercom.ec_if;
   2704 	xname = device_xname(sc->sc_dev);
   2705 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2706 	ifp->if_softc = sc;
   2707 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2708 #ifdef WM_MPSAFE
   2709 	ifp->if_extflags = IFEF_MPSAFE;
   2710 #endif
   2711 	ifp->if_ioctl = wm_ioctl;
   2712 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2713 		ifp->if_start = wm_nq_start;
   2714 		/*
   2715 		 * When the number of CPUs is one and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
		 * the other for link status changes.
   2719 		 * In this situation, wm_nq_transmit() is disadvantageous
   2720 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2721 		 */
   2722 		if (wm_is_using_multiqueue(sc))
   2723 			ifp->if_transmit = wm_nq_transmit;
   2724 	} else {
   2725 		ifp->if_start = wm_start;
   2726 		/*
		 * wm_transmit() has the same disadvantage as
		 * wm_nq_transmit().
   2728 		 */
   2729 		if (wm_is_using_multiqueue(sc))
   2730 			ifp->if_transmit = wm_transmit;
   2731 	}
	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2733 	ifp->if_init = wm_init;
   2734 	ifp->if_stop = wm_stop;
   2735 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2736 	IFQ_SET_READY(&ifp->if_snd);
   2737 
   2738 	/* Check for jumbo frame */
   2739 	switch (sc->sc_type) {
   2740 	case WM_T_82573:
   2741 		/* XXX limited to 9234 if ASPM is disabled */
   2742 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2743 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2744 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2745 		break;
   2746 	case WM_T_82571:
   2747 	case WM_T_82572:
   2748 	case WM_T_82574:
   2749 	case WM_T_82583:
   2750 	case WM_T_82575:
   2751 	case WM_T_82576:
   2752 	case WM_T_82580:
   2753 	case WM_T_I350:
   2754 	case WM_T_I354:
   2755 	case WM_T_I210:
   2756 	case WM_T_I211:
   2757 	case WM_T_80003:
   2758 	case WM_T_ICH9:
   2759 	case WM_T_ICH10:
   2760 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2761 	case WM_T_PCH_LPT:
   2762 	case WM_T_PCH_SPT:
   2763 	case WM_T_PCH_CNP:
   2764 		/* XXX limited to 9234 */
   2765 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2766 		break;
   2767 	case WM_T_PCH:
   2768 		/* XXX limited to 4096 */
   2769 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2770 		break;
   2771 	case WM_T_82542_2_0:
   2772 	case WM_T_82542_2_1:
   2773 	case WM_T_ICH8:
   2774 		/* No support for jumbo frame */
   2775 		break;
   2776 	default:
   2777 		/* ETHER_MAX_LEN_JUMBO */
   2778 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2779 		break;
   2780 	}
   2781 
    2782 	/* If we're an i82543 or greater, we can support VLANs. */
   2783 	if (sc->sc_type >= WM_T_82543)
   2784 		sc->sc_ethercom.ec_capabilities |=
   2785 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2786 
    2787 	/*
    2788 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
    2789 	 * on i82543 and later.
    2790 	 */
   2791 	if (sc->sc_type >= WM_T_82543) {
   2792 		ifp->if_capabilities |=
   2793 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2794 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2795 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2796 		    IFCAP_CSUM_TCPv6_Tx |
   2797 		    IFCAP_CSUM_UDPv6_Tx;
   2798 	}
   2799 
   2800 	/*
   2801 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2802 	 *
   2803 	 *	82541GI (8086:1076) ... no
   2804 	 *	82572EI (8086:10b9) ... yes
   2805 	 */
   2806 	if (sc->sc_type >= WM_T_82571) {
   2807 		ifp->if_capabilities |=
   2808 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2809 	}
   2810 
    2811 	/*
    2812 	 * If we're an i82544 or greater (except i82547), we can do
    2813 	 * TCP segmentation offload.
    2814 	 */
   2815 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2816 		ifp->if_capabilities |= IFCAP_TSOv4;
   2817 	}
   2818 
   2819 	if (sc->sc_type >= WM_T_82571) {
   2820 		ifp->if_capabilities |= IFCAP_TSOv6;
   2821 	}
   2822 
   2823 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2824 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2825 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2826 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2827 
   2828 #ifdef WM_MPSAFE
   2829 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2830 #else
   2831 	sc->sc_core_lock = NULL;
   2832 #endif
   2833 
   2834 	/* Attach the interface. */
   2835 	error = if_initialize(ifp);
   2836 	if (error != 0) {
   2837 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2838 		    error);
   2839 		return; /* Error */
   2840 	}
   2841 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2842 	ether_ifattach(ifp, enaddr);
   2843 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2844 	if_register(ifp);
   2845 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2846 	    RND_FLAG_DEFAULT);
   2847 
   2848 #ifdef WM_EVENT_COUNTERS
   2849 	/* Attach event counters. */
   2850 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2851 	    NULL, xname, "linkintr");
   2852 
   2853 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2854 	    NULL, xname, "tx_xoff");
   2855 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2856 	    NULL, xname, "tx_xon");
   2857 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2858 	    NULL, xname, "rx_xoff");
   2859 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2860 	    NULL, xname, "rx_xon");
   2861 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2862 	    NULL, xname, "rx_macctl");
   2863 #endif /* WM_EVENT_COUNTERS */
   2864 
   2865 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2866 		pmf_class_network_register(self, ifp);
   2867 	else
   2868 		aprint_error_dev(self, "couldn't establish power handler\n");
   2869 
   2870 	sc->sc_flags |= WM_F_ATTACHED;
   2871  out:
   2872 	return;
   2873 }
   2874 
   2875 /* The detach function (ca_detach) */
   2876 static int
   2877 wm_detach(device_t self, int flags __unused)
   2878 {
   2879 	struct wm_softc *sc = device_private(self);
   2880 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2881 	int i;
   2882 
   2883 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2884 		return 0;
   2885 
   2886 	/* Stop the interface. Callouts are stopped in it. */
   2887 	wm_stop(ifp, 1);
   2888 
   2889 	pmf_device_deregister(self);
   2890 
   2891 #ifdef WM_EVENT_COUNTERS
   2892 	evcnt_detach(&sc->sc_ev_linkintr);
   2893 
   2894 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2895 	evcnt_detach(&sc->sc_ev_tx_xon);
   2896 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2897 	evcnt_detach(&sc->sc_ev_rx_xon);
   2898 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2899 #endif /* WM_EVENT_COUNTERS */
   2900 
   2901 	/* Tell the firmware about the release */
   2902 	WM_CORE_LOCK(sc);
   2903 	wm_release_manageability(sc);
   2904 	wm_release_hw_control(sc);
   2905 	wm_enable_wakeup(sc);
   2906 	WM_CORE_UNLOCK(sc);
   2907 
   2908 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2909 
   2910 	/* Delete all remaining media. */
   2911 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2912 
   2913 	ether_ifdetach(ifp);
   2914 	if_detach(ifp);
   2915 	if_percpuq_destroy(sc->sc_ipq);
   2916 
   2917 	/* Unload RX dmamaps and free mbufs */
   2918 	for (i = 0; i < sc->sc_nqueues; i++) {
   2919 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2920 		mutex_enter(rxq->rxq_lock);
   2921 		wm_rxdrain(rxq);
   2922 		mutex_exit(rxq->rxq_lock);
   2923 	}
   2924 	/* Must unlock here */
   2925 
   2926 	/* Disestablish the interrupt handler */
   2927 	for (i = 0; i < sc->sc_nintrs; i++) {
   2928 		if (sc->sc_ihs[i] != NULL) {
   2929 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2930 			sc->sc_ihs[i] = NULL;
   2931 		}
   2932 	}
   2933 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2934 
   2935 	wm_free_txrx_queues(sc);
   2936 
   2937 	/* Unmap the registers */
   2938 	if (sc->sc_ss) {
   2939 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2940 		sc->sc_ss = 0;
   2941 	}
   2942 	if (sc->sc_ios) {
   2943 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2944 		sc->sc_ios = 0;
   2945 	}
   2946 	if (sc->sc_flashs) {
   2947 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2948 		sc->sc_flashs = 0;
   2949 	}
   2950 
   2951 	if (sc->sc_core_lock)
   2952 		mutex_obj_free(sc->sc_core_lock);
   2953 	if (sc->sc_ich_phymtx)
   2954 		mutex_obj_free(sc->sc_ich_phymtx);
   2955 	if (sc->sc_ich_nvmmtx)
   2956 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2957 
   2958 	return 0;
   2959 }
   2960 
   2961 static bool
   2962 wm_suspend(device_t self, const pmf_qual_t *qual)
   2963 {
   2964 	struct wm_softc *sc = device_private(self);
   2965 
   2966 	wm_release_manageability(sc);
   2967 	wm_release_hw_control(sc);
   2968 	wm_enable_wakeup(sc);
   2969 
   2970 	return true;
   2971 }
   2972 
   2973 static bool
   2974 wm_resume(device_t self, const pmf_qual_t *qual)
   2975 {
   2976 	struct wm_softc *sc = device_private(self);
   2977 
   2978 	/* Disable ASPM L0s and/or L1 for workaround */
   2979 	wm_disable_aspm(sc);
   2980 	wm_init_manageability(sc);
   2981 
   2982 	return true;
   2983 }
   2984 
   2985 /*
   2986  * wm_watchdog:		[ifnet interface function]
   2987  *
   2988  *	Watchdog timer handler.
   2989  */
   2990 static void
   2991 wm_watchdog(struct ifnet *ifp)
   2992 {
   2993 	int qid;
   2994 	struct wm_softc *sc = ifp->if_softc;
    2995 	uint16_t hang_queue = 0; /* Max queue count of wm(4) is 82576's 16. */
   2996 
   2997 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2998 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2999 
   3000 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3001 	}
   3002 
    3003 	/*
    3004 	 * If any of the queues hung up, reset the interface.
    3005 	 */
   3006 	if (hang_queue != 0) {
   3007 		(void) wm_init(ifp);
   3008 
    3009 		/*
    3010 		 * There is still some upper-layer processing which calls
    3011 		 * ifp->if_start() directly, e.g. ALTQ or a single-CPU system.
    3012 		 */
   3013 		/* Try to get more packets going. */
   3014 		ifp->if_start(ifp);
   3015 	}
   3016 }
   3017 
   3018 
   3019 static void
   3020 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3021 {
   3022 
   3023 	mutex_enter(txq->txq_lock);
   3024 	if (txq->txq_sending &&
   3025 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3026 		wm_watchdog_txq_locked(ifp, txq, hang);
   3027 	}
   3028 	mutex_exit(txq->txq_lock);
   3029 }
   3030 
   3031 static void
   3032 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3033     uint16_t *hang)
   3034 {
   3035 	struct wm_softc *sc = ifp->if_softc;
   3036 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3037 
   3038 	KASSERT(mutex_owned(txq->txq_lock));
   3039 
   3040 	/*
   3041 	 * Since we're using delayed interrupts, sweep up
   3042 	 * before we report an error.
   3043 	 */
   3044 	wm_txeof(txq, UINT_MAX);
   3045 
   3046 	if (txq->txq_sending)
   3047 		*hang |= __BIT(wmq->wmq_id);
   3048 
   3049 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3050 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3051 		    device_xname(sc->sc_dev));
   3052 	} else {
   3053 #ifdef WM_DEBUG
   3054 		int i, j;
   3055 		struct wm_txsoft *txs;
   3056 #endif
   3057 		log(LOG_ERR,
   3058 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3059 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3060 		    txq->txq_next);
   3061 		ifp->if_oerrors++;
   3062 #ifdef WM_DEBUG
   3063 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3064 		    i = WM_NEXTTXS(txq, i)) {
   3065 		    txs = &txq->txq_soft[i];
   3066 		    printf("txs %d tx %d -> %d\n",
   3067 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3068 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3069 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3070 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3071 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3072 				    printf("\t %#08x%08x\n",
   3073 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3074 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3075 			    } else {
   3076 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3077 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3078 					txq->txq_descs[j].wtx_addr.wa_low);
   3079 				    printf("\t %#04x%02x%02x%08x\n",
   3080 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3081 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3082 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3083 					txq->txq_descs[j].wtx_cmdlen);
   3084 			    }
   3085 			if (j == txs->txs_lastdesc)
   3086 				break;
   3087 			}
   3088 		}
   3089 #endif
   3090 	}
   3091 }
   3092 
   3093 /*
   3094  * wm_tick:
   3095  *
   3096  *	One second timer, used to check link status, sweep up
   3097  *	completed transmit jobs, etc.
   3098  */
   3099 static void
   3100 wm_tick(void *arg)
   3101 {
   3102 	struct wm_softc *sc = arg;
   3103 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3104 #ifndef WM_MPSAFE
   3105 	int s = splnet();
   3106 #endif
   3107 
   3108 	WM_CORE_LOCK(sc);
   3109 
   3110 	if (sc->sc_core_stopping) {
   3111 		WM_CORE_UNLOCK(sc);
   3112 #ifndef WM_MPSAFE
   3113 		splx(s);
   3114 #endif
   3115 		return;
   3116 	}
   3117 
   3118 	if (sc->sc_type >= WM_T_82542_2_1) {
   3119 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3120 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3121 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3122 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3123 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3124 	}
   3125 
   3126 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3127 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3128 	    + CSR_READ(sc, WMREG_CRCERRS)
   3129 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3130 	    + CSR_READ(sc, WMREG_SYMERRC)
   3131 	    + CSR_READ(sc, WMREG_RXERRC)
   3132 	    + CSR_READ(sc, WMREG_SEC)
   3133 	    + CSR_READ(sc, WMREG_CEXTERR)
   3134 	    + CSR_READ(sc, WMREG_RLEC);
    3135 	/*
    3136 	 * WMREG_RNBC is incremented when there are no available buffers
    3137 	 * in host memory. It is not the number of dropped packets, because
    3138 	 * the ethernet controller can still receive packets in that case
    3139 	 * as long as there is space in the PHY's FIFO.
    3140 	 *
    3141 	 * To track WMREG_RNBC, use a dedicated EVCNT instead of
    3142 	 * if_iqdrops.
    3143 	 */
   3144 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3145 
   3146 	if (sc->sc_flags & WM_F_HAS_MII)
   3147 		mii_tick(&sc->sc_mii);
   3148 	else if ((sc->sc_type >= WM_T_82575)
   3149 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3150 		wm_serdes_tick(sc);
   3151 	else
   3152 		wm_tbi_tick(sc);
   3153 
   3154 	WM_CORE_UNLOCK(sc);
   3155 
   3156 	wm_watchdog(ifp);
   3157 
   3158 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3159 }
   3160 
   3161 static int
   3162 wm_ifflags_cb(struct ethercom *ec)
   3163 {
   3164 	struct ifnet *ifp = &ec->ec_if;
   3165 	struct wm_softc *sc = ifp->if_softc;
   3166 	int rc = 0;
   3167 
   3168 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3169 		device_xname(sc->sc_dev), __func__));
   3170 
   3171 	WM_CORE_LOCK(sc);
   3172 
   3173 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3174 	sc->sc_if_flags = ifp->if_flags;
   3175 
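         	/* Changes outside IFF_CANTCHANGE and IFF_DEBUG need a full re-init. */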
   3176 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3177 		rc = ENETRESET;
   3178 		goto out;
   3179 	}
   3180 
   3181 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3182 		wm_set_filter(sc);
   3183 
   3184 	wm_set_vlan(sc);
   3185 
   3186 out:
   3187 	WM_CORE_UNLOCK(sc);
   3188 
   3189 	return rc;
   3190 }
   3191 
   3192 /*
   3193  * wm_ioctl:		[ifnet interface function]
   3194  *
   3195  *	Handle control requests from the operator.
   3196  */
   3197 static int
   3198 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3199 {
   3200 	struct wm_softc *sc = ifp->if_softc;
   3201 	struct ifreq *ifr = (struct ifreq *) data;
   3202 	struct ifaddr *ifa = (struct ifaddr *)data;
   3203 	struct sockaddr_dl *sdl;
   3204 	int s, error;
   3205 
   3206 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3207 		device_xname(sc->sc_dev), __func__));
   3208 
   3209 #ifndef WM_MPSAFE
   3210 	s = splnet();
   3211 #endif
   3212 	switch (cmd) {
   3213 	case SIOCSIFMEDIA:
   3214 	case SIOCGIFMEDIA:
   3215 		WM_CORE_LOCK(sc);
   3216 		/* Flow control requires full-duplex mode. */
   3217 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3218 		    (ifr->ifr_media & IFM_FDX) == 0)
   3219 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3220 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3221 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3222 				/* We can do both TXPAUSE and RXPAUSE. */
   3223 				ifr->ifr_media |=
   3224 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3225 			}
   3226 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3227 		}
   3228 		WM_CORE_UNLOCK(sc);
   3229 #ifdef WM_MPSAFE
   3230 		s = splnet();
   3231 #endif
   3232 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3233 #ifdef WM_MPSAFE
   3234 		splx(s);
   3235 #endif
   3236 		break;
   3237 	case SIOCINITIFADDR:
   3238 		WM_CORE_LOCK(sc);
   3239 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3240 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3241 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3242 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3243 			/* unicast address is first multicast entry */
   3244 			wm_set_filter(sc);
   3245 			error = 0;
   3246 			WM_CORE_UNLOCK(sc);
   3247 			break;
   3248 		}
   3249 		WM_CORE_UNLOCK(sc);
   3250 		/*FALLTHROUGH*/
   3251 	default:
   3252 #ifdef WM_MPSAFE
   3253 		s = splnet();
   3254 #endif
   3255 		/* It may call wm_start, so unlock here */
   3256 		error = ether_ioctl(ifp, cmd, data);
   3257 #ifdef WM_MPSAFE
   3258 		splx(s);
   3259 #endif
   3260 		if (error != ENETRESET)
   3261 			break;
   3262 
   3263 		error = 0;
   3264 
   3265 		if (cmd == SIOCSIFCAP)
   3266 			error = (*ifp->if_init)(ifp);
   3267 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3268 			;
   3269 		else if (ifp->if_flags & IFF_RUNNING) {
   3270 			/*
   3271 			 * Multicast list has changed; set the hardware filter
   3272 			 * accordingly.
   3273 			 */
   3274 			WM_CORE_LOCK(sc);
   3275 			wm_set_filter(sc);
   3276 			WM_CORE_UNLOCK(sc);
   3277 		}
   3278 		break;
   3279 	}
   3280 
   3281 #ifndef WM_MPSAFE
   3282 	splx(s);
   3283 #endif
   3284 	return error;
   3285 }
   3286 
   3287 /* MAC address related */
   3288 
    3289 /*
    3290  * Get the offset of the alternative MAC address and return it.
    3291  * If an error occurs, return 0 (the default MAC address offset).
    3292  */
   3293 static uint16_t
   3294 wm_check_alt_mac_addr(struct wm_softc *sc)
   3295 {
   3296 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3297 	uint16_t offset = NVM_OFF_MACADDR;
   3298 
   3299 	/* Try to read alternative MAC address pointer */
   3300 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3301 		return 0;
   3302 
    3303 	/* Check whether the pointer is valid. */
   3304 	if ((offset == 0x0000) || (offset == 0xffff))
   3305 		return 0;
   3306 
   3307 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3308 	/*
    3309 	 * Check whether the alternative MAC address is valid. Some cards
    3310 	 * have a non-0xffff pointer but don't actually use an alternative
    3311 	 * MAC address.
    3312 	 *
    3313 	 * Check that the multicast (group) bit is clear, i.e. valid unicast.
    3314 	 */
   3315 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3316 		if (((myea[0] & 0xff) & 0x01) == 0)
   3317 			return offset; /* Found */
   3318 
   3319 	/* Not found */
   3320 	return 0;
   3321 }
   3322 
   3323 static int
   3324 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3325 {
   3326 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3327 	uint16_t offset = NVM_OFF_MACADDR;
   3328 	int do_invert = 0;
   3329 
   3330 	switch (sc->sc_type) {
   3331 	case WM_T_82580:
   3332 	case WM_T_I350:
   3333 	case WM_T_I354:
   3334 		/* EEPROM Top Level Partitioning */
   3335 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3336 		break;
   3337 	case WM_T_82571:
   3338 	case WM_T_82575:
   3339 	case WM_T_82576:
   3340 	case WM_T_80003:
   3341 	case WM_T_I210:
   3342 	case WM_T_I211:
   3343 		offset = wm_check_alt_mac_addr(sc);
   3344 		if (offset == 0)
   3345 			if ((sc->sc_funcid & 0x01) == 1)
   3346 				do_invert = 1;
   3347 		break;
   3348 	default:
   3349 		if ((sc->sc_funcid & 0x01) == 1)
   3350 			do_invert = 1;
   3351 		break;
   3352 	}
   3353 
   3354 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3355 		goto bad;
   3356 
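         	/* The address is stored as three little-endian 16-bit NVM words. */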
   3357 	enaddr[0] = myea[0] & 0xff;
   3358 	enaddr[1] = myea[0] >> 8;
   3359 	enaddr[2] = myea[1] & 0xff;
   3360 	enaddr[3] = myea[1] >> 8;
   3361 	enaddr[4] = myea[2] & 0xff;
   3362 	enaddr[5] = myea[2] >> 8;
   3363 
   3364 	/*
   3365 	 * Toggle the LSB of the MAC address on the second port
   3366 	 * of some dual port cards.
   3367 	 */
   3368 	if (do_invert != 0)
   3369 		enaddr[5] ^= 1;
   3370 
   3371 	return 0;
   3372 
   3373  bad:
   3374 	return -1;
   3375 }
   3376 
   3377 /*
   3378  * wm_set_ral:
   3379  *
    3380  *	Set an entry in the receive address list.
   3381  */
   3382 static void
   3383 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3384 {
   3385 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3386 	uint32_t wlock_mac;
   3387 	int rv;
   3388 
   3389 	if (enaddr != NULL) {
   3390 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3391 		    (enaddr[3] << 24);
   3392 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3393 		ral_hi |= RAL_AV;
   3394 	} else {
   3395 		ral_lo = 0;
   3396 		ral_hi = 0;
   3397 	}
   3398 
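         	/*
         	 * RAL holds the low 32 bits of the address; RAH holds the high
         	 * 16 bits plus the Address Valid (RAL_AV) bit.
         	 */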
   3399 	switch (sc->sc_type) {
   3400 	case WM_T_82542_2_0:
   3401 	case WM_T_82542_2_1:
   3402 	case WM_T_82543:
   3403 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3404 		CSR_WRITE_FLUSH(sc);
   3405 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3406 		CSR_WRITE_FLUSH(sc);
   3407 		break;
   3408 	case WM_T_PCH2:
   3409 	case WM_T_PCH_LPT:
   3410 	case WM_T_PCH_SPT:
   3411 	case WM_T_PCH_CNP:
   3412 		if (idx == 0) {
   3413 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3414 			CSR_WRITE_FLUSH(sc);
   3415 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3416 			CSR_WRITE_FLUSH(sc);
   3417 			return;
   3418 		}
   3419 		if (sc->sc_type != WM_T_PCH2) {
   3420 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3421 			    FWSM_WLOCK_MAC);
   3422 			addrl = WMREG_SHRAL(idx - 1);
   3423 			addrh = WMREG_SHRAH(idx - 1);
   3424 		} else {
   3425 			wlock_mac = 0;
   3426 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3427 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3428 		}
   3429 
   3430 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3431 			rv = wm_get_swflag_ich8lan(sc);
   3432 			if (rv != 0)
   3433 				return;
   3434 			CSR_WRITE(sc, addrl, ral_lo);
   3435 			CSR_WRITE_FLUSH(sc);
   3436 			CSR_WRITE(sc, addrh, ral_hi);
   3437 			CSR_WRITE_FLUSH(sc);
   3438 			wm_put_swflag_ich8lan(sc);
   3439 		}
   3440 
   3441 		break;
   3442 	default:
   3443 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3444 		CSR_WRITE_FLUSH(sc);
   3445 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3446 		CSR_WRITE_FLUSH(sc);
   3447 		break;
   3448 	}
   3449 }
   3450 
   3451 /*
   3452  * wm_mchash:
   3453  *
   3454  *	Compute the hash of the multicast address for the 4096-bit
   3455  *	multicast filter.
   3456  */
   3457 static uint32_t
   3458 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3459 {
   3460 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3461 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3462 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3463 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3464 	uint32_t hash;
   3465 
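         	/*
         	 * The hash is taken from the last two bytes of the address;
         	 * sc_mchash_type selects the bit window. ICH/PCH variants use
         	 * a 10-bit hash, all other chips a 12-bit hash.
         	 */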
   3466 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3467 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3468 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3469 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3470 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3471 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3472 		return (hash & 0x3ff);
   3473 	}
   3474 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3475 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3476 
   3477 	return (hash & 0xfff);
   3478 }
   3479 
   3480 /*
   3481  * wm_set_filter:
   3482  *
   3483  *	Set up the receive filter.
   3484  */
   3485 static void
   3486 wm_set_filter(struct wm_softc *sc)
   3487 {
   3488 	struct ethercom *ec = &sc->sc_ethercom;
   3489 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3490 	struct ether_multi *enm;
   3491 	struct ether_multistep step;
   3492 	bus_addr_t mta_reg;
   3493 	uint32_t hash, reg, bit;
   3494 	int i, size, ralmax;
   3495 
   3496 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3497 		device_xname(sc->sc_dev), __func__));
   3498 
   3499 	if (sc->sc_type >= WM_T_82544)
   3500 		mta_reg = WMREG_CORDOVA_MTA;
   3501 	else
   3502 		mta_reg = WMREG_MTA;
   3503 
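         	/* Start from a clean state; the bits are set again below as needed. */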
   3504 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3505 
   3506 	if (ifp->if_flags & IFF_BROADCAST)
   3507 		sc->sc_rctl |= RCTL_BAM;
   3508 	if (ifp->if_flags & IFF_PROMISC) {
   3509 		sc->sc_rctl |= RCTL_UPE;
   3510 		goto allmulti;
   3511 	}
   3512 
   3513 	/*
   3514 	 * Set the station address in the first RAL slot, and
   3515 	 * clear the remaining slots.
   3516 	 */
   3517 	if (sc->sc_type == WM_T_ICH8)
    3518 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3519 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3520 	    || (sc->sc_type == WM_T_PCH))
   3521 		size = WM_RAL_TABSIZE_ICH8;
   3522 	else if (sc->sc_type == WM_T_PCH2)
   3523 		size = WM_RAL_TABSIZE_PCH2;
   3524 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3525 	    || (sc->sc_type == WM_T_PCH_CNP))
   3526 		size = WM_RAL_TABSIZE_PCH_LPT;
   3527 	else if (sc->sc_type == WM_T_82575)
   3528 		size = WM_RAL_TABSIZE_82575;
   3529 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3530 		size = WM_RAL_TABSIZE_82576;
   3531 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3532 		size = WM_RAL_TABSIZE_I350;
   3533 	else
   3534 		size = WM_RAL_TABSIZE;
   3535 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3536 
   3537 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3538 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3539 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3540 		switch (i) {
   3541 		case 0:
   3542 			/* We can use all entries */
   3543 			ralmax = size;
   3544 			break;
   3545 		case 1:
   3546 			/* Only RAR[0] */
   3547 			ralmax = 1;
   3548 			break;
   3549 		default:
   3550 			/* available SHRA + RAR[0] */
   3551 			ralmax = i + 1;
   3552 		}
   3553 	} else
   3554 		ralmax = size;
   3555 	for (i = 1; i < size; i++) {
   3556 		if (i < ralmax)
   3557 			wm_set_ral(sc, NULL, i);
   3558 	}
   3559 
   3560 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3561 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3562 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3563 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3564 		size = WM_ICH8_MC_TABSIZE;
   3565 	else
   3566 		size = WM_MC_TABSIZE;
   3567 	/* Clear out the multicast table. */
   3568 	for (i = 0; i < size; i++) {
   3569 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3570 		CSR_WRITE_FLUSH(sc);
   3571 	}
   3572 
   3573 	ETHER_LOCK(ec);
   3574 	ETHER_FIRST_MULTI(step, ec, enm);
   3575 	while (enm != NULL) {
   3576 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3577 			ETHER_UNLOCK(ec);
   3578 			/*
   3579 			 * We must listen to a range of multicast addresses.
   3580 			 * For now, just accept all multicasts, rather than
   3581 			 * trying to set only those filter bits needed to match
   3582 			 * the range.  (At this time, the only use of address
   3583 			 * ranges is for IP multicast routing, for which the
   3584 			 * range is big enough to require all bits set.)
   3585 			 */
   3586 			goto allmulti;
   3587 		}
   3588 
   3589 		hash = wm_mchash(sc, enm->enm_addrlo);
   3590 
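         		/*
         		 * Each MTA entry is one 32-bit word: the upper hash bits
         		 * select the word and the low 5 bits select the bit in it.
         		 */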
   3591 		reg = (hash >> 5);
   3592 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3593 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3594 		    || (sc->sc_type == WM_T_PCH2)
   3595 		    || (sc->sc_type == WM_T_PCH_LPT)
   3596 		    || (sc->sc_type == WM_T_PCH_SPT)
   3597 		    || (sc->sc_type == WM_T_PCH_CNP))
   3598 			reg &= 0x1f;
   3599 		else
   3600 			reg &= 0x7f;
   3601 		bit = hash & 0x1f;
   3602 
   3603 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3604 		hash |= 1U << bit;
   3605 
   3606 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
    3607 			/*
    3608 			 * 82544 Errata 9: Certain registers cannot be written
    3609 			 * with particular alignments in PCI-X bus operation
    3610 			 * (FCAH, MTA and VFTA).
    3611 			 */
   3612 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3613 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3614 			CSR_WRITE_FLUSH(sc);
   3615 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3616 			CSR_WRITE_FLUSH(sc);
   3617 		} else {
   3618 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3619 			CSR_WRITE_FLUSH(sc);
   3620 		}
   3621 
   3622 		ETHER_NEXT_MULTI(step, enm);
   3623 	}
   3624 	ETHER_UNLOCK(ec);
   3625 
   3626 	ifp->if_flags &= ~IFF_ALLMULTI;
   3627 	goto setit;
   3628 
   3629  allmulti:
   3630 	ifp->if_flags |= IFF_ALLMULTI;
   3631 	sc->sc_rctl |= RCTL_MPE;
   3632 
   3633  setit:
   3634 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3635 }
   3636 
   3637 /* Reset and init related */
   3638 
   3639 static void
   3640 wm_set_vlan(struct wm_softc *sc)
   3641 {
   3642 
   3643 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3644 		device_xname(sc->sc_dev), __func__));
   3645 
   3646 	/* Deal with VLAN enables. */
   3647 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3648 		sc->sc_ctrl |= CTRL_VME;
   3649 	else
   3650 		sc->sc_ctrl &= ~CTRL_VME;
   3651 
   3652 	/* Write the control registers. */
   3653 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3654 }
   3655 
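         /*
          * wm_set_pcie_completion_timeout:
          *
          *	Set the PCIe completion timeout if it is still at its default
          *	of zero: 16ms via PCIE_DCSR2 on capability version 2 devices,
          *	otherwise 10ms via GCR. Completion timeout resend is disabled
          *	in either case.
          */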
   3656 static void
   3657 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3658 {
   3659 	uint32_t gcr;
   3660 	pcireg_t ctrl2;
   3661 
   3662 	gcr = CSR_READ(sc, WMREG_GCR);
   3663 
   3664 	/* Only take action if timeout value is defaulted to 0 */
   3665 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3666 		goto out;
   3667 
   3668 	if ((gcr & GCR_CAP_VER2) == 0) {
   3669 		gcr |= GCR_CMPL_TMOUT_10MS;
   3670 		goto out;
   3671 	}
   3672 
   3673 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3674 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3675 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3676 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3677 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3678 
   3679 out:
   3680 	/* Disable completion timeout resend */
   3681 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3682 
   3683 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3684 }
   3685 
   3686 void
   3687 wm_get_auto_rd_done(struct wm_softc *sc)
   3688 {
   3689 	int i;
   3690 
    3691 	/* Wait for eeprom to reload */
   3692 	switch (sc->sc_type) {
   3693 	case WM_T_82571:
   3694 	case WM_T_82572:
   3695 	case WM_T_82573:
   3696 	case WM_T_82574:
   3697 	case WM_T_82583:
   3698 	case WM_T_82575:
   3699 	case WM_T_82576:
   3700 	case WM_T_82580:
   3701 	case WM_T_I350:
   3702 	case WM_T_I354:
   3703 	case WM_T_I210:
   3704 	case WM_T_I211:
   3705 	case WM_T_80003:
   3706 	case WM_T_ICH8:
   3707 	case WM_T_ICH9:
   3708 		for (i = 0; i < 10; i++) {
   3709 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3710 				break;
   3711 			delay(1000);
   3712 		}
   3713 		if (i == 10) {
   3714 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3715 			    "complete\n", device_xname(sc->sc_dev));
   3716 		}
   3717 		break;
   3718 	default:
   3719 		break;
   3720 	}
   3721 }
   3722 
   3723 void
   3724 wm_lan_init_done(struct wm_softc *sc)
   3725 {
   3726 	uint32_t reg = 0;
   3727 	int i;
   3728 
   3729 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3730 		device_xname(sc->sc_dev), __func__));
   3731 
   3732 	/* Wait for eeprom to reload */
   3733 	switch (sc->sc_type) {
   3734 	case WM_T_ICH10:
   3735 	case WM_T_PCH:
   3736 	case WM_T_PCH2:
   3737 	case WM_T_PCH_LPT:
   3738 	case WM_T_PCH_SPT:
   3739 	case WM_T_PCH_CNP:
   3740 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3741 			reg = CSR_READ(sc, WMREG_STATUS);
   3742 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3743 				break;
   3744 			delay(100);
   3745 		}
   3746 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3747 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3748 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3749 		}
   3750 		break;
   3751 	default:
   3752 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3753 		    __func__);
   3754 		break;
   3755 	}
   3756 
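         	/* Acknowledge completion by clearing the LAN_INIT_DONE bit. */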
   3757 	reg &= ~STATUS_LAN_INIT_DONE;
   3758 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3759 }
   3760 
   3761 void
   3762 wm_get_cfg_done(struct wm_softc *sc)
   3763 {
   3764 	int mask;
   3765 	uint32_t reg;
   3766 	int i;
   3767 
   3768 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3769 		device_xname(sc->sc_dev), __func__));
   3770 
   3771 	/* Wait for eeprom to reload */
   3772 	switch (sc->sc_type) {
   3773 	case WM_T_82542_2_0:
   3774 	case WM_T_82542_2_1:
   3775 		/* null */
   3776 		break;
   3777 	case WM_T_82543:
   3778 	case WM_T_82544:
   3779 	case WM_T_82540:
   3780 	case WM_T_82545:
   3781 	case WM_T_82545_3:
   3782 	case WM_T_82546:
   3783 	case WM_T_82546_3:
   3784 	case WM_T_82541:
   3785 	case WM_T_82541_2:
   3786 	case WM_T_82547:
   3787 	case WM_T_82547_2:
   3788 	case WM_T_82573:
   3789 	case WM_T_82574:
   3790 	case WM_T_82583:
   3791 		/* generic */
   3792 		delay(10*1000);
   3793 		break;
   3794 	case WM_T_80003:
   3795 	case WM_T_82571:
   3796 	case WM_T_82572:
   3797 	case WM_T_82575:
   3798 	case WM_T_82576:
   3799 	case WM_T_82580:
   3800 	case WM_T_I350:
   3801 	case WM_T_I354:
   3802 	case WM_T_I210:
   3803 	case WM_T_I211:
   3804 		if (sc->sc_type == WM_T_82571) {
   3805 			/* Only 82571 shares port 0 */
   3806 			mask = EEMNGCTL_CFGDONE_0;
   3807 		} else
   3808 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3809 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3810 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3811 				break;
   3812 			delay(1000);
   3813 		}
   3814 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3815 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3816 				device_xname(sc->sc_dev), __func__));
   3817 		}
   3818 		break;
   3819 	case WM_T_ICH8:
   3820 	case WM_T_ICH9:
   3821 	case WM_T_ICH10:
   3822 	case WM_T_PCH:
   3823 	case WM_T_PCH2:
   3824 	case WM_T_PCH_LPT:
   3825 	case WM_T_PCH_SPT:
   3826 	case WM_T_PCH_CNP:
   3827 		delay(10*1000);
   3828 		if (sc->sc_type >= WM_T_ICH10)
   3829 			wm_lan_init_done(sc);
   3830 		else
   3831 			wm_get_auto_rd_done(sc);
   3832 
   3833 		/* Clear PHY Reset Asserted bit */
   3834 		reg = CSR_READ(sc, WMREG_STATUS);
   3835 		if ((reg & STATUS_PHYRA) != 0)
   3836 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3837 		break;
   3838 	default:
   3839 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3840 		    __func__);
   3841 		break;
   3842 	}
   3843 }
   3844 
   3845 void
   3846 wm_phy_post_reset(struct wm_softc *sc)
   3847 {
   3848 	uint32_t reg;
   3849 
   3850 	/* This function is only for ICH8 and newer. */
   3851 	if (sc->sc_type < WM_T_ICH8)
   3852 		return;
   3853 
   3854 	if (wm_phy_resetisblocked(sc)) {
   3855 		/* XXX */
   3856 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3857 		return;
   3858 	}
   3859 
   3860 	/* Allow time for h/w to get to quiescent state after reset */
   3861 	delay(10*1000);
   3862 
   3863 	/* Perform any necessary post-reset workarounds */
   3864 	if (sc->sc_type == WM_T_PCH)
   3865 		wm_hv_phy_workaround_ich8lan(sc);
   3866 	else if (sc->sc_type == WM_T_PCH2)
   3867 		wm_lv_phy_workaround_ich8lan(sc);
   3868 
   3869 	/* Clear the host wakeup bit after lcd reset */
   3870 	if (sc->sc_type >= WM_T_PCH) {
   3871 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3872 		    BM_PORT_GEN_CFG);
   3873 		reg &= ~BM_WUC_HOST_WU_BIT;
   3874 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3875 		    BM_PORT_GEN_CFG, reg);
   3876 	}
   3877 
   3878 	/* Configure the LCD with the extended configuration region in NVM */
   3879 	wm_init_lcd_from_nvm(sc);
   3880 
   3881 	/* XXX Configure the LCD with the OEM bits in NVM */
   3882 
   3883 	if (sc->sc_type == WM_T_PCH2) {
   3884 		/* Ungate automatic PHY configuration on non-managed 82579 */
   3885 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   3886 			delay(10 * 1000);
   3887 			wm_gate_hw_phy_config_ich8lan(sc, false);
   3888 		}
   3889 		/* XXX Set EEE LPI Update Timer to 200usec */
   3890 	}
   3891 }
   3892 
   3893 /* Only for PCH and newer */
   3894 static int
   3895 wm_write_smbus_addr(struct wm_softc *sc)
   3896 {
   3897 	uint32_t strap, freq;
   3898 	uint16_t phy_data;
   3899 	int rv;
   3900 
   3901 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3902 		device_xname(sc->sc_dev), __func__));
   3903 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   3904 
   3905 	strap = CSR_READ(sc, WMREG_STRAP);
   3906 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3907 
   3908 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   3909 	if (rv != 0)
   3910 		return -1;
   3911 
   3912 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3913 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3914 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3915 
   3916 	if (sc->sc_phytype == WMPHY_I217) {
   3917 		/* Restore SMBus frequency */
    3918 		if (freq--) {
   3919 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3920 			    | HV_SMB_ADDR_FREQ_HIGH);
   3921 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3922 			    HV_SMB_ADDR_FREQ_LOW);
   3923 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3924 			    HV_SMB_ADDR_FREQ_HIGH);
   3925 		} else {
   3926 			DPRINTF(WM_DEBUG_INIT,
   3927 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3928 				device_xname(sc->sc_dev), __func__));
   3929 		}
   3930 	}
   3931 
   3932 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   3933 	    phy_data);
   3934 }
   3935 
   3936 void
   3937 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3938 {
   3939 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3940 	uint16_t phy_page = 0;
   3941 
   3942 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3943 		device_xname(sc->sc_dev), __func__));
   3944 
   3945 	switch (sc->sc_type) {
   3946 	case WM_T_ICH8:
   3947 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3948 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3949 			return;
   3950 
   3951 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3952 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3953 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3954 			break;
   3955 		}
   3956 		/* FALLTHROUGH */
   3957 	case WM_T_PCH:
   3958 	case WM_T_PCH2:
   3959 	case WM_T_PCH_LPT:
   3960 	case WM_T_PCH_SPT:
   3961 	case WM_T_PCH_CNP:
   3962 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3963 		break;
   3964 	default:
   3965 		return;
   3966 	}
   3967 
   3968 	sc->phy.acquire(sc);
   3969 
   3970 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3971 	if ((reg & sw_cfg_mask) == 0)
   3972 		goto release;
   3973 
   3974 	/*
   3975 	 * Make sure HW does not configure LCD from PHY extended configuration
   3976 	 * before SW configuration
   3977 	 */
   3978 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3979 	if ((sc->sc_type < WM_T_PCH2)
   3980 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3981 		goto release;
   3982 
   3983 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3984 		device_xname(sc->sc_dev), __func__));
   3985 	/* word_addr is in DWORD */
   3986 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3987 
   3988 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3989 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3990 	if (cnf_size == 0)
   3991 		goto release;
   3992 
   3993 	if (((sc->sc_type == WM_T_PCH)
   3994 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3995 	    || (sc->sc_type > WM_T_PCH)) {
   3996 		/*
   3997 		 * HW configures the SMBus address and LEDs when the OEM and
   3998 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3999 		 * are cleared, SW will configure them instead.
   4000 		 */
   4001 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4002 			device_xname(sc->sc_dev), __func__));
   4003 		wm_write_smbus_addr(sc);
   4004 
   4005 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4006 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   4007 	}
   4008 
   4009 	/* Configure LCD from extended configuration region. */
   4010 	for (i = 0; i < cnf_size; i++) {
   4011 		uint16_t reg_data, reg_addr;
   4012 
   4013 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4014 			goto release;
   4015 
   4016 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4017 			goto release;
   4018 
   4019 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4020 			phy_page = reg_data;
   4021 
   4022 		reg_addr &= IGPHY_MAXREGADDR;
   4023 		reg_addr |= phy_page;
   4024 
   4025 		KASSERT(sc->phy.writereg_locked != NULL);
   4026 		sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr, reg_data);
   4027 	}
   4028 
   4029 release:
   4030 	sc->phy.release(sc);
   4031 	return;
   4032 }
   4033 
   4034 
   4035 /* Init hardware bits */
   4036 void
   4037 wm_initialize_hardware_bits(struct wm_softc *sc)
   4038 {
   4039 	uint32_t tarc0, tarc1, reg;
   4040 
   4041 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4042 		device_xname(sc->sc_dev), __func__));
   4043 
   4044 	/* For 82571 variant, 80003 and ICHs */
   4045 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4046 	    || (sc->sc_type >= WM_T_80003)) {
   4047 
   4048 		/* Transmit Descriptor Control 0 */
   4049 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4050 		reg |= TXDCTL_COUNT_DESC;
   4051 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4052 
   4053 		/* Transmit Descriptor Control 1 */
   4054 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4055 		reg |= TXDCTL_COUNT_DESC;
   4056 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4057 
   4058 		/* TARC0 */
   4059 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4060 		switch (sc->sc_type) {
   4061 		case WM_T_82571:
   4062 		case WM_T_82572:
   4063 		case WM_T_82573:
   4064 		case WM_T_82574:
   4065 		case WM_T_82583:
   4066 		case WM_T_80003:
   4067 			/* Clear bits 30..27 */
   4068 			tarc0 &= ~__BITS(30, 27);
   4069 			break;
   4070 		default:
   4071 			break;
   4072 		}
   4073 
   4074 		switch (sc->sc_type) {
   4075 		case WM_T_82571:
   4076 		case WM_T_82572:
   4077 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4078 
   4079 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4080 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4081 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4082 			/* 8257[12] Errata No.7 */
    4083 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4084 
   4085 			/* TARC1 bit 28 */
   4086 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4087 				tarc1 &= ~__BIT(28);
   4088 			else
   4089 				tarc1 |= __BIT(28);
   4090 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4091 
   4092 			/*
   4093 			 * 8257[12] Errata No.13
    4094 			 * Disable Dynamic Clock Gating.
   4095 			 */
   4096 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4097 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4098 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4099 			break;
   4100 		case WM_T_82573:
   4101 		case WM_T_82574:
   4102 		case WM_T_82583:
   4103 			if ((sc->sc_type == WM_T_82574)
   4104 			    || (sc->sc_type == WM_T_82583))
   4105 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4106 
   4107 			/* Extended Device Control */
   4108 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4109 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4110 			reg |= __BIT(22);	/* Set bit 22 */
   4111 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4112 
   4113 			/* Device Control */
   4114 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4115 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4116 
   4117 			/* PCIe Control Register */
   4118 			/*
   4119 			 * 82573 Errata (unknown).
   4120 			 *
   4121 			 * 82574 Errata 25 and 82583 Errata 12
   4122 			 * "Dropped Rx Packets":
    4123 			 *   NVM images 2.1.4 and newer do not have this bug.
   4124 			 */
   4125 			reg = CSR_READ(sc, WMREG_GCR);
   4126 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4127 			CSR_WRITE(sc, WMREG_GCR, reg);
   4128 
   4129 			if ((sc->sc_type == WM_T_82574)
   4130 			    || (sc->sc_type == WM_T_82583)) {
   4131 				/*
   4132 				 * Document says this bit must be set for
   4133 				 * proper operation.
   4134 				 */
   4135 				reg = CSR_READ(sc, WMREG_GCR);
   4136 				reg |= __BIT(22);
   4137 				CSR_WRITE(sc, WMREG_GCR, reg);
   4138 
    4139 				/*
    4140 				 * Apply a workaround for the hardware
    4141 				 * erratum documented in the errata docs.
    4142 				 * It fixes an issue where some error-prone
    4143 				 * or unreliable PCIe completions occur,
    4144 				 * particularly with ASPM enabled. Without
    4145 				 * the fix, the issue can cause Tx timeouts.
    4146 				 */
   4147 				reg = CSR_READ(sc, WMREG_GCR2);
   4148 				reg |= __BIT(0);
   4149 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4150 			}
   4151 			break;
   4152 		case WM_T_80003:
   4153 			/* TARC0 */
   4154 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4155 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4156 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4157 
   4158 			/* TARC1 bit 28 */
   4159 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4160 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4161 				tarc1 &= ~__BIT(28);
   4162 			else
   4163 				tarc1 |= __BIT(28);
   4164 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4165 			break;
   4166 		case WM_T_ICH8:
   4167 		case WM_T_ICH9:
   4168 		case WM_T_ICH10:
   4169 		case WM_T_PCH:
   4170 		case WM_T_PCH2:
   4171 		case WM_T_PCH_LPT:
   4172 		case WM_T_PCH_SPT:
   4173 		case WM_T_PCH_CNP:
   4174 			/* TARC0 */
   4175 			if (sc->sc_type == WM_T_ICH8) {
   4176 				/* Set TARC0 bits 29 and 28 */
   4177 				tarc0 |= __BITS(29, 28);
   4178 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4179 				tarc0 |= __BIT(29);
   4180 				/*
    4181 				 * Drop bit 28, following Linux.
    4182 				 * See the I218/I219 spec update
   4183 				 * "5. Buffer Overrun While the I219 is
   4184 				 * Processing DMA Transactions"
   4185 				 */
   4186 				tarc0 &= ~__BIT(28);
   4187 			}
   4188 			/* Set TARC0 bits 23,24,26,27 */
   4189 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4190 
   4191 			/* CTRL_EXT */
   4192 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4193 			reg |= __BIT(22);	/* Set bit 22 */
   4194 			/*
   4195 			 * Enable PHY low-power state when MAC is at D3
   4196 			 * w/o WoL
   4197 			 */
   4198 			if (sc->sc_type >= WM_T_PCH)
   4199 				reg |= CTRL_EXT_PHYPDEN;
   4200 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4201 
   4202 			/* TARC1 */
   4203 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4204 			/* bit 28 */
   4205 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4206 				tarc1 &= ~__BIT(28);
   4207 			else
   4208 				tarc1 |= __BIT(28);
   4209 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4210 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4211 
   4212 			/* Device Status */
   4213 			if (sc->sc_type == WM_T_ICH8) {
   4214 				reg = CSR_READ(sc, WMREG_STATUS);
   4215 				reg &= ~__BIT(31);
   4216 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4217 
   4218 			}
   4219 
   4220 			/* IOSFPC */
   4221 			if (sc->sc_type == WM_T_PCH_SPT) {
   4222 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4223 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4224 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4225 			}
    4226 			/*
    4227 			 * To work around a descriptor data corruption issue
    4228 			 * during NFS v2 UDP traffic, just disable the NFS
    4229 			 * filtering capability.
    4230 			 */
   4231 			reg = CSR_READ(sc, WMREG_RFCTL);
   4232 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4233 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4234 			break;
   4235 		default:
   4236 			break;
   4237 		}
   4238 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4239 
   4240 		switch (sc->sc_type) {
   4241 		/*
   4242 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4243 		 * Avoid RSS Hash Value bug.
   4244 		 */
   4245 		case WM_T_82571:
   4246 		case WM_T_82572:
   4247 		case WM_T_82573:
   4248 		case WM_T_80003:
   4249 		case WM_T_ICH8:
   4250 			reg = CSR_READ(sc, WMREG_RFCTL);
    4251 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4252 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4253 			break;
   4254 		case WM_T_82574:
    4255 			/* Use extended Rx descriptors. */
   4256 			reg = CSR_READ(sc, WMREG_RFCTL);
   4257 			reg |= WMREG_RFCTL_EXSTEN;
   4258 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4259 			break;
   4260 		default:
   4261 			break;
   4262 		}
   4263 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4264 		/*
   4265 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4266 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4267 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4268 		 * Correctly by the Device"
   4269 		 *
   4270 		 * I354(C2000) Errata AVR53:
   4271 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4272 		 * Hang"
   4273 		 */
   4274 		reg = CSR_READ(sc, WMREG_RFCTL);
   4275 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4276 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4277 	}
   4278 }
   4279 
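         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Translate the 82580 RXPBS register value through the lookup
          *	table; out-of-range values map to 0.
          */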
   4280 static uint32_t
   4281 wm_rxpbs_adjust_82580(uint32_t val)
   4282 {
   4283 	uint32_t rv = 0;
   4284 
   4285 	if (val < __arraycount(wm_82580_rxpbs_table))
   4286 		rv = wm_82580_rxpbs_table[val];
   4287 
   4288 	return rv;
   4289 }
   4290 
   4291 /*
   4292  * wm_reset_phy:
   4293  *
   4294  *	generic PHY reset function.
   4295  *	Same as e1000_phy_hw_reset_generic()
   4296  */
   4297 static void
   4298 wm_reset_phy(struct wm_softc *sc)
   4299 {
   4300 	uint32_t reg;
   4301 
   4302 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4303 		device_xname(sc->sc_dev), __func__));
   4304 	if (wm_phy_resetisblocked(sc))
   4305 		return;
   4306 
   4307 	sc->phy.acquire(sc);
   4308 
   4309 	reg = CSR_READ(sc, WMREG_CTRL);
   4310 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4311 	CSR_WRITE_FLUSH(sc);
   4312 
   4313 	delay(sc->phy.reset_delay_us);
   4314 
   4315 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4316 	CSR_WRITE_FLUSH(sc);
   4317 
   4318 	delay(150);
   4319 
   4320 	sc->phy.release(sc);
   4321 
   4322 	wm_get_cfg_done(sc);
   4323 	wm_phy_post_reset(sc);
   4324 }
   4325 
   4326 /*
   4327  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4328  * so it is enough to check sc->sc_queue[0] only.
   4329  */
   4330 static void
   4331 wm_flush_desc_rings(struct wm_softc *sc)
   4332 {
   4333 	pcireg_t preg;
   4334 	uint32_t reg;
   4335 	struct wm_txqueue *txq;
   4336 	wiseman_txdesc_t *txd;
   4337 	int nexttx;
   4338 	uint32_t rctl;
   4339 
   4340 	/* First, disable MULR fix in FEXTNVM11 */
   4341 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4342 	reg |= FEXTNVM11_DIS_MULRFIX;
   4343 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4344 
   4345 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4346 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4347 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4348 		return;
   4349 
   4350 	/* TX */
   4351 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4352 	    device_xname(sc->sc_dev), preg, reg);
   4353 	reg = CSR_READ(sc, WMREG_TCTL);
   4354 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4355 
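         	/*
         	 * Queue a single dummy 512-byte descriptor with IFCS set to
         	 * make the hardware flush the Tx ring.
         	 */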
   4356 	txq = &sc->sc_queue[0].wmq_txq;
   4357 	nexttx = txq->txq_next;
   4358 	txd = &txq->txq_descs[nexttx];
   4359 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4360 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4361 	txd->wtx_fields.wtxu_status = 0;
   4362 	txd->wtx_fields.wtxu_options = 0;
   4363 	txd->wtx_fields.wtxu_vlan = 0;
   4364 
   4365 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4366 	    BUS_SPACE_BARRIER_WRITE);
   4367 
   4368 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4369 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4370 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4371 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4372 	delay(250);
   4373 
   4374 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4375 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4376 		return;
   4377 
   4378 	/* RX */
   4379 	printf("%s: Need RX flush (reg = %08x)\n",
   4380 	    device_xname(sc->sc_dev), preg);
   4381 	rctl = CSR_READ(sc, WMREG_RCTL);
   4382 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4383 	CSR_WRITE_FLUSH(sc);
   4384 	delay(150);
   4385 
   4386 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
    4387 	/* Zero the lower 14 bits (prefetch and host thresholds). */
   4388 	reg &= 0xffffc000;
    4389 	/*
    4390 	 * Update thresholds: set the prefetch threshold to 31 and the
    4391 	 * host threshold to 1, and make sure the granularity is
    4392 	 * "descriptors", not "cache lines".
    4393 	 */
   4394 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4395 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4396 
    4397 	/*
    4398 	 * Momentarily enable the RX ring for the changes to take
    4399 	 * effect.
    4400 	 */
   4401 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4402 	CSR_WRITE_FLUSH(sc);
   4403 	delay(150);
   4404 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4405 }
   4406 
   4407 /*
   4408  * wm_reset:
   4409  *
   4410  *	Reset the i82542 chip.
   4411  */
   4412 static void
   4413 wm_reset(struct wm_softc *sc)
   4414 {
   4415 	int phy_reset = 0;
   4416 	int i, error = 0;
   4417 	uint32_t reg;
   4418 	uint16_t kmreg;
   4419 	int rv;
   4420 
   4421 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4422 		device_xname(sc->sc_dev), __func__));
   4423 	KASSERT(sc->sc_type != 0);
   4424 
   4425 	/*
   4426 	 * Allocate on-chip memory according to the MTU size.
   4427 	 * The Packet Buffer Allocation register must be written
   4428 	 * before the chip is reset.
   4429 	 */
   4430 	switch (sc->sc_type) {
   4431 	case WM_T_82547:
   4432 	case WM_T_82547_2:
   4433 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4434 		    PBA_22K : PBA_30K;
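         		/*
         		 * On 82547, the packet buffer above the Rx allocation is
         		 * used as the software-managed Tx FIFO for the Tx stall
         		 * workaround.
         		 */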
   4435 		for (i = 0; i < sc->sc_nqueues; i++) {
   4436 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4437 			txq->txq_fifo_head = 0;
   4438 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4439 			txq->txq_fifo_size =
   4440 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4441 			txq->txq_fifo_stall = 0;
   4442 		}
   4443 		break;
   4444 	case WM_T_82571:
   4445 	case WM_T_82572:
   4446 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4447 	case WM_T_80003:
   4448 		sc->sc_pba = PBA_32K;
   4449 		break;
   4450 	case WM_T_82573:
   4451 		sc->sc_pba = PBA_12K;
   4452 		break;
   4453 	case WM_T_82574:
   4454 	case WM_T_82583:
   4455 		sc->sc_pba = PBA_20K;
   4456 		break;
   4457 	case WM_T_82576:
   4458 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4459 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4460 		break;
   4461 	case WM_T_82580:
   4462 	case WM_T_I350:
   4463 	case WM_T_I354:
   4464 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4465 		break;
   4466 	case WM_T_I210:
   4467 	case WM_T_I211:
   4468 		sc->sc_pba = PBA_34K;
   4469 		break;
   4470 	case WM_T_ICH8:
   4471 		/* Workaround for a bit corruption issue in FIFO memory */
   4472 		sc->sc_pba = PBA_8K;
   4473 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4474 		break;
   4475 	case WM_T_ICH9:
   4476 	case WM_T_ICH10:
   4477 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4478 		    PBA_14K : PBA_10K;
   4479 		break;
   4480 	case WM_T_PCH:
   4481 	case WM_T_PCH2:	/* XXX 14K? */
   4482 	case WM_T_PCH_LPT:
   4483 	case WM_T_PCH_SPT:
   4484 	case WM_T_PCH_CNP:
   4485 		sc->sc_pba = PBA_26K;
   4486 		break;
   4487 	default:
   4488 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4489 		    PBA_40K : PBA_48K;
   4490 		break;
   4491 	}
   4492 	/*
   4493 	 * Only old or non-multiqueue devices have the PBA register
   4494 	 * XXX Need special handling for 82575.
   4495 	 */
   4496 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4497 	    || (sc->sc_type == WM_T_82575))
   4498 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4499 
   4500 	/* Prevent the PCI-E bus from sticking */
   4501 	if (sc->sc_flags & WM_F_PCIE) {
   4502 		int timeout = 800;
   4503 
   4504 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4505 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4506 
   4507 		while (timeout--) {
   4508 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4509 			    == 0)
   4510 				break;
   4511 			delay(100);
   4512 		}
    4513 		if (timeout < 0)	/* exhausted: timeout is now -1 */
   4514 			device_printf(sc->sc_dev,
   4515 			    "failed to disable busmastering\n");
   4516 	}
   4517 
   4518 	/* Set the completion timeout for interface */
   4519 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4520 	    || (sc->sc_type == WM_T_82580)
   4521 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4522 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4523 		wm_set_pcie_completion_timeout(sc);
   4524 
   4525 	/* Clear interrupt */
   4526 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4527 	if (wm_is_using_msix(sc)) {
   4528 		if (sc->sc_type != WM_T_82574) {
   4529 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4530 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4531 		} else
   4532 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4533 	}
   4534 
   4535 	/* Stop the transmit and receive processes. */
   4536 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4537 	sc->sc_rctl &= ~RCTL_EN;
   4538 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4539 	CSR_WRITE_FLUSH(sc);
   4540 
   4541 	/* XXX set_tbi_sbp_82543() */
   4542 
   4543 	delay(10*1000);
   4544 
   4545 	/* Must acquire the MDIO ownership before MAC reset */
   4546 	switch (sc->sc_type) {
   4547 	case WM_T_82573:
   4548 	case WM_T_82574:
   4549 	case WM_T_82583:
   4550 		error = wm_get_hw_semaphore_82573(sc);
   4551 		break;
   4552 	default:
   4553 		break;
   4554 	}
   4555 
   4556 	/*
   4557 	 * 82541 Errata 29? & 82547 Errata 28?
    4558 	 * See also the description of the PHY_RST bit in the CTRL register
   4559 	 * in 8254x_GBe_SDM.pdf.
   4560 	 */
   4561 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4562 		CSR_WRITE(sc, WMREG_CTRL,
   4563 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4564 		CSR_WRITE_FLUSH(sc);
   4565 		delay(5000);
   4566 	}
   4567 
   4568 	switch (sc->sc_type) {
   4569 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4570 	case WM_T_82541:
   4571 	case WM_T_82541_2:
   4572 	case WM_T_82547:
   4573 	case WM_T_82547_2:
   4574 		/*
   4575 		 * On some chipsets, a reset through a memory-mapped write
   4576 		 * cycle can cause the chip to reset before completing the
    4577 		 * write cycle. This causes major headaches that can be avoided
   4578 		 * by issuing the reset via indirect register writes through
   4579 		 * I/O space.
   4580 		 *
   4581 		 * So, if we successfully mapped the I/O BAR at attach time,
   4582 		 * use that. Otherwise, try our luck with a memory-mapped
   4583 		 * reset.
   4584 		 */
   4585 		if (sc->sc_flags & WM_F_IOH_VALID)
   4586 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4587 		else
   4588 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4589 		break;
   4590 	case WM_T_82545_3:
   4591 	case WM_T_82546_3:
   4592 		/* Use the shadow control register on these chips. */
   4593 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4594 		break;
   4595 	case WM_T_80003:
   4596 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4597 		sc->phy.acquire(sc);
   4598 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4599 		sc->phy.release(sc);
   4600 		break;
   4601 	case WM_T_ICH8:
   4602 	case WM_T_ICH9:
   4603 	case WM_T_ICH10:
   4604 	case WM_T_PCH:
   4605 	case WM_T_PCH2:
   4606 	case WM_T_PCH_LPT:
   4607 	case WM_T_PCH_SPT:
   4608 	case WM_T_PCH_CNP:
   4609 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4610 		if (wm_phy_resetisblocked(sc) == false) {
   4611 			/*
   4612 			 * Gate automatic PHY configuration by hardware on
   4613 			 * non-managed 82579
   4614 			 */
   4615 			if ((sc->sc_type == WM_T_PCH2)
   4616 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4617 				== 0))
   4618 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4619 
   4620 			reg |= CTRL_PHY_RESET;
   4621 			phy_reset = 1;
   4622 		} else
   4623 			printf("XXX reset is blocked!!!\n");
   4624 		sc->phy.acquire(sc);
   4625 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4626 		/* Don't insert a completion barrier when resetting */
   4627 		delay(20*1000);
   4628 		mutex_exit(sc->sc_ich_phymtx);
   4629 		break;
   4630 	case WM_T_82580:
   4631 	case WM_T_I350:
   4632 	case WM_T_I354:
   4633 	case WM_T_I210:
   4634 	case WM_T_I211:
   4635 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4636 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4637 			CSR_WRITE_FLUSH(sc);
   4638 		delay(5000);
   4639 		break;
   4640 	case WM_T_82542_2_0:
   4641 	case WM_T_82542_2_1:
   4642 	case WM_T_82543:
   4643 	case WM_T_82540:
   4644 	case WM_T_82545:
   4645 	case WM_T_82546:
   4646 	case WM_T_82571:
   4647 	case WM_T_82572:
   4648 	case WM_T_82573:
   4649 	case WM_T_82574:
   4650 	case WM_T_82575:
   4651 	case WM_T_82576:
   4652 	case WM_T_82583:
   4653 	default:
   4654 		/* Everything else can safely use the documented method. */
   4655 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4656 		break;
   4657 	}
   4658 
   4659 	/* Must release the MDIO ownership after MAC reset */
   4660 	switch (sc->sc_type) {
   4661 	case WM_T_82573:
   4662 	case WM_T_82574:
   4663 	case WM_T_82583:
   4664 		if (error == 0)
   4665 			wm_put_hw_semaphore_82573(sc);
   4666 		break;
   4667 	default:
   4668 		break;
   4669 	}
   4670 
   4671 	/* Set Phy Config Counter to 50msec */
   4672 	if (sc->sc_type == WM_T_PCH2) {
   4673 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4674 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4675 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4676 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4677 	}
   4678 
   4679 	if (phy_reset != 0)
   4680 		wm_get_cfg_done(sc);
   4681 
   4682 	/* reload EEPROM */
   4683 	switch (sc->sc_type) {
   4684 	case WM_T_82542_2_0:
   4685 	case WM_T_82542_2_1:
   4686 	case WM_T_82543:
   4687 	case WM_T_82544:
   4688 		delay(10);
   4689 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4690 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4691 		CSR_WRITE_FLUSH(sc);
   4692 		delay(2000);
   4693 		break;
   4694 	case WM_T_82540:
   4695 	case WM_T_82545:
   4696 	case WM_T_82545_3:
   4697 	case WM_T_82546:
   4698 	case WM_T_82546_3:
   4699 		delay(5*1000);
   4700 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4701 		break;
   4702 	case WM_T_82541:
   4703 	case WM_T_82541_2:
   4704 	case WM_T_82547:
   4705 	case WM_T_82547_2:
   4706 		delay(20000);
   4707 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4708 		break;
   4709 	case WM_T_82571:
   4710 	case WM_T_82572:
   4711 	case WM_T_82573:
   4712 	case WM_T_82574:
   4713 	case WM_T_82583:
   4714 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4715 			delay(10);
   4716 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4717 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4718 			CSR_WRITE_FLUSH(sc);
   4719 		}
   4720 		/* check EECD_EE_AUTORD */
   4721 		wm_get_auto_rd_done(sc);
   4722 		/*
    4723 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   4724 		 * is set.
   4725 		 */
   4726 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4727 		    || (sc->sc_type == WM_T_82583))
   4728 			delay(25*1000);
   4729 		break;
   4730 	case WM_T_82575:
   4731 	case WM_T_82576:
   4732 	case WM_T_82580:
   4733 	case WM_T_I350:
   4734 	case WM_T_I354:
   4735 	case WM_T_I210:
   4736 	case WM_T_I211:
   4737 	case WM_T_80003:
   4738 		/* check EECD_EE_AUTORD */
   4739 		wm_get_auto_rd_done(sc);
   4740 		break;
   4741 	case WM_T_ICH8:
   4742 	case WM_T_ICH9:
   4743 	case WM_T_ICH10:
   4744 	case WM_T_PCH:
   4745 	case WM_T_PCH2:
   4746 	case WM_T_PCH_LPT:
   4747 	case WM_T_PCH_SPT:
   4748 	case WM_T_PCH_CNP:
   4749 		break;
   4750 	default:
   4751 		panic("%s: unknown type\n", __func__);
   4752 	}
   4753 
   4754 	/* Check whether EEPROM is present or not */
   4755 	switch (sc->sc_type) {
   4756 	case WM_T_82575:
   4757 	case WM_T_82576:
   4758 	case WM_T_82580:
   4759 	case WM_T_I350:
   4760 	case WM_T_I354:
   4761 	case WM_T_ICH8:
   4762 	case WM_T_ICH9:
   4763 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4764 			/* Not found */
   4765 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4766 			if (sc->sc_type == WM_T_82575)
   4767 				wm_reset_init_script_82575(sc);
   4768 		}
   4769 		break;
   4770 	default:
   4771 		break;
   4772 	}
   4773 
   4774 	if (phy_reset != 0)
   4775 		wm_phy_post_reset(sc);
   4776 
   4777 	if ((sc->sc_type == WM_T_82580)
   4778 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4779 		/* clear global device reset status bit */
   4780 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4781 	}
   4782 
   4783 	/* Clear any pending interrupt events. */
   4784 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4785 	reg = CSR_READ(sc, WMREG_ICR);
   4786 	if (wm_is_using_msix(sc)) {
   4787 		if (sc->sc_type != WM_T_82574) {
   4788 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4789 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4790 		} else
   4791 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4792 	}
   4793 
   4794 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4795 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4796 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4797 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4798 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4799 		reg |= KABGTXD_BGSQLBIAS;
   4800 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4801 	}
   4802 
   4803 	/* reload sc_ctrl */
   4804 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4805 
   4806 	if (sc->sc_type == WM_T_I354) {
   4807 #if 0
   4808 		/* I354 uses an external PHY */
   4809 		wm_set_eee_i354(sc);
   4810 #endif
   4811 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4812 		wm_set_eee_i350(sc);
   4813 
   4814 	/*
   4815 	 * For PCH, this write will make sure that any noise will be detected
   4816 	 * as a CRC error and be dropped rather than show up as a bad packet
    4817 	 * to the DMA engine.
   4818 	 */
   4819 	if (sc->sc_type == WM_T_PCH)
   4820 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4821 
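         	/* Clear the Wakeup Control register */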
   4822 	if (sc->sc_type >= WM_T_82544)
   4823 		CSR_WRITE(sc, WMREG_WUC, 0);
   4824 
   4825 	wm_reset_mdicnfg_82580(sc);
   4826 
   4827 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4828 		wm_pll_workaround_i210(sc);
   4829 
   4830 	if (sc->sc_type == WM_T_80003) {
   4831 		/* default to TRUE to enable the MDIC W/A */
   4832 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4833 
   4834 		rv = wm_kmrn_readreg(sc,
   4835 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4836 		if (rv == 0) {
   4837 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4838 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4839 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4840 			else
   4841 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4842 		}
   4843 	}
   4844 }
   4845 
   4846 /*
   4847  * wm_add_rxbuf:
   4848  *
    4849  *	Add a receive buffer to the indicated descriptor.
   4850  */
   4851 static int
   4852 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4853 {
   4854 	struct wm_softc *sc = rxq->rxq_sc;
   4855 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4856 	struct mbuf *m;
   4857 	int error;
   4858 
   4859 	KASSERT(mutex_owned(rxq->rxq_lock));
   4860 
   4861 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4862 	if (m == NULL)
   4863 		return ENOBUFS;
   4864 
   4865 	MCLGET(m, M_DONTWAIT);
   4866 	if ((m->m_flags & M_EXT) == 0) {
   4867 		m_freem(m);
   4868 		return ENOBUFS;
   4869 	}
   4870 
   4871 	if (rxs->rxs_mbuf != NULL)
   4872 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4873 
   4874 	rxs->rxs_mbuf = m;
   4875 
   4876 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4877 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4878 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4879 	if (error) {
   4880 		/* XXX XXX XXX */
   4881 		aprint_error_dev(sc->sc_dev,
   4882 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4883 		panic("wm_add_rxbuf");
   4884 	}
   4885 
   4886 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4887 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4888 
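         	/*
         	 * On NEWQUEUE (82575 and later) hardware, only write the RX
         	 * descriptor while the receiver is enabled; otherwise it is
         	 * initialized once RCTL.EN is set (see wm_init_locked()).
         	 */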
   4889 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4890 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4891 			wm_init_rxdesc(rxq, idx);
   4892 	} else
   4893 		wm_init_rxdesc(rxq, idx);
   4894 
   4895 	return 0;
   4896 }
   4897 
   4898 /*
   4899  * wm_rxdrain:
   4900  *
   4901  *	Drain the receive queue.
   4902  */
   4903 static void
   4904 wm_rxdrain(struct wm_rxqueue *rxq)
   4905 {
   4906 	struct wm_softc *sc = rxq->rxq_sc;
   4907 	struct wm_rxsoft *rxs;
   4908 	int i;
   4909 
   4910 	KASSERT(mutex_owned(rxq->rxq_lock));
   4911 
   4912 	for (i = 0; i < WM_NRXDESC; i++) {
   4913 		rxs = &rxq->rxq_soft[i];
   4914 		if (rxs->rxs_mbuf != NULL) {
   4915 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4916 			m_freem(rxs->rxs_mbuf);
   4917 			rxs->rxs_mbuf = NULL;
   4918 		}
   4919 	}
   4920 }
   4921 
   4922 /*
    4923  * Set up registers for RSS.
    4924  *
    4925  * XXX VMDq is not supported yet.
   4926  */
   4927 static void
   4928 wm_init_rss(struct wm_softc *sc)
   4929 {
   4930 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4931 	int i;
   4932 
   4933 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   4934 
   4935 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4936 		int qid, reta_ent;
   4937 
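         		/* Distribute RETA entries across the queues round-robin. */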
   4938 		qid  = i % sc->sc_nqueues;
   4939 		switch (sc->sc_type) {
   4940 		case WM_T_82574:
   4941 			reta_ent = __SHIFTIN(qid,
   4942 			    RETA_ENT_QINDEX_MASK_82574);
   4943 			break;
   4944 		case WM_T_82575:
   4945 			reta_ent = __SHIFTIN(qid,
   4946 			    RETA_ENT_QINDEX1_MASK_82575);
   4947 			break;
   4948 		default:
   4949 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4950 			break;
   4951 		}
   4952 
   4953 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4954 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4955 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4956 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4957 	}
   4958 
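         	/* Load the kernel's global RSS key into the RSSRK registers. */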
   4959 	rss_getkey((uint8_t *)rss_key);
   4960 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4961 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4962 
   4963 	if (sc->sc_type == WM_T_82574)
   4964 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4965 	else
   4966 		mrqc = MRQC_ENABLE_RSS_MQ;
   4967 
   4968 	/*
   4969 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4970 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4971 	 */
   4972 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4973 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4974 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4975 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4976 
   4977 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4978 }
   4979 
   4980 /*
    4981  * Adjust the numbers of TX and RX queues which the system actually uses.
    4982  *
    4983  * The numbers are affected by the parameters below:
    4984  *     - The number of hardware queues
   4985  *     - The number of MSI-X vectors (= "nvectors" argument)
   4986  *     - ncpu
   4987  */
   4988 static void
   4989 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4990 {
   4991 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4992 
   4993 	if (nvectors < 2) {
   4994 		sc->sc_nqueues = 1;
   4995 		return;
   4996 	}
   4997 
   4998 	switch (sc->sc_type) {
   4999 	case WM_T_82572:
   5000 		hw_ntxqueues = 2;
   5001 		hw_nrxqueues = 2;
   5002 		break;
   5003 	case WM_T_82574:
   5004 		hw_ntxqueues = 2;
   5005 		hw_nrxqueues = 2;
   5006 		break;
   5007 	case WM_T_82575:
   5008 		hw_ntxqueues = 4;
   5009 		hw_nrxqueues = 4;
   5010 		break;
   5011 	case WM_T_82576:
   5012 		hw_ntxqueues = 16;
   5013 		hw_nrxqueues = 16;
   5014 		break;
   5015 	case WM_T_82580:
   5016 	case WM_T_I350:
   5017 	case WM_T_I354:
   5018 		hw_ntxqueues = 8;
   5019 		hw_nrxqueues = 8;
   5020 		break;
   5021 	case WM_T_I210:
   5022 		hw_ntxqueues = 4;
   5023 		hw_nrxqueues = 4;
   5024 		break;
   5025 	case WM_T_I211:
   5026 		hw_ntxqueues = 2;
   5027 		hw_nrxqueues = 2;
   5028 		break;
   5029 		/*
    5030 		 * As the ethernet controllers below do not support MSI-X,
    5031 		 * this driver does not use multiqueue on them:
   5032 		 *     - WM_T_80003
   5033 		 *     - WM_T_ICH8
   5034 		 *     - WM_T_ICH9
   5035 		 *     - WM_T_ICH10
   5036 		 *     - WM_T_PCH
   5037 		 *     - WM_T_PCH2
   5038 		 *     - WM_T_PCH_LPT
   5039 		 */
   5040 	default:
   5041 		hw_ntxqueues = 1;
   5042 		hw_nrxqueues = 1;
   5043 		break;
   5044 	}
   5045 
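         	/*
         	 * The result below is min(hardware queues, nvectors - 1, ncpu);
         	 * e.g. an 82576 (16 hardware queues) with 5 MSI-X vectors on a
         	 * 4-CPU machine uses 4 queue pairs.
         	 */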
   5046 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5047 
   5048 	/*
    5049 	 * As more queues than MSI-X vectors cannot improve scaling, we
    5050 	 * limit the number of queues actually used.
   5051 	 */
   5052 	if (nvectors < hw_nqueues + 1)
   5053 		sc->sc_nqueues = nvectors - 1;
   5054 	else
   5055 		sc->sc_nqueues = hw_nqueues;
   5056 
   5057 	/*
    5058 	 * As more queues than CPUs cannot improve scaling, we limit
    5059 	 * the number of queues actually used.
   5060 	 */
   5061 	if (ncpu < sc->sc_nqueues)
   5062 		sc->sc_nqueues = ncpu;
   5063 }
   5064 
   5065 static inline bool
   5066 wm_is_using_msix(struct wm_softc *sc)
   5067 {
   5068 
   5069 	return (sc->sc_nintrs > 1);
   5070 }
   5071 
   5072 static inline bool
   5073 wm_is_using_multiqueue(struct wm_softc *sc)
   5074 {
   5075 
   5076 	return (sc->sc_nqueues > 1);
   5077 }
   5078 
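         /*
          * wm_softint_establish:
          *
          *	Establish a softint to defer per-queue TX/RX processing
          *	from the hardware interrupt to softint context.
          */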
   5079 static int
   5080 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5081 {
   5082 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5083 	wmq->wmq_id = qidx;
   5084 	wmq->wmq_intr_idx = intr_idx;
   5085 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5086 #ifdef WM_MPSAFE
   5087 	    | SOFTINT_MPSAFE
   5088 #endif
   5089 	    , wm_handle_queue, wmq);
   5090 	if (wmq->wmq_si != NULL)
   5091 		return 0;
   5092 
   5093 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5094 	    wmq->wmq_id);
   5095 
   5096 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5097 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5098 	return ENOMEM;
   5099 }
   5100 
   5101 /*
    5102  * Both single-interrupt MSI and INTx can use this function.
   5103  */
   5104 static int
   5105 wm_setup_legacy(struct wm_softc *sc)
   5106 {
   5107 	pci_chipset_tag_t pc = sc->sc_pc;
   5108 	const char *intrstr = NULL;
   5109 	char intrbuf[PCI_INTRSTR_LEN];
   5110 	int error;
   5111 
   5112 	error = wm_alloc_txrx_queues(sc);
   5113 	if (error) {
   5114 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5115 		    error);
   5116 		return ENOMEM;
   5117 	}
   5118 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5119 	    sizeof(intrbuf));
   5120 #ifdef WM_MPSAFE
   5121 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5122 #endif
   5123 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5124 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5125 	if (sc->sc_ihs[0] == NULL) {
   5126 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5127 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5128 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5129 		return ENOMEM;
   5130 	}
   5131 
   5132 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5133 	sc->sc_nintrs = 1;
   5134 
   5135 	return wm_softint_establish(sc, 0, 0);
   5136 }
   5137 
   5138 static int
   5139 wm_setup_msix(struct wm_softc *sc)
   5140 {
   5141 	void *vih;
   5142 	kcpuset_t *affinity;
   5143 	int qidx, error, intr_idx, txrx_established;
   5144 	pci_chipset_tag_t pc = sc->sc_pc;
   5145 	const char *intrstr = NULL;
   5146 	char intrbuf[PCI_INTRSTR_LEN];
   5147 	char intr_xname[INTRDEVNAMEBUF];
   5148 
   5149 	if (sc->sc_nqueues < ncpu) {
   5150 		/*
    5151 		 * To avoid contending with other devices' interrupts, the
    5152 		 * affinity of TX/RX interrupts starts from CPU#1.
   5153 		 */
   5154 		sc->sc_affinity_offset = 1;
   5155 	} else {
   5156 		/*
    5157 		 * In this case, this device uses all CPUs, so we unify the
    5158 		 * affinity cpu_index with the MSI-X vector number for readability.
   5159 		 */
   5160 		sc->sc_affinity_offset = 0;
   5161 	}
   5162 
   5163 	error = wm_alloc_txrx_queues(sc);
   5164 	if (error) {
   5165 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5166 		    error);
   5167 		return ENOMEM;
   5168 	}
   5169 
   5170 	kcpuset_create(&affinity, false);
   5171 	intr_idx = 0;
   5172 
   5173 	/*
   5174 	 * TX and RX
   5175 	 */
   5176 	txrx_established = 0;
   5177 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5178 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5179 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5180 
   5181 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5182 		    sizeof(intrbuf));
   5183 #ifdef WM_MPSAFE
   5184 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5185 		    PCI_INTR_MPSAFE, true);
   5186 #endif
   5187 		memset(intr_xname, 0, sizeof(intr_xname));
   5188 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5189 		    device_xname(sc->sc_dev), qidx);
   5190 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5191 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5192 		if (vih == NULL) {
   5193 			aprint_error_dev(sc->sc_dev,
   5194 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5195 			    intrstr ? " at " : "",
   5196 			    intrstr ? intrstr : "");
   5197 
   5198 			goto fail;
   5199 		}
   5200 		kcpuset_zero(affinity);
   5201 		/* Round-robin affinity */
   5202 		kcpuset_set(affinity, affinity_to);
   5203 		error = interrupt_distribute(vih, affinity, NULL);
   5204 		if (error == 0) {
   5205 			aprint_normal_dev(sc->sc_dev,
   5206 			    "for TX and RX interrupting at %s affinity to %u\n",
   5207 			    intrstr, affinity_to);
   5208 		} else {
   5209 			aprint_normal_dev(sc->sc_dev,
   5210 			    "for TX and RX interrupting at %s\n", intrstr);
   5211 		}
   5212 		sc->sc_ihs[intr_idx] = vih;
   5213 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5214 			goto fail;
   5215 		txrx_established++;
   5216 		intr_idx++;
   5217 	}
   5218 
   5219 	/*
   5220 	 * LINK
   5221 	 */
   5222 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5223 	    sizeof(intrbuf));
   5224 #ifdef WM_MPSAFE
   5225 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5226 #endif
   5227 	memset(intr_xname, 0, sizeof(intr_xname));
   5228 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5229 	    device_xname(sc->sc_dev));
   5230 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5231 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5232 	if (vih == NULL) {
   5233 		aprint_error_dev(sc->sc_dev,
   5234 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5235 		    intrstr ? " at " : "",
   5236 		    intrstr ? intrstr : "");
   5237 
   5238 		goto fail;
   5239 	}
    5240 	/* Keep the default affinity for the LINK interrupt */
   5241 	aprint_normal_dev(sc->sc_dev,
   5242 	    "for LINK interrupting at %s\n", intrstr);
   5243 	sc->sc_ihs[intr_idx] = vih;
   5244 	sc->sc_link_intr_idx = intr_idx;
   5245 
   5246 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5247 	kcpuset_destroy(affinity);
   5248 	return 0;
   5249 
   5250  fail:
   5251 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5252 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5253 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5254 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5255 	}
   5256 
   5257 	kcpuset_destroy(affinity);
   5258 	return ENOMEM;
   5259 }
   5260 
   5261 static void
   5262 wm_unset_stopping_flags(struct wm_softc *sc)
   5263 {
   5264 	int i;
   5265 
   5266 	KASSERT(WM_CORE_LOCKED(sc));
   5267 
    5268 	/* Must unset the stopping flags in ascending order. */
   5271 	for (i = 0; i < sc->sc_nqueues; i++) {
   5272 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5273 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5274 
   5275 		mutex_enter(txq->txq_lock);
   5276 		txq->txq_stopping = false;
   5277 		mutex_exit(txq->txq_lock);
   5278 
   5279 		mutex_enter(rxq->rxq_lock);
   5280 		rxq->rxq_stopping = false;
   5281 		mutex_exit(rxq->rxq_lock);
   5282 	}
   5283 
   5284 	sc->sc_core_stopping = false;
   5285 }
   5286 
   5287 static void
   5288 wm_set_stopping_flags(struct wm_softc *sc)
   5289 {
   5290 	int i;
   5291 
   5292 	KASSERT(WM_CORE_LOCKED(sc));
   5293 
   5294 	sc->sc_core_stopping = true;
   5295 
    5296 	/* Must set the stopping flags in ascending order. */
   5299 	for (i = 0; i < sc->sc_nqueues; i++) {
   5300 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5301 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5302 
   5303 		mutex_enter(rxq->rxq_lock);
   5304 		rxq->rxq_stopping = true;
   5305 		mutex_exit(rxq->rxq_lock);
   5306 
   5307 		mutex_enter(txq->txq_lock);
   5308 		txq->txq_stopping = true;
   5309 		mutex_exit(txq->txq_lock);
   5310 	}
   5311 }
   5312 
   5313 /*
    5314  * Write the interrupt interval value to the ITR or EITR register.
   5315  */
   5316 static void
   5317 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5318 {
   5319 
   5320 	if (!wmq->wmq_set_itr)
   5321 		return;
   5322 
   5323 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5324 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5325 
   5326 		/*
    5327 		 * The 82575 doesn't have the CNT_INGR field, so
    5328 		 * overwrite the counter field in software.
   5329 		 */
   5330 		if (sc->sc_type == WM_T_82575)
   5331 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5332 		else
   5333 			eitr |= EITR_CNT_INGR;
   5334 
   5335 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5336 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5337 		/*
    5338 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5339 		 * the multiqueue function with MSI-X.
   5340 		 */
   5341 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5342 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5343 	} else {
   5344 		KASSERT(wmq->wmq_id == 0);
   5345 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5346 	}
   5347 
   5348 	wmq->wmq_set_itr = false;
   5349 }
   5350 
   5351 /*
   5352  * TODO
    5353  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5354  * but it does not fit wm(4), so AIM is disabled until we find an
    5355  * appropriate ITR calculation.
   5356  */
   5357 /*
    5358  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5359  * write to the register. This function does not write the ITR/EITR register.
   5360  */
   5361 static void
   5362 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5363 {
   5364 #ifdef NOTYET
   5365 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5366 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5367 	uint32_t avg_size = 0;
   5368 	uint32_t new_itr;
   5369 
   5370 	if (rxq->rxq_packets)
   5371 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5372 	if (txq->txq_packets)
   5373 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5374 
   5375 	if (avg_size == 0) {
   5376 		new_itr = 450; /* restore default value */
   5377 		goto out;
   5378 	}
   5379 
   5380 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5381 	avg_size += 24;
   5382 
   5383 	/* Don't starve jumbo frames */
   5384 	avg_size = uimin(avg_size, 3000);
   5385 
   5386 	/* Give a little boost to mid-size frames */
   5387 	if ((avg_size > 300) && (avg_size < 1200))
   5388 		new_itr = avg_size / 3;
   5389 	else
   5390 		new_itr = avg_size / 2;
   5391 
   5392 out:
   5393 	/*
    5394 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5395 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5396 	 */
   5397 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5398 		new_itr *= 4;
   5399 
   5400 	if (new_itr != wmq->wmq_itr) {
   5401 		wmq->wmq_itr = new_itr;
   5402 		wmq->wmq_set_itr = true;
   5403 	} else
   5404 		wmq->wmq_set_itr = false;
   5405 
   5406 	rxq->rxq_packets = 0;
   5407 	rxq->rxq_bytes = 0;
   5408 	txq->txq_packets = 0;
   5409 	txq->txq_bytes = 0;
   5410 #endif
   5411 }
   5412 
   5413 /*
   5414  * wm_init:		[ifnet interface function]
   5415  *
   5416  *	Initialize the interface.
   5417  */
   5418 static int
   5419 wm_init(struct ifnet *ifp)
   5420 {
   5421 	struct wm_softc *sc = ifp->if_softc;
   5422 	int ret;
   5423 
   5424 	WM_CORE_LOCK(sc);
   5425 	ret = wm_init_locked(ifp);
   5426 	WM_CORE_UNLOCK(sc);
   5427 
   5428 	return ret;
   5429 }
   5430 
   5431 static int
   5432 wm_init_locked(struct ifnet *ifp)
   5433 {
   5434 	struct wm_softc *sc = ifp->if_softc;
   5435 	int i, j, trynum, error = 0;
   5436 	uint32_t reg;
   5437 
   5438 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5439 		device_xname(sc->sc_dev), __func__));
   5440 	KASSERT(WM_CORE_LOCKED(sc));
   5441 
   5442 	/*
    5443 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5444 	 * There is a small but measurable benefit to avoiding the adjustment
   5445 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5446 	 * on such platforms.  One possibility is that the DMA itself is
   5447 	 * slightly more efficient if the front of the entire packet (instead
   5448 	 * of the front of the headers) is aligned.
   5449 	 *
   5450 	 * Note we must always set align_tweak to 0 if we are using
   5451 	 * jumbo frames.
   5452 	 */
   5453 #ifdef __NO_STRICT_ALIGNMENT
   5454 	sc->sc_align_tweak = 0;
   5455 #else
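         	/*
         	 * A 2-byte tweak offsets the 14-byte Ethernet header so that
         	 * the IP header behind it starts on a 4-byte boundary.
         	 */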
   5456 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5457 		sc->sc_align_tweak = 0;
   5458 	else
   5459 		sc->sc_align_tweak = 2;
   5460 #endif /* __NO_STRICT_ALIGNMENT */
   5461 
   5462 	/* Cancel any pending I/O. */
   5463 	wm_stop_locked(ifp, 0);
   5464 
   5465 	/* update statistics before reset */
   5466 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5467 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5468 
   5469 	/* PCH_SPT hardware workaround */
   5470 	if (sc->sc_type == WM_T_PCH_SPT)
   5471 		wm_flush_desc_rings(sc);
   5472 
   5473 	/* Reset the chip to a known state. */
   5474 	wm_reset(sc);
   5475 
   5476 	/*
   5477 	 * AMT based hardware can now take control from firmware
   5478 	 * Do this after reset.
   5479 	 */
   5480 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5481 		wm_get_hw_control(sc);
   5482 
   5483 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5484 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5485 		wm_legacy_irq_quirk_spt(sc);
   5486 
   5487 	/* Init hardware bits */
   5488 	wm_initialize_hardware_bits(sc);
   5489 
   5490 	/* Reset the PHY. */
   5491 	if (sc->sc_flags & WM_F_HAS_MII)
   5492 		wm_gmii_reset(sc);
   5493 
   5494 	/* Calculate (E)ITR value */
   5495 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5496 		/*
    5497 		 * For NEWQUEUE's EITR (except for the 82575).
    5498 		 * The 82575's EITR should be set to the same throttling value
    5499 		 * as the other old controllers' ITR because the interrupt/sec
    5500 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5501 		 *
    5502 		 * The 82574's EITR should be set to the same throttling value as its ITR.
    5503 		 *
    5504 		 * For N interrupts/sec, set this value to:
    5505 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5506 		 */
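         		/* 450 -> 1,000,000 / 450 = ~2222 interrupts/sec */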
   5507 		sc->sc_itr_init = 450;
   5508 	} else if (sc->sc_type >= WM_T_82543) {
   5509 		/*
   5510 		 * Set up the interrupt throttling register (units of 256ns)
   5511 		 * Note that a footnote in Intel's documentation says this
   5512 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5513 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5514 		 * that that is also true for the 1024ns units of the other
   5515 		 * interrupt-related timer registers -- so, really, we ought
   5516 		 * to divide this value by 4 when the link speed is low.
   5517 		 *
   5518 		 * XXX implement this division at link speed change!
   5519 		 */
   5520 
   5521 		/*
   5522 		 * For N interrupts/sec, set this value to:
   5523 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5524 		 * absolute and packet timer values to this value
   5525 		 * divided by 4 to get "simple timer" behavior.
   5526 		 */
   5527 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5528 	}
   5529 
   5530 	error = wm_init_txrx_queues(sc);
   5531 	if (error)
   5532 		goto out;
   5533 
   5534 	/*
   5535 	 * Clear out the VLAN table -- we don't use it (yet).
   5536 	 */
   5537 	CSR_WRITE(sc, WMREG_VET, 0);
   5538 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5539 		trynum = 10; /* Due to hw errata */
   5540 	else
   5541 		trynum = 1;
   5542 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5543 		for (j = 0; j < trynum; j++)
   5544 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5545 
   5546 	/*
   5547 	 * Set up flow-control parameters.
   5548 	 *
   5549 	 * XXX Values could probably stand some tuning.
   5550 	 */
   5551 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5552 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5553 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5554 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5555 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5556 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5557 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5558 	}
   5559 
   5560 	sc->sc_fcrtl = FCRTL_DFLT;
   5561 	if (sc->sc_type < WM_T_82543) {
   5562 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5563 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5564 	} else {
   5565 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5566 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5567 	}
   5568 
   5569 	if (sc->sc_type == WM_T_80003)
   5570 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5571 	else
   5572 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5573 
   5574 	/* Writes the control register. */
   5575 	wm_set_vlan(sc);
   5576 
   5577 	if (sc->sc_flags & WM_F_HAS_MII) {
   5578 		uint16_t kmreg;
   5579 
   5580 		switch (sc->sc_type) {
   5581 		case WM_T_80003:
   5582 		case WM_T_ICH8:
   5583 		case WM_T_ICH9:
   5584 		case WM_T_ICH10:
   5585 		case WM_T_PCH:
   5586 		case WM_T_PCH2:
   5587 		case WM_T_PCH_LPT:
   5588 		case WM_T_PCH_SPT:
   5589 		case WM_T_PCH_CNP:
   5590 			/*
    5591 			 * Set the MAC to wait the maximum time between each
    5592 			 * iteration, and increase the max iterations when
   5593 			 * polling the phy; this fixes erroneous timeouts at
   5594 			 * 10Mbps.
   5595 			 */
   5596 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5597 			    0xFFFF);
   5598 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5599 			    &kmreg);
   5600 			kmreg |= 0x3F;
   5601 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5602 			    kmreg);
   5603 			break;
   5604 		default:
   5605 			break;
   5606 		}
   5607 
   5608 		if (sc->sc_type == WM_T_80003) {
   5609 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5610 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5611 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5612 
    5613 			/* Bypass the RX and TX FIFOs */
   5614 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5615 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5616 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5617 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5618 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5619 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5620 		}
   5621 	}
   5622 #if 0
   5623 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5624 #endif
   5625 
   5626 	/* Set up checksum offload parameters. */
   5627 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5628 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5629 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5630 		reg |= RXCSUM_IPOFL;
   5631 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5632 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5633 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5634 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5635 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5636 
   5637 	/* Set registers about MSI-X */
   5638 	if (wm_is_using_msix(sc)) {
   5639 		uint32_t ivar;
   5640 		struct wm_queue *wmq;
   5641 		int qid, qintr_idx;
   5642 
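         		/*
         		 * Route each interrupt cause to its MSI-X vector: the
         		 * 82575 uses the MSIXBM bitmap registers, the 82574 uses
         		 * a single IVAR register, and the 82576 and newer use
         		 * per-queue IVAR registers.
         		 */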
   5643 		if (sc->sc_type == WM_T_82575) {
   5644 			/* Interrupt control */
   5645 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5646 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5647 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5648 
   5649 			/* TX and RX */
   5650 			for (i = 0; i < sc->sc_nqueues; i++) {
   5651 				wmq = &sc->sc_queue[i];
   5652 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5653 				    EITR_TX_QUEUE(wmq->wmq_id)
   5654 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5655 			}
   5656 			/* Link status */
   5657 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5658 			    EITR_OTHER);
   5659 		} else if (sc->sc_type == WM_T_82574) {
   5660 			/* Interrupt control */
   5661 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5662 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5663 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5664 
   5665 			/*
    5666 			 * Work around an issue with spurious interrupts
    5667 			 * in MSI-X mode.
    5668 			 * At wm_initialize_hardware_bits(), sc_nintrs has
    5669 			 * not been initialized yet, so re-initialize WMREG_RFCTL here.
   5670 			 */
   5671 			reg = CSR_READ(sc, WMREG_RFCTL);
   5672 			reg |= WMREG_RFCTL_ACKDIS;
   5673 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5674 
   5675 			ivar = 0;
   5676 			/* TX and RX */
   5677 			for (i = 0; i < sc->sc_nqueues; i++) {
   5678 				wmq = &sc->sc_queue[i];
   5679 				qid = wmq->wmq_id;
   5680 				qintr_idx = wmq->wmq_intr_idx;
   5681 
   5682 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5683 				    IVAR_TX_MASK_Q_82574(qid));
   5684 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5685 				    IVAR_RX_MASK_Q_82574(qid));
   5686 			}
   5687 			/* Link status */
   5688 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5689 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5690 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5691 		} else {
   5692 			/* Interrupt control */
   5693 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5694 			    | GPIE_EIAME | GPIE_PBA);
   5695 
   5696 			switch (sc->sc_type) {
   5697 			case WM_T_82580:
   5698 			case WM_T_I350:
   5699 			case WM_T_I354:
   5700 			case WM_T_I210:
   5701 			case WM_T_I211:
   5702 				/* TX and RX */
   5703 				for (i = 0; i < sc->sc_nqueues; i++) {
   5704 					wmq = &sc->sc_queue[i];
   5705 					qid = wmq->wmq_id;
   5706 					qintr_idx = wmq->wmq_intr_idx;
   5707 
   5708 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5709 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5710 					ivar |= __SHIFTIN((qintr_idx
   5711 						| IVAR_VALID),
   5712 					    IVAR_TX_MASK_Q(qid));
   5713 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5714 					ivar |= __SHIFTIN((qintr_idx
   5715 						| IVAR_VALID),
   5716 					    IVAR_RX_MASK_Q(qid));
   5717 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5718 				}
   5719 				break;
   5720 			case WM_T_82576:
   5721 				/* TX and RX */
   5722 				for (i = 0; i < sc->sc_nqueues; i++) {
   5723 					wmq = &sc->sc_queue[i];
   5724 					qid = wmq->wmq_id;
   5725 					qintr_idx = wmq->wmq_intr_idx;
   5726 
   5727 					ivar = CSR_READ(sc,
   5728 					    WMREG_IVAR_Q_82576(qid));
   5729 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5730 					ivar |= __SHIFTIN((qintr_idx
   5731 						| IVAR_VALID),
   5732 					    IVAR_TX_MASK_Q_82576(qid));
   5733 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5734 					ivar |= __SHIFTIN((qintr_idx
   5735 						| IVAR_VALID),
   5736 					    IVAR_RX_MASK_Q_82576(qid));
   5737 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5738 					    ivar);
   5739 				}
   5740 				break;
   5741 			default:
   5742 				break;
   5743 			}
   5744 
   5745 			/* Link status */
   5746 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5747 			    IVAR_MISC_OTHER);
   5748 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5749 		}
   5750 
   5751 		if (wm_is_using_multiqueue(sc)) {
   5752 			wm_init_rss(sc);
   5753 
   5754 			/*
   5755 			** NOTE: Receive Full-Packet Checksum Offload
    5756 			** is mutually exclusive with Multiqueue. However,
    5757 			** this is not the same as the TCP/IP checksum
    5758 			** offloads, which still work.
   5759 			*/
   5760 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5761 			reg |= RXCSUM_PCSD;
   5762 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5763 		}
   5764 	}
   5765 
   5766 	/* Set up the interrupt registers. */
   5767 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5768 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5769 	    ICR_RXO | ICR_RXT0;
   5770 	if (wm_is_using_msix(sc)) {
   5771 		uint32_t mask;
   5772 		struct wm_queue *wmq;
   5773 
   5774 		switch (sc->sc_type) {
   5775 		case WM_T_82574:
   5776 			mask = 0;
   5777 			for (i = 0; i < sc->sc_nqueues; i++) {
   5778 				wmq = &sc->sc_queue[i];
   5779 				mask |= ICR_TXQ(wmq->wmq_id);
   5780 				mask |= ICR_RXQ(wmq->wmq_id);
   5781 			}
   5782 			mask |= ICR_OTHER;
   5783 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5784 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5785 			break;
   5786 		default:
   5787 			if (sc->sc_type == WM_T_82575) {
   5788 				mask = 0;
   5789 				for (i = 0; i < sc->sc_nqueues; i++) {
   5790 					wmq = &sc->sc_queue[i];
   5791 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5792 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5793 				}
   5794 				mask |= EITR_OTHER;
   5795 			} else {
   5796 				mask = 0;
   5797 				for (i = 0; i < sc->sc_nqueues; i++) {
   5798 					wmq = &sc->sc_queue[i];
   5799 					mask |= 1 << wmq->wmq_intr_idx;
   5800 				}
   5801 				mask |= 1 << sc->sc_link_intr_idx;
   5802 			}
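         			/*
         			 * EIAC selects vectors that are auto-cleared,
         			 * EIAM selects vectors that are auto-masked and
         			 * EIMS unmasks (enables) the vectors.
         			 */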
   5803 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5804 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5805 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5806 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5807 			break;
   5808 		}
   5809 	} else
   5810 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5811 
   5812 	/* Set up the inter-packet gap. */
   5813 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5814 
   5815 	if (sc->sc_type >= WM_T_82543) {
   5816 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5817 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5818 			wm_itrs_writereg(sc, wmq);
   5819 		}
   5820 		/*
    5821 		 * Link interrupts occur much less frequently than
    5822 		 * TX and RX interrupts, so we don't tune the
    5823 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5824 		 * FreeBSD's if_igb does.
   5825 		 */
   5826 	}
   5827 
   5828 	/* Set the VLAN ethernetype. */
   5829 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5830 
   5831 	/*
   5832 	 * Set up the transmit control register; we start out with
    5833 	 * a collision distance suitable for FDX, but update it when
   5834 	 * we resolve the media type.
   5835 	 */
   5836 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5837 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5838 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5839 	if (sc->sc_type >= WM_T_82571)
   5840 		sc->sc_tctl |= TCTL_MULR;
   5841 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5842 
   5843 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5844 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5845 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5846 	}
   5847 
   5848 	if (sc->sc_type == WM_T_80003) {
   5849 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5850 		reg &= ~TCTL_EXT_GCEX_MASK;
   5851 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5852 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5853 	}
   5854 
   5855 	/* Set the media. */
   5856 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5857 		goto out;
   5858 
   5859 	/* Configure for OS presence */
   5860 	wm_init_manageability(sc);
   5861 
   5862 	/*
   5863 	 * Set up the receive control register; we actually program the
   5864 	 * register when we set the receive filter. Use multicast address
   5865 	 * offset type 0.
   5866 	 *
   5867 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   5868 	 * don't enable that feature.
   5869 	 */
   5870 	sc->sc_mchash_type = 0;
   5871 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5872 	    | RCTL_MO(sc->sc_mchash_type);
   5873 
   5874 	/*
    5875 	 * The 82574 uses the one-buffer extended RX descriptor format.
   5876 	 */
   5877 	if (sc->sc_type == WM_T_82574)
   5878 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5879 
   5880 	/*
    5881 	 * The I350 has a bug where it always strips the CRC whether asked
    5882 	 * to or not, so ask for the stripped CRC here and cope in rxeof.
   5883 	 */
   5884 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5885 	    || (sc->sc_type == WM_T_I210))
   5886 		sc->sc_rctl |= RCTL_SECRC;
   5887 
   5888 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5889 	    && (ifp->if_mtu > ETHERMTU)) {
   5890 		sc->sc_rctl |= RCTL_LPE;
   5891 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5892 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5893 	}
   5894 
   5895 	if (MCLBYTES == 2048)
   5896 		sc->sc_rctl |= RCTL_2k;
   5897 	else {
   5898 		if (sc->sc_type >= WM_T_82543) {
   5899 			switch (MCLBYTES) {
   5900 			case 4096:
   5901 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5902 				break;
   5903 			case 8192:
   5904 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5905 				break;
   5906 			case 16384:
   5907 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5908 				break;
   5909 			default:
   5910 				panic("wm_init: MCLBYTES %d unsupported",
   5911 				    MCLBYTES);
   5912 				break;
   5913 			}
   5914 		} else
   5915 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5916 	}
   5917 
   5918 	/* Enable ECC */
   5919 	switch (sc->sc_type) {
   5920 	case WM_T_82571:
   5921 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5922 		reg |= PBA_ECC_CORR_EN;
   5923 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5924 		break;
   5925 	case WM_T_PCH_LPT:
   5926 	case WM_T_PCH_SPT:
   5927 	case WM_T_PCH_CNP:
   5928 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5929 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5930 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5931 
   5932 		sc->sc_ctrl |= CTRL_MEHE;
   5933 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5934 		break;
   5935 	default:
   5936 		break;
   5937 	}
   5938 
   5939 	/*
   5940 	 * Set the receive filter.
   5941 	 *
   5942 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5943 	 * the setting of RCTL.EN in wm_set_filter()
   5944 	 */
   5945 	wm_set_filter(sc);
   5946 
    5947 	/* On 82575 and later, set RDT only if RX is enabled */
   5948 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5949 		int qidx;
   5950 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5951 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5952 			for (i = 0; i < WM_NRXDESC; i++) {
   5953 				mutex_enter(rxq->rxq_lock);
   5954 				wm_init_rxdesc(rxq, i);
   5955 				mutex_exit(rxq->rxq_lock);
   5957 			}
   5958 		}
   5959 	}
   5960 
   5961 	wm_unset_stopping_flags(sc);
   5962 
   5963 	/* Start the one second link check clock. */
   5964 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5965 
   5966 	/* ...all done! */
   5967 	ifp->if_flags |= IFF_RUNNING;
   5968 	ifp->if_flags &= ~IFF_OACTIVE;
   5969 
   5970  out:
   5971 	sc->sc_if_flags = ifp->if_flags;
   5972 	if (error)
   5973 		log(LOG_ERR, "%s: interface not running\n",
   5974 		    device_xname(sc->sc_dev));
   5975 	return error;
   5976 }
   5977 
   5978 /*
   5979  * wm_stop:		[ifnet interface function]
   5980  *
   5981  *	Stop transmission on the interface.
   5982  */
   5983 static void
   5984 wm_stop(struct ifnet *ifp, int disable)
   5985 {
   5986 	struct wm_softc *sc = ifp->if_softc;
   5987 
   5988 	WM_CORE_LOCK(sc);
   5989 	wm_stop_locked(ifp, disable);
   5990 	WM_CORE_UNLOCK(sc);
   5991 }
   5992 
   5993 static void
   5994 wm_stop_locked(struct ifnet *ifp, int disable)
   5995 {
   5996 	struct wm_softc *sc = ifp->if_softc;
   5997 	struct wm_txsoft *txs;
   5998 	int i, qidx;
   5999 
   6000 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6001 		device_xname(sc->sc_dev), __func__));
   6002 	KASSERT(WM_CORE_LOCKED(sc));
   6003 
   6004 	wm_set_stopping_flags(sc);
   6005 
   6006 	/* Stop the one second clock. */
   6007 	callout_stop(&sc->sc_tick_ch);
   6008 
   6009 	/* Stop the 82547 Tx FIFO stall check timer. */
   6010 	if (sc->sc_type == WM_T_82547)
   6011 		callout_stop(&sc->sc_txfifo_ch);
   6012 
   6013 	if (sc->sc_flags & WM_F_HAS_MII) {
   6014 		/* Down the MII. */
   6015 		mii_down(&sc->sc_mii);
   6016 	} else {
   6017 #if 0
   6018 		/* Should we clear PHY's status properly? */
   6019 		wm_reset(sc);
   6020 #endif
   6021 	}
   6022 
   6023 	/* Stop the transmit and receive processes. */
   6024 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6025 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6026 	sc->sc_rctl &= ~RCTL_EN;
   6027 
   6028 	/*
   6029 	 * Clear the interrupt mask to ensure the device cannot assert its
   6030 	 * interrupt line.
   6031 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6032 	 * service any currently pending or shared interrupt.
   6033 	 */
   6034 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6035 	sc->sc_icr = 0;
   6036 	if (wm_is_using_msix(sc)) {
   6037 		if (sc->sc_type != WM_T_82574) {
   6038 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6039 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6040 		} else
   6041 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6042 	}
   6043 
   6044 	/* Release any queued transmit buffers. */
   6045 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6046 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6047 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6048 		mutex_enter(txq->txq_lock);
   6049 		txq->txq_sending = false; /* ensure watchdog disabled */
   6050 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6051 			txs = &txq->txq_soft[i];
   6052 			if (txs->txs_mbuf != NULL) {
   6053 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6054 				m_freem(txs->txs_mbuf);
   6055 				txs->txs_mbuf = NULL;
   6056 			}
   6057 		}
   6058 		mutex_exit(txq->txq_lock);
   6059 	}
   6060 
   6061 	/* Mark the interface as down and cancel the watchdog timer. */
   6062 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6063 
   6064 	if (disable) {
   6065 		for (i = 0; i < sc->sc_nqueues; i++) {
   6066 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6067 			mutex_enter(rxq->rxq_lock);
   6068 			wm_rxdrain(rxq);
   6069 			mutex_exit(rxq->rxq_lock);
   6070 		}
   6071 	}
   6072 
   6073 #if 0 /* notyet */
   6074 	if (sc->sc_type >= WM_T_82544)
   6075 		CSR_WRITE(sc, WMREG_WUC, 0);
   6076 #endif
   6077 }
   6078 
   6079 static void
   6080 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6081 {
   6082 	struct mbuf *m;
   6083 	int i;
   6084 
   6085 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6086 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6087 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6088 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6089 		    m->m_data, m->m_len, m->m_flags);
   6090 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6091 	    i, i == 1 ? "" : "s");
   6092 }
   6093 
   6094 /*
   6095  * wm_82547_txfifo_stall:
   6096  *
   6097  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6098  *	reset the FIFO pointers, and restart packet transmission.
   6099  */
   6100 static void
   6101 wm_82547_txfifo_stall(void *arg)
   6102 {
   6103 	struct wm_softc *sc = arg;
   6104 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6105 
   6106 	mutex_enter(txq->txq_lock);
   6107 
   6108 	if (txq->txq_stopping)
   6109 		goto out;
   6110 
   6111 	if (txq->txq_fifo_stall) {
   6112 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6113 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6114 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6115 			/*
   6116 			 * Packets have drained.  Stop transmitter, reset
   6117 			 * FIFO pointers, restart transmitter, and kick
   6118 			 * the packet queue.
   6119 			 */
   6120 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6121 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6122 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6123 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6124 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6125 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6126 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6127 			CSR_WRITE_FLUSH(sc);
   6128 
   6129 			txq->txq_fifo_head = 0;
   6130 			txq->txq_fifo_stall = 0;
   6131 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6132 		} else {
   6133 			/*
   6134 			 * Still waiting for packets to drain; try again in
   6135 			 * another tick.
   6136 			 */
   6137 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6138 		}
   6139 	}
   6140 
   6141 out:
   6142 	mutex_exit(txq->txq_lock);
   6143 }
   6144 
   6145 /*
   6146  * wm_82547_txfifo_bugchk:
   6147  *
   6148  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6149  *	prevent enqueueing a packet that would wrap around the end
    6150  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6151  *
   6152  *	We do this by checking the amount of space before the end
   6153  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6154  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6155  *	the internal FIFO pointers to the beginning, and restart
   6156  *	transmission on the interface.
   6157  */
   6158 #define	WM_FIFO_HDR		0x10
   6159 #define	WM_82547_PAD_LEN	0x3e0
   6160 static int
   6161 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6162 {
   6163 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6164 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6165 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
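         	/* e.g. a 1514-byte frame consumes roundup(1514 + 16, 16) = 1536 FIFO bytes */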
   6166 
   6167 	/* Just return if already stalled. */
   6168 	if (txq->txq_fifo_stall)
   6169 		return 1;
   6170 
   6171 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6172 		/* Stall only occurs in half-duplex mode. */
   6173 		goto send_packet;
   6174 	}
   6175 
   6176 	if (len >= WM_82547_PAD_LEN + space) {
   6177 		txq->txq_fifo_stall = 1;
   6178 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6179 		return 1;
   6180 	}
   6181 
   6182  send_packet:
   6183 	txq->txq_fifo_head += len;
   6184 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6185 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6186 
   6187 	return 0;
   6188 }
   6189 
   6190 static int
   6191 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6192 {
   6193 	int error;
   6194 
   6195 	/*
   6196 	 * Allocate the control data structures, and create and load the
   6197 	 * DMA map for it.
   6198 	 *
   6199 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6200 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6201 	 * both sets within the same 4G segment.
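         	 * The 4G constraint is enforced by the boundary argument
         	 * (0x100000000ULL) passed to bus_dmamem_alloc() below.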
   6202 	 */
   6203 	if (sc->sc_type < WM_T_82544)
   6204 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6205 	else
   6206 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6207 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6208 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6209 	else
   6210 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6211 
   6212 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6213 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6214 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6215 		aprint_error_dev(sc->sc_dev,
   6216 		    "unable to allocate TX control data, error = %d\n",
   6217 		    error);
   6218 		goto fail_0;
   6219 	}
   6220 
   6221 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6222 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6223 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6224 		aprint_error_dev(sc->sc_dev,
   6225 		    "unable to map TX control data, error = %d\n", error);
   6226 		goto fail_1;
   6227 	}
   6228 
   6229 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6230 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6231 		aprint_error_dev(sc->sc_dev,
   6232 		    "unable to create TX control data DMA map, error = %d\n",
   6233 		    error);
   6234 		goto fail_2;
   6235 	}
   6236 
   6237 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6238 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6239 		aprint_error_dev(sc->sc_dev,
   6240 		    "unable to load TX control data DMA map, error = %d\n",
   6241 		    error);
   6242 		goto fail_3;
   6243 	}
   6244 
   6245 	return 0;
   6246 
   6247  fail_3:
   6248 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6249  fail_2:
   6250 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6251 	    WM_TXDESCS_SIZE(txq));
   6252  fail_1:
   6253 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6254  fail_0:
   6255 	return error;
   6256 }
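
/*
 * A minimal sketch of how the 4G constraint above is expressed.  The
 * fourth argument of bus_dmamem_alloc() is a boundary the allocation
 * must not cross, so passing 0x100000000ULL keeps the whole
 * descriptor block within one 4G segment (the size here is
 * illustrative):
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
 *	    (bus_size_t)0x100000000ULL, &seg, 1, &rseg, 0);
 *
 * Requesting a single segment also makes the block physically
 * contiguous, so TDBAH/TDBAL can be programmed from one base address
 * via WM_CDTXADDR_HI()/WM_CDTXADDR_LO().
 */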
   6257 
   6258 static void
   6259 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6260 {
   6261 
   6262 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6263 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6264 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6265 	    WM_TXDESCS_SIZE(txq));
   6266 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6267 }
   6268 
   6269 static int
   6270 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6271 {
   6272 	int error;
   6273 	size_t rxq_descs_size;
   6274 
   6275 	/*
   6276 	 * Allocate the control data structures, and create and load the
   6277 	 * DMA map for it.
   6278 	 *
   6279 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6280 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6281 	 * both sets within the same 4G segment.
   6282 	 */
   6283 	rxq->rxq_ndesc = WM_NRXDESC;
   6284 	if (sc->sc_type == WM_T_82574)
   6285 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6286 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6287 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6288 	else
   6289 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6290 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6291 
   6292 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6293 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6294 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6295 		aprint_error_dev(sc->sc_dev,
   6296 		    "unable to allocate RX control data, error = %d\n",
   6297 		    error);
   6298 		goto fail_0;
   6299 	}
   6300 
   6301 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6302 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6303 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6304 		aprint_error_dev(sc->sc_dev,
   6305 		    "unable to map RX control data, error = %d\n", error);
   6306 		goto fail_1;
   6307 	}
   6308 
   6309 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6310 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6311 		aprint_error_dev(sc->sc_dev,
   6312 		    "unable to create RX control data DMA map, error = %d\n",
   6313 		    error);
   6314 		goto fail_2;
   6315 	}
   6316 
   6317 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6318 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6319 		aprint_error_dev(sc->sc_dev,
   6320 		    "unable to load RX control data DMA map, error = %d\n",
   6321 		    error);
   6322 		goto fail_3;
   6323 	}
   6324 
   6325 	return 0;
   6326 
   6327  fail_3:
   6328 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6329  fail_2:
   6330 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6331 	    rxq_descs_size);
   6332  fail_1:
   6333 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6334  fail_0:
   6335 	return error;
   6336 }
   6337 
   6338 static void
   6339 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6340 {
   6341 
   6342 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6343 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6344 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6345 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6346 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6347 }
   6348 
   6349 
   6350 static int
   6351 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6352 {
   6353 	int i, error;
   6354 
   6355 	/* Create the transmit buffer DMA maps. */
   6356 	WM_TXQUEUELEN(txq) =
   6357 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6358 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6359 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6360 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6361 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6362 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6363 			aprint_error_dev(sc->sc_dev,
   6364 			    "unable to create Tx DMA map %d, error = %d\n",
   6365 			    i, error);
   6366 			goto fail;
   6367 		}
   6368 	}
   6369 
   6370 	return 0;
   6371 
   6372  fail:
   6373 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6374 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6375 			bus_dmamap_destroy(sc->sc_dmat,
   6376 			    txq->txq_soft[i].txs_dmamap);
   6377 	}
   6378 	return error;
   6379 }
   6380 
   6381 static void
   6382 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6383 {
   6384 	int i;
   6385 
   6386 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6387 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6388 			bus_dmamap_destroy(sc->sc_dmat,
   6389 			    txq->txq_soft[i].txs_dmamap);
   6390 	}
   6391 }
   6392 
   6393 static int
   6394 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6395 {
   6396 	int i, error;
   6397 
   6398 	/* Create the receive buffer DMA maps. */
   6399 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6400 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6401 			    MCLBYTES, 0, 0,
   6402 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6403 			aprint_error_dev(sc->sc_dev,
    6404 			    "unable to create Rx DMA map %d, error = %d\n",
   6405 			    i, error);
   6406 			goto fail;
   6407 		}
   6408 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6409 	}
   6410 
   6411 	return 0;
   6412 
   6413  fail:
   6414 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6415 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6416 			bus_dmamap_destroy(sc->sc_dmat,
   6417 			    rxq->rxq_soft[i].rxs_dmamap);
   6418 	}
   6419 	return error;
   6420 }
   6421 
   6422 static void
   6423 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6424 {
   6425 	int i;
   6426 
   6427 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6428 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6429 			bus_dmamap_destroy(sc->sc_dmat,
   6430 			    rxq->rxq_soft[i].rxs_dmamap);
   6431 	}
   6432 }
   6433 
   6434 /*
    6435  * wm_alloc_txrx_queues:
    6436  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6437  */
   6438 static int
   6439 wm_alloc_txrx_queues(struct wm_softc *sc)
   6440 {
   6441 	int i, error, tx_done, rx_done;
   6442 
   6443 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6444 	    KM_SLEEP);
   6445 	if (sc->sc_queue == NULL) {
    6446 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6447 		error = ENOMEM;
   6448 		goto fail_0;
   6449 	}
   6450 
   6451 	/*
   6452 	 * For transmission
   6453 	 */
   6454 	error = 0;
   6455 	tx_done = 0;
   6456 	for (i = 0; i < sc->sc_nqueues; i++) {
   6457 #ifdef WM_EVENT_COUNTERS
   6458 		int j;
   6459 		const char *xname;
   6460 #endif
   6461 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6462 		txq->txq_sc = sc;
   6463 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6464 
   6465 		error = wm_alloc_tx_descs(sc, txq);
   6466 		if (error)
   6467 			break;
   6468 		error = wm_alloc_tx_buffer(sc, txq);
   6469 		if (error) {
   6470 			wm_free_tx_descs(sc, txq);
   6471 			break;
   6472 		}
   6473 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6474 		if (txq->txq_interq == NULL) {
   6475 			wm_free_tx_descs(sc, txq);
   6476 			wm_free_tx_buffer(sc, txq);
   6477 			error = ENOMEM;
   6478 			break;
   6479 		}
   6480 
   6481 #ifdef WM_EVENT_COUNTERS
   6482 		xname = device_xname(sc->sc_dev);
   6483 
   6484 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6485 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6486 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6487 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6488 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6489 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6490 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6491 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6492 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6493 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6494 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6495 
   6496 		for (j = 0; j < WM_NTXSEGS; j++) {
   6497 			snprintf(txq->txq_txseg_evcnt_names[j],
   6498 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6499 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6500 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6501 		}
   6502 
   6503 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6504 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6505 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6506 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6507 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6508 #endif /* WM_EVENT_COUNTERS */
   6509 
   6510 		tx_done++;
   6511 	}
   6512 	if (error)
   6513 		goto fail_1;
   6514 
   6515 	/*
    6516 	 * For receive
   6517 	 */
   6518 	error = 0;
   6519 	rx_done = 0;
   6520 	for (i = 0; i < sc->sc_nqueues; i++) {
   6521 #ifdef WM_EVENT_COUNTERS
   6522 		const char *xname;
   6523 #endif
   6524 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6525 		rxq->rxq_sc = sc;
   6526 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6527 
   6528 		error = wm_alloc_rx_descs(sc, rxq);
   6529 		if (error)
   6530 			break;
   6531 
   6532 		error = wm_alloc_rx_buffer(sc, rxq);
   6533 		if (error) {
   6534 			wm_free_rx_descs(sc, rxq);
   6535 			break;
   6536 		}
   6537 
   6538 #ifdef WM_EVENT_COUNTERS
   6539 		xname = device_xname(sc->sc_dev);
   6540 
   6541 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6542 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6543 
   6544 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6545 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6546 #endif /* WM_EVENT_COUNTERS */
   6547 
   6548 		rx_done++;
   6549 	}
   6550 	if (error)
   6551 		goto fail_2;
   6552 
   6553 	return 0;
   6554 
   6555  fail_2:
   6556 	for (i = 0; i < rx_done; i++) {
   6557 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6558 		wm_free_rx_buffer(sc, rxq);
   6559 		wm_free_rx_descs(sc, rxq);
   6560 		if (rxq->rxq_lock)
   6561 			mutex_obj_free(rxq->rxq_lock);
   6562 	}
   6563  fail_1:
   6564 	for (i = 0; i < tx_done; i++) {
   6565 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6566 		pcq_destroy(txq->txq_interq);
   6567 		wm_free_tx_buffer(sc, txq);
   6568 		wm_free_tx_descs(sc, txq);
   6569 		if (txq->txq_lock)
   6570 			mutex_obj_free(txq->txq_lock);
   6571 	}
   6572 
   6573 	kmem_free(sc->sc_queue,
   6574 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6575  fail_0:
   6576 	return error;
   6577 }
   6578 
   6579 /*
    6580  * wm_free_txrx_queues:
    6581  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6582  */
   6583 static void
   6584 wm_free_txrx_queues(struct wm_softc *sc)
   6585 {
   6586 	int i;
   6587 
   6588 	for (i = 0; i < sc->sc_nqueues; i++) {
   6589 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6590 
   6591 #ifdef WM_EVENT_COUNTERS
   6592 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6593 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6594 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6595 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6596 #endif /* WM_EVENT_COUNTERS */
   6597 
   6598 		wm_free_rx_buffer(sc, rxq);
   6599 		wm_free_rx_descs(sc, rxq);
   6600 		if (rxq->rxq_lock)
   6601 			mutex_obj_free(rxq->rxq_lock);
   6602 	}
   6603 
   6604 	for (i = 0; i < sc->sc_nqueues; i++) {
   6605 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6606 		struct mbuf *m;
   6607 #ifdef WM_EVENT_COUNTERS
   6608 		int j;
   6609 
   6610 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6611 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6612 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6613 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6614 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6615 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6616 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6617 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6618 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6619 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6620 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6621 
   6622 		for (j = 0; j < WM_NTXSEGS; j++)
   6623 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6624 
   6625 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6626 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6627 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6628 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6629 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6630 #endif /* WM_EVENT_COUNTERS */
   6631 
    6632 		/* Drain txq_interq. */
   6633 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6634 			m_freem(m);
   6635 		pcq_destroy(txq->txq_interq);
   6636 
   6637 		wm_free_tx_buffer(sc, txq);
   6638 		wm_free_tx_descs(sc, txq);
   6639 		if (txq->txq_lock)
   6640 			mutex_obj_free(txq->txq_lock);
   6641 	}
   6642 
   6643 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6644 }
   6645 
   6646 static void
   6647 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6648 {
   6649 
   6650 	KASSERT(mutex_owned(txq->txq_lock));
   6651 
   6652 	/* Initialize the transmit descriptor ring. */
   6653 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6654 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6655 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6656 	txq->txq_free = WM_NTXDESC(txq);
   6657 	txq->txq_next = 0;
   6658 }
   6659 
   6660 static void
   6661 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6662     struct wm_txqueue *txq)
   6663 {
   6664 
   6665 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6666 		device_xname(sc->sc_dev), __func__));
   6667 	KASSERT(mutex_owned(txq->txq_lock));
   6668 
   6669 	if (sc->sc_type < WM_T_82543) {
   6670 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6671 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6672 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6673 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6674 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6675 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6676 	} else {
   6677 		int qid = wmq->wmq_id;
   6678 
   6679 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6680 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6681 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6682 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6683 
   6684 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6685 			/*
   6686 			 * Don't write TDT before TCTL.EN is set.
    6687 			 * See the datasheet.
   6688 			 */
   6689 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6690 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6691 			    | TXDCTL_WTHRESH(0));
   6692 		else {
   6693 			/* XXX should update with AIM? */
   6694 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6695 			if (sc->sc_type >= WM_T_82540) {
   6696 				/* should be same */
   6697 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6698 			}
   6699 
   6700 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6701 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6702 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6703 		}
   6704 	}
   6705 }
   6706 
   6707 static void
   6708 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6709 {
   6710 	int i;
   6711 
   6712 	KASSERT(mutex_owned(txq->txq_lock));
   6713 
   6714 	/* Initialize the transmit job descriptors. */
   6715 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6716 		txq->txq_soft[i].txs_mbuf = NULL;
   6717 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6718 	txq->txq_snext = 0;
   6719 	txq->txq_sdirty = 0;
   6720 }
   6721 
   6722 static void
   6723 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6724     struct wm_txqueue *txq)
   6725 {
   6726 
   6727 	KASSERT(mutex_owned(txq->txq_lock));
   6728 
   6729 	/*
   6730 	 * Set up some register offsets that are different between
   6731 	 * the i82542 and the i82543 and later chips.
   6732 	 */
   6733 	if (sc->sc_type < WM_T_82543)
   6734 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6735 	else
   6736 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6737 
   6738 	wm_init_tx_descs(sc, txq);
   6739 	wm_init_tx_regs(sc, wmq, txq);
   6740 	wm_init_tx_buffer(sc, txq);
   6741 
   6742 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6743 	txq->txq_sending = false;
   6744 }
   6745 
   6746 static void
   6747 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6748     struct wm_rxqueue *rxq)
   6749 {
   6750 
   6751 	KASSERT(mutex_owned(rxq->rxq_lock));
   6752 
   6753 	/*
   6754 	 * Initialize the receive descriptor and receive job
   6755 	 * descriptor rings.
   6756 	 */
   6757 	if (sc->sc_type < WM_T_82543) {
   6758 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6759 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6760 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6761 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6762 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6763 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6764 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6765 
   6766 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6767 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6768 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6769 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6770 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6771 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6772 	} else {
   6773 		int qid = wmq->wmq_id;
   6774 
   6775 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6776 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6777 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6778 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6779 
   6780 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6781 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6782 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6783 
    6784 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported for now. */
   6785 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6786 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6787 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6788 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6789 			    | RXDCTL_WTHRESH(1));
   6790 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6791 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6792 		} else {
   6793 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6794 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6795 			/* XXX should update with AIM? */
   6796 			CSR_WRITE(sc, WMREG_RDTR,
   6797 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   6798 			/* MUST be same */
   6799 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6800 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6801 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6802 		}
   6803 	}
   6804 }
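
/*
 * Worked example for the SRRCTL setup above, assuming the common
 * MCLBYTES of 2048 with the packet buffer size field in 1 KB units
 * (SRRCTL_BSIZEPKT_SHIFT == 10):
 *
 *	MCLBYTES & ((1 << 10) - 1)	2048 & 1023 == 0, no panic
 *	MCLBYTES >> 10			== 2, i.e. 2 KB per Rx buffer
 *
 * A cluster size that is not a multiple of 1 KB cannot be expressed
 * in the register field, hence the panic above.
 */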
   6805 
   6806 static int
   6807 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6808 {
   6809 	struct wm_rxsoft *rxs;
   6810 	int error, i;
   6811 
   6812 	KASSERT(mutex_owned(rxq->rxq_lock));
   6813 
   6814 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6815 		rxs = &rxq->rxq_soft[i];
   6816 		if (rxs->rxs_mbuf == NULL) {
   6817 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6818 				log(LOG_ERR, "%s: unable to allocate or map "
   6819 				    "rx buffer %d, error = %d\n",
   6820 				    device_xname(sc->sc_dev), i, error);
   6821 				/*
   6822 				 * XXX Should attempt to run with fewer receive
   6823 				 * XXX buffers instead of just failing.
   6824 				 */
   6825 				wm_rxdrain(rxq);
   6826 				return ENOMEM;
   6827 			}
   6828 		} else {
   6829 			/*
   6830 			 * For 82575 and 82576, the RX descriptors must be
   6831 			 * initialized after the setting of RCTL.EN in
   6832 			 * wm_set_filter()
   6833 			 */
   6834 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6835 				wm_init_rxdesc(rxq, i);
   6836 		}
   6837 	}
   6838 	rxq->rxq_ptr = 0;
   6839 	rxq->rxq_discard = 0;
   6840 	WM_RXCHAIN_RESET(rxq);
   6841 
   6842 	return 0;
   6843 }
   6844 
   6845 static int
   6846 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6847     struct wm_rxqueue *rxq)
   6848 {
   6849 
   6850 	KASSERT(mutex_owned(rxq->rxq_lock));
   6851 
   6852 	/*
   6853 	 * Set up some register offsets that are different between
   6854 	 * the i82542 and the i82543 and later chips.
   6855 	 */
   6856 	if (sc->sc_type < WM_T_82543)
   6857 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6858 	else
   6859 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6860 
   6861 	wm_init_rx_regs(sc, wmq, rxq);
   6862 	return wm_init_rx_buffer(sc, rxq);
   6863 }
   6864 
   6865 /*
    6866  * wm_init_txrx_queues:
    6867  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   6868  */
   6869 static int
   6870 wm_init_txrx_queues(struct wm_softc *sc)
   6871 {
   6872 	int i, error = 0;
   6873 
   6874 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6875 		device_xname(sc->sc_dev), __func__));
   6876 
   6877 	for (i = 0; i < sc->sc_nqueues; i++) {
   6878 		struct wm_queue *wmq = &sc->sc_queue[i];
   6879 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6880 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6881 
   6882 		/*
    6883 		 * TODO
    6884 		 * Currently, a constant value is used instead of AIM
    6885 		 * (adaptive interrupt moderation). Furthermore, the interrupt
    6886 		 * interval of multiqueue (which uses polling mode) is less
    6887 		 * than the default value. More tuning and AIM are required.
   6888 		 */
   6889 		if (wm_is_using_multiqueue(sc))
   6890 			wmq->wmq_itr = 50;
   6891 		else
   6892 			wmq->wmq_itr = sc->sc_itr_init;
   6893 		wmq->wmq_set_itr = true;
   6894 
   6895 		mutex_enter(txq->txq_lock);
   6896 		wm_init_tx_queue(sc, wmq, txq);
   6897 		mutex_exit(txq->txq_lock);
   6898 
   6899 		mutex_enter(rxq->rxq_lock);
   6900 		error = wm_init_rx_queue(sc, wmq, rxq);
   6901 		mutex_exit(rxq->rxq_lock);
   6902 		if (error)
   6903 			break;
   6904 	}
   6905 
   6906 	return error;
   6907 }
   6908 
   6909 /*
   6910  * wm_tx_offload:
   6911  *
   6912  *	Set up TCP/IP checksumming parameters for the
   6913  *	specified packet.
   6914  */
   6915 static int
   6916 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6917     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6918 {
   6919 	struct mbuf *m0 = txs->txs_mbuf;
   6920 	struct livengood_tcpip_ctxdesc *t;
   6921 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6922 	uint32_t ipcse;
   6923 	struct ether_header *eh;
   6924 	int offset, iphl;
   6925 	uint8_t fields;
   6926 
   6927 	/*
   6928 	 * XXX It would be nice if the mbuf pkthdr had offset
   6929 	 * fields for the protocol headers.
   6930 	 */
   6931 
   6932 	eh = mtod(m0, struct ether_header *);
   6933 	switch (htons(eh->ether_type)) {
   6934 	case ETHERTYPE_IP:
   6935 	case ETHERTYPE_IPV6:
   6936 		offset = ETHER_HDR_LEN;
   6937 		break;
   6938 
   6939 	case ETHERTYPE_VLAN:
   6940 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6941 		break;
   6942 
   6943 	default:
   6944 		/*
   6945 		 * Don't support this protocol or encapsulation.
   6946 		 */
   6947 		*fieldsp = 0;
   6948 		*cmdp = 0;
   6949 		return 0;
   6950 	}
   6951 
   6952 	if ((m0->m_pkthdr.csum_flags &
   6953 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6954 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6955 	} else
   6956 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   6957 
   6958 	ipcse = offset + iphl - 1;
   6959 
   6960 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6961 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6962 	seg = 0;
   6963 	fields = 0;
   6964 
   6965 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6966 		int hlen = offset + iphl;
   6967 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6968 
   6969 		if (__predict_false(m0->m_len <
   6970 				    (hlen + sizeof(struct tcphdr)))) {
   6971 			/*
   6972 			 * TCP/IP headers are not in the first mbuf; we need
   6973 			 * to do this the slow and painful way. Let's just
   6974 			 * hope this doesn't happen very often.
   6975 			 */
   6976 			struct tcphdr th;
   6977 
   6978 			WM_Q_EVCNT_INCR(txq, tsopain);
   6979 
   6980 			m_copydata(m0, hlen, sizeof(th), &th);
   6981 			if (v4) {
   6982 				struct ip ip;
   6983 
   6984 				m_copydata(m0, offset, sizeof(ip), &ip);
   6985 				ip.ip_len = 0;
   6986 				m_copyback(m0,
   6987 				    offset + offsetof(struct ip, ip_len),
   6988 				    sizeof(ip.ip_len), &ip.ip_len);
   6989 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6990 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6991 			} else {
   6992 				struct ip6_hdr ip6;
   6993 
   6994 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6995 				ip6.ip6_plen = 0;
   6996 				m_copyback(m0,
   6997 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6998 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6999 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7000 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7001 			}
   7002 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7003 			    sizeof(th.th_sum), &th.th_sum);
   7004 
   7005 			hlen += th.th_off << 2;
   7006 		} else {
   7007 			/*
   7008 			 * TCP/IP headers are in the first mbuf; we can do
   7009 			 * this the easy way.
   7010 			 */
   7011 			struct tcphdr *th;
   7012 
   7013 			if (v4) {
   7014 				struct ip *ip =
   7015 				    (void *)(mtod(m0, char *) + offset);
   7016 				th = (void *)(mtod(m0, char *) + hlen);
   7017 
   7018 				ip->ip_len = 0;
   7019 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7020 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7021 			} else {
   7022 				struct ip6_hdr *ip6 =
   7023 				    (void *)(mtod(m0, char *) + offset);
   7024 				th = (void *)(mtod(m0, char *) + hlen);
   7025 
   7026 				ip6->ip6_plen = 0;
   7027 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7028 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7029 			}
   7030 			hlen += th->th_off << 2;
   7031 		}
   7032 
   7033 		if (v4) {
   7034 			WM_Q_EVCNT_INCR(txq, tso);
   7035 			cmdlen |= WTX_TCPIP_CMD_IP;
   7036 		} else {
   7037 			WM_Q_EVCNT_INCR(txq, tso6);
   7038 			ipcse = 0;
   7039 		}
   7040 		cmd |= WTX_TCPIP_CMD_TSE;
   7041 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7042 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7043 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7044 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7045 	}
   7046 
   7047 	/*
   7048 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7049 	 * offload feature, if we load the context descriptor, we
   7050 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7051 	 */
   7052 
   7053 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7054 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7055 	    WTX_TCPIP_IPCSE(ipcse);
   7056 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7057 		WM_Q_EVCNT_INCR(txq, ipsum);
   7058 		fields |= WTX_IXSM;
   7059 	}
   7060 
   7061 	offset += iphl;
   7062 
   7063 	if (m0->m_pkthdr.csum_flags &
   7064 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7065 		WM_Q_EVCNT_INCR(txq, tusum);
   7066 		fields |= WTX_TXSM;
   7067 		tucs = WTX_TCPIP_TUCSS(offset) |
   7068 		    WTX_TCPIP_TUCSO(offset +
   7069 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7070 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7071 	} else if ((m0->m_pkthdr.csum_flags &
   7072 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7073 		WM_Q_EVCNT_INCR(txq, tusum6);
   7074 		fields |= WTX_TXSM;
   7075 		tucs = WTX_TCPIP_TUCSS(offset) |
   7076 		    WTX_TCPIP_TUCSO(offset +
   7077 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7078 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7079 	} else {
   7080 		/* Just initialize it to a valid TCP context. */
   7081 		tucs = WTX_TCPIP_TUCSS(offset) |
   7082 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7083 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7084 	}
   7085 
   7086 	/*
    7087 	 * We don't have to write a context descriptor for every packet,
    7088 	 * except on the 82574: the 82574 requires a context descriptor
    7089 	 * for every packet when two descriptor queues are used.
    7090 	 * Writing a context descriptor for every packet adds overhead,
    7091 	 * but it does not cause problems.
   7092 	 */
   7093 	/* Fill in the context descriptor. */
   7094 	t = (struct livengood_tcpip_ctxdesc *)
   7095 	    &txq->txq_descs[txq->txq_next];
   7096 	t->tcpip_ipcs = htole32(ipcs);
   7097 	t->tcpip_tucs = htole32(tucs);
   7098 	t->tcpip_cmdlen = htole32(cmdlen);
   7099 	t->tcpip_seg = htole32(seg);
   7100 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7101 
   7102 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7103 	txs->txs_ndesc++;
   7104 
   7105 	*cmdp = cmd;
   7106 	*fieldsp = fields;
   7107 
   7108 	return 0;
   7109 }
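
/*
 * Worked example (assumed header sizes, for illustration): for an
 * untagged TCPv4 packet with a 14-byte Ethernet header and a 20-byte
 * IP header, wm_tx_offload() computes:
 *
 *	offset = ETHER_HDR_LEN = 14		start of the IP header
 *	iphl  = 20				IP header length
 *	IPCSS = 14				IP checksum start
 *	IPCSO = 14 + offsetof(struct ip, ip_sum) = 24
 *	IPCSE = 14 + 20 - 1 = 33		last byte of IP header
 *	TUCSS = 14 + 20 = 34			TCP checksum start
 *	TUCSO = 34 + 16 = 50			offset of th_sum
 *	TUCSE = 0				checksum to end of packet
 */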
   7110 
   7111 static inline int
   7112 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7113 {
   7114 	struct wm_softc *sc = ifp->if_softc;
   7115 	u_int cpuid = cpu_index(curcpu());
   7116 
   7117 	/*
    7118 	 * Currently, a simple distribution strategy.
    7119 	 * TODO:
    7120 	 * Distribute by flowid (RSS hash value).
   7121 	 */
   7122 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7123 }
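
/*
 * Example of the mapping above with assumed values: ncpu = 4,
 * sc_nqueues = 2, sc_affinity_offset = 0.
 *
 *	cpu 0 -> (0 + 4 - 0) % 2 = 0
 *	cpu 1 -> (1 + 4 - 0) % 2 = 1
 *	cpu 2 -> (2 + 4 - 0) % 2 = 0
 *	cpu 3 -> (3 + 4 - 0) % 2 = 1
 *
 * Adding ncpu before taking the modulo keeps the expression
 * non-negative when sc_affinity_offset exceeds the CPU index.
 */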
   7124 
   7125 /*
   7126  * wm_start:		[ifnet interface function]
   7127  *
   7128  *	Start packet transmission on the interface.
   7129  */
   7130 static void
   7131 wm_start(struct ifnet *ifp)
   7132 {
   7133 	struct wm_softc *sc = ifp->if_softc;
   7134 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7135 
   7136 #ifdef WM_MPSAFE
   7137 	KASSERT(if_is_mpsafe(ifp));
   7138 #endif
   7139 	/*
   7140 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7141 	 */
   7142 
   7143 	mutex_enter(txq->txq_lock);
   7144 	if (!txq->txq_stopping)
   7145 		wm_start_locked(ifp);
   7146 	mutex_exit(txq->txq_lock);
   7147 }
   7148 
   7149 static void
   7150 wm_start_locked(struct ifnet *ifp)
   7151 {
   7152 	struct wm_softc *sc = ifp->if_softc;
   7153 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7154 
   7155 	wm_send_common_locked(ifp, txq, false);
   7156 }
   7157 
   7158 static int
   7159 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7160 {
   7161 	int qid;
   7162 	struct wm_softc *sc = ifp->if_softc;
   7163 	struct wm_txqueue *txq;
   7164 
   7165 	qid = wm_select_txqueue(ifp, m);
   7166 	txq = &sc->sc_queue[qid].wmq_txq;
   7167 
   7168 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7169 		m_freem(m);
   7170 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7171 		return ENOBUFS;
   7172 	}
   7173 
   7174 	/*
   7175 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7176 	 */
   7177 	ifp->if_obytes += m->m_pkthdr.len;
   7178 	if (m->m_flags & M_MCAST)
   7179 		ifp->if_omcasts++;
   7180 
   7181 	if (mutex_tryenter(txq->txq_lock)) {
   7182 		if (!txq->txq_stopping)
   7183 			wm_transmit_locked(ifp, txq);
   7184 		mutex_exit(txq->txq_lock);
   7185 	}
   7186 
   7187 	return 0;
   7188 }
   7189 
   7190 static void
   7191 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7192 {
   7193 
   7194 	wm_send_common_locked(ifp, txq, true);
   7195 }
   7196 
   7197 static void
   7198 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7199     bool is_transmit)
   7200 {
   7201 	struct wm_softc *sc = ifp->if_softc;
   7202 	struct mbuf *m0;
   7203 	struct wm_txsoft *txs;
   7204 	bus_dmamap_t dmamap;
   7205 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7206 	bus_addr_t curaddr;
   7207 	bus_size_t seglen, curlen;
   7208 	uint32_t cksumcmd;
   7209 	uint8_t cksumfields;
   7210 	bool remap = true;
   7211 
   7212 	KASSERT(mutex_owned(txq->txq_lock));
   7213 
   7214 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7215 		return;
   7216 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7217 		return;
   7218 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7219 		return;
   7220 
   7221 	/* Remember the previous number of free descriptors. */
   7222 	ofree = txq->txq_free;
   7223 
   7224 	/*
   7225 	 * Loop through the send queue, setting up transmit descriptors
   7226 	 * until we drain the queue, or use up all available transmit
   7227 	 * descriptors.
   7228 	 */
   7229 	for (;;) {
   7230 		m0 = NULL;
   7231 
   7232 		/* Get a work queue entry. */
   7233 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7234 			wm_txeof(txq, UINT_MAX);
   7235 			if (txq->txq_sfree == 0) {
   7236 				DPRINTF(WM_DEBUG_TX,
   7237 				    ("%s: TX: no free job descriptors\n",
   7238 					device_xname(sc->sc_dev)));
   7239 				WM_Q_EVCNT_INCR(txq, txsstall);
   7240 				break;
   7241 			}
   7242 		}
   7243 
   7244 		/* Grab a packet off the queue. */
   7245 		if (is_transmit)
   7246 			m0 = pcq_get(txq->txq_interq);
   7247 		else
   7248 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7249 		if (m0 == NULL)
   7250 			break;
   7251 
   7252 		DPRINTF(WM_DEBUG_TX,
   7253 		    ("%s: TX: have packet to transmit: %p\n",
   7254 			device_xname(sc->sc_dev), m0));
   7255 
   7256 		txs = &txq->txq_soft[txq->txq_snext];
   7257 		dmamap = txs->txs_dmamap;
   7258 
   7259 		use_tso = (m0->m_pkthdr.csum_flags &
   7260 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7261 
   7262 		/*
   7263 		 * So says the Linux driver:
   7264 		 * The controller does a simple calculation to make sure
   7265 		 * there is enough room in the FIFO before initiating the
   7266 		 * DMA for each buffer. The calc is:
   7267 		 *	4 = ceil(buffer len / MSS)
   7268 		 * To make sure we don't overrun the FIFO, adjust the max
   7269 		 * buffer len if the MSS drops.
   7270 		 */
   7271 		dmamap->dm_maxsegsz =
   7272 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7273 		    ? m0->m_pkthdr.segsz << 2
   7274 		    : WTX_MAX_LEN;
   7275 
   7276 		/*
   7277 		 * Load the DMA map.  If this fails, the packet either
   7278 		 * didn't fit in the allotted number of segments, or we
   7279 		 * were short on resources.  For the too-many-segments
   7280 		 * case, we simply report an error and drop the packet,
   7281 		 * since we can't sanely copy a jumbo packet to a single
   7282 		 * buffer.
   7283 		 */
   7284 retry:
   7285 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7286 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7287 		if (__predict_false(error)) {
   7288 			if (error == EFBIG) {
   7289 				if (remap == true) {
   7290 					struct mbuf *m;
   7291 
   7292 					remap = false;
   7293 					m = m_defrag(m0, M_NOWAIT);
   7294 					if (m != NULL) {
   7295 						WM_Q_EVCNT_INCR(txq, defrag);
   7296 						m0 = m;
   7297 						goto retry;
   7298 					}
   7299 				}
   7300 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7301 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7302 				    "DMA segments, dropping...\n",
   7303 				    device_xname(sc->sc_dev));
   7304 				wm_dump_mbuf_chain(sc, m0);
   7305 				m_freem(m0);
   7306 				continue;
   7307 			}
    7308 			/* Short on resources, just stop for now. */
   7309 			DPRINTF(WM_DEBUG_TX,
   7310 			    ("%s: TX: dmamap load failed: %d\n",
   7311 				device_xname(sc->sc_dev), error));
   7312 			break;
   7313 		}
   7314 
   7315 		segs_needed = dmamap->dm_nsegs;
   7316 		if (use_tso) {
   7317 			/* For sentinel descriptor; see below. */
   7318 			segs_needed++;
   7319 		}
   7320 
   7321 		/*
   7322 		 * Ensure we have enough descriptors free to describe
   7323 		 * the packet. Note, we always reserve one descriptor
   7324 		 * at the end of the ring due to the semantics of the
   7325 		 * TDT register, plus one more in the event we need
   7326 		 * to load offload context.
   7327 		 */
   7328 		if (segs_needed > txq->txq_free - 2) {
   7329 			/*
   7330 			 * Not enough free descriptors to transmit this
   7331 			 * packet.  We haven't committed anything yet,
   7332 			 * so just unload the DMA map, put the packet
    7333 			 * back on the queue, and punt. Notify the upper
   7334 			 * layer that there are no more slots left.
   7335 			 */
   7336 			DPRINTF(WM_DEBUG_TX,
   7337 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7338 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7339 				segs_needed, txq->txq_free - 1));
   7340 			if (!is_transmit)
   7341 				ifp->if_flags |= IFF_OACTIVE;
   7342 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7343 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7344 			WM_Q_EVCNT_INCR(txq, txdstall);
   7345 			break;
   7346 		}
   7347 
   7348 		/*
   7349 		 * Check for 82547 Tx FIFO bug. We need to do this
   7350 		 * once we know we can transmit the packet, since we
   7351 		 * do some internal FIFO space accounting here.
   7352 		 */
   7353 		if (sc->sc_type == WM_T_82547 &&
   7354 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7355 			DPRINTF(WM_DEBUG_TX,
   7356 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7357 				device_xname(sc->sc_dev)));
   7358 			if (!is_transmit)
   7359 				ifp->if_flags |= IFF_OACTIVE;
   7360 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7361 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7362 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7363 			break;
   7364 		}
   7365 
   7366 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7367 
   7368 		DPRINTF(WM_DEBUG_TX,
   7369 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7370 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7371 
   7372 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7373 
   7374 		/*
   7375 		 * Store a pointer to the packet so that we can free it
   7376 		 * later.
   7377 		 *
   7378 		 * Initially, we consider the number of descriptors the
    7379 		 * packet uses to be the number of DMA segments.  This may be
   7380 		 * incremented by 1 if we do checksum offload (a descriptor
   7381 		 * is used to set the checksum context).
   7382 		 */
   7383 		txs->txs_mbuf = m0;
   7384 		txs->txs_firstdesc = txq->txq_next;
   7385 		txs->txs_ndesc = segs_needed;
   7386 
   7387 		/* Set up offload parameters for this packet. */
   7388 		if (m0->m_pkthdr.csum_flags &
   7389 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7390 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7391 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7392 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7393 					  &cksumfields) != 0) {
   7394 				/* Error message already displayed. */
   7395 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7396 				continue;
   7397 			}
   7398 		} else {
   7399 			cksumcmd = 0;
   7400 			cksumfields = 0;
   7401 		}
   7402 
   7403 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7404 
   7405 		/* Sync the DMA map. */
   7406 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7407 		    BUS_DMASYNC_PREWRITE);
   7408 
   7409 		/* Initialize the transmit descriptor. */
   7410 		for (nexttx = txq->txq_next, seg = 0;
   7411 		     seg < dmamap->dm_nsegs; seg++) {
   7412 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7413 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7414 			     seglen != 0;
   7415 			     curaddr += curlen, seglen -= curlen,
   7416 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7417 				curlen = seglen;
   7418 
   7419 				/*
   7420 				 * So says the Linux driver:
   7421 				 * Work around for premature descriptor
   7422 				 * write-backs in TSO mode.  Append a
   7423 				 * 4-byte sentinel descriptor.
   7424 				 */
   7425 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7426 				    curlen > 8)
   7427 					curlen -= 4;
   7428 
   7429 				wm_set_dma_addr(
   7430 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7431 				txq->txq_descs[nexttx].wtx_cmdlen
   7432 				    = htole32(cksumcmd | curlen);
   7433 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7434 				    = 0;
   7435 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7436 				    = cksumfields;
    7437 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7438 				lasttx = nexttx;
   7439 
   7440 				DPRINTF(WM_DEBUG_TX,
   7441 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7442 					"len %#04zx\n",
   7443 					device_xname(sc->sc_dev), nexttx,
   7444 					(uint64_t)curaddr, curlen));
   7445 			}
   7446 		}
   7447 
   7448 		KASSERT(lasttx != -1);
   7449 
   7450 		/*
   7451 		 * Set up the command byte on the last descriptor of
   7452 		 * the packet. If we're in the interrupt delay window,
   7453 		 * delay the interrupt.
   7454 		 */
   7455 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7456 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7457 
   7458 		/*
   7459 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7460 		 * up the descriptor to encapsulate the packet for us.
   7461 		 *
   7462 		 * This is only valid on the last descriptor of the packet.
   7463 		 */
   7464 		if (vlan_has_tag(m0)) {
   7465 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7466 			    htole32(WTX_CMD_VLE);
   7467 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7468 			    = htole16(vlan_get_tag(m0));
   7469 		}
   7470 
   7471 		txs->txs_lastdesc = lasttx;
   7472 
   7473 		DPRINTF(WM_DEBUG_TX,
   7474 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7475 			device_xname(sc->sc_dev),
   7476 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7477 
   7478 		/* Sync the descriptors we're using. */
   7479 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7480 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7481 
   7482 		/* Give the packet to the chip. */
   7483 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7484 
   7485 		DPRINTF(WM_DEBUG_TX,
   7486 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7487 
   7488 		DPRINTF(WM_DEBUG_TX,
   7489 		    ("%s: TX: finished transmitting packet, job %d\n",
   7490 			device_xname(sc->sc_dev), txq->txq_snext));
   7491 
   7492 		/* Advance the tx pointer. */
   7493 		txq->txq_free -= txs->txs_ndesc;
   7494 		txq->txq_next = nexttx;
   7495 
   7496 		txq->txq_sfree--;
   7497 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7498 
   7499 		/* Pass the packet to any BPF listeners. */
   7500 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7501 	}
   7502 
   7503 	if (m0 != NULL) {
   7504 		if (!is_transmit)
   7505 			ifp->if_flags |= IFF_OACTIVE;
   7506 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7507 		WM_Q_EVCNT_INCR(txq, descdrop);
   7508 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7509 			__func__));
   7510 		m_freem(m0);
   7511 	}
   7512 
   7513 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7514 		/* No more slots; notify upper layer. */
   7515 		if (!is_transmit)
   7516 			ifp->if_flags |= IFF_OACTIVE;
   7517 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7518 	}
   7519 
   7520 	if (txq->txq_free != ofree) {
   7521 		/* Set a watchdog timer in case the chip flakes out. */
   7522 		txq->txq_lastsent = time_uptime;
   7523 		txq->txq_sending = true;
   7524 	}
   7525 }
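
/*
 * A note on the "txq_free - 2" test above, with assumed numbers for
 * illustration: if 250 descriptors are free, a packet needing 249
 * segments is still refused, because one descriptor is always held
 * back for the TDT ring semantics and one more for a possible
 * checksum context descriptor.
 */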
   7526 
   7527 /*
   7528  * wm_nq_tx_offload:
   7529  *
   7530  *	Set up TCP/IP checksumming parameters for the
   7531  *	specified packet, for NEWQUEUE devices
   7532  */
   7533 static int
   7534 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7535     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7536 {
   7537 	struct mbuf *m0 = txs->txs_mbuf;
   7538 	uint32_t vl_len, mssidx, cmdc;
   7539 	struct ether_header *eh;
   7540 	int offset, iphl;
   7541 
   7542 	/*
   7543 	 * XXX It would be nice if the mbuf pkthdr had offset
   7544 	 * fields for the protocol headers.
   7545 	 */
   7546 	*cmdlenp = 0;
   7547 	*fieldsp = 0;
   7548 
   7549 	eh = mtod(m0, struct ether_header *);
   7550 	switch (htons(eh->ether_type)) {
   7551 	case ETHERTYPE_IP:
   7552 	case ETHERTYPE_IPV6:
   7553 		offset = ETHER_HDR_LEN;
   7554 		break;
   7555 
   7556 	case ETHERTYPE_VLAN:
   7557 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7558 		break;
   7559 
   7560 	default:
   7561 		/* Don't support this protocol or encapsulation. */
   7562 		*do_csum = false;
   7563 		return 0;
   7564 	}
   7565 	*do_csum = true;
   7566 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7567 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7568 
   7569 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7570 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7571 
   7572 	if ((m0->m_pkthdr.csum_flags &
   7573 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7574 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7575 	} else {
   7576 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7577 	}
   7578 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7579 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7580 
   7581 	if (vlan_has_tag(m0)) {
   7582 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7583 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7584 		*cmdlenp |= NQTX_CMD_VLE;
   7585 	}
   7586 
   7587 	mssidx = 0;
   7588 
   7589 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7590 		int hlen = offset + iphl;
   7591 		int tcp_hlen;
   7592 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7593 
   7594 		if (__predict_false(m0->m_len <
   7595 				    (hlen + sizeof(struct tcphdr)))) {
   7596 			/*
   7597 			 * TCP/IP headers are not in the first mbuf; we need
   7598 			 * to do this the slow and painful way. Let's just
   7599 			 * hope this doesn't happen very often.
   7600 			 */
   7601 			struct tcphdr th;
   7602 
   7603 			WM_Q_EVCNT_INCR(txq, tsopain);
   7604 
   7605 			m_copydata(m0, hlen, sizeof(th), &th);
   7606 			if (v4) {
   7607 				struct ip ip;
   7608 
   7609 				m_copydata(m0, offset, sizeof(ip), &ip);
   7610 				ip.ip_len = 0;
   7611 				m_copyback(m0,
   7612 				    offset + offsetof(struct ip, ip_len),
   7613 				    sizeof(ip.ip_len), &ip.ip_len);
   7614 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7615 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7616 			} else {
   7617 				struct ip6_hdr ip6;
   7618 
   7619 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7620 				ip6.ip6_plen = 0;
   7621 				m_copyback(m0,
   7622 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7623 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7624 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7625 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7626 			}
   7627 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7628 			    sizeof(th.th_sum), &th.th_sum);
   7629 
   7630 			tcp_hlen = th.th_off << 2;
   7631 		} else {
   7632 			/*
   7633 			 * TCP/IP headers are in the first mbuf; we can do
   7634 			 * this the easy way.
   7635 			 */
   7636 			struct tcphdr *th;
   7637 
   7638 			if (v4) {
   7639 				struct ip *ip =
   7640 				    (void *)(mtod(m0, char *) + offset);
   7641 				th = (void *)(mtod(m0, char *) + hlen);
   7642 
   7643 				ip->ip_len = 0;
   7644 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7645 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7646 			} else {
   7647 				struct ip6_hdr *ip6 =
   7648 				    (void *)(mtod(m0, char *) + offset);
   7649 				th = (void *)(mtod(m0, char *) + hlen);
   7650 
   7651 				ip6->ip6_plen = 0;
   7652 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7653 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7654 			}
   7655 			tcp_hlen = th->th_off << 2;
   7656 		}
   7657 		hlen += tcp_hlen;
   7658 		*cmdlenp |= NQTX_CMD_TSE;
   7659 
   7660 		if (v4) {
   7661 			WM_Q_EVCNT_INCR(txq, tso);
   7662 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7663 		} else {
   7664 			WM_Q_EVCNT_INCR(txq, tso6);
   7665 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7666 		}
   7667 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7668 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7669 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7670 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7671 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7672 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7673 	} else {
   7674 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7675 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7676 	}
   7677 
   7678 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7679 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7680 		cmdc |= NQTXC_CMD_IP4;
   7681 	}
   7682 
   7683 	if (m0->m_pkthdr.csum_flags &
   7684 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7685 		WM_Q_EVCNT_INCR(txq, tusum);
   7686 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7687 			cmdc |= NQTXC_CMD_TCP;
   7688 		else
   7689 			cmdc |= NQTXC_CMD_UDP;
   7690 
   7691 		cmdc |= NQTXC_CMD_IP4;
   7692 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7693 	}
   7694 	if (m0->m_pkthdr.csum_flags &
   7695 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7696 		WM_Q_EVCNT_INCR(txq, tusum6);
   7697 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7698 			cmdc |= NQTXC_CMD_TCP;
   7699 		else
   7700 			cmdc |= NQTXC_CMD_UDP;
   7701 
   7702 		cmdc |= NQTXC_CMD_IP6;
   7703 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7704 	}
   7705 
   7706 	/*
    7707 	 * We don't have to write a context descriptor for every packet on
    7708 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7709 	 * I210 and I211. It is enough to write one per Tx queue for these
    7710 	 * controllers.
   7711 	 * It would be overhead to write context descriptor for every packet,
   7712 	 * however it does not cause problems.
   7713 	 */
   7714 	/* Fill in the context descriptor. */
    7715 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
    7716 	    htole32(vl_len);
    7717 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
    7718 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
    7719 	    htole32(cmdc);
    7720 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
    7721 	    htole32(mssidx);
   7722 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7723 	DPRINTF(WM_DEBUG_TX,
   7724 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7725 		txq->txq_next, 0, vl_len));
   7726 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7727 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7728 	txs->txs_ndesc++;
   7729 	return 0;
   7730 }
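
/*
 * A sketch of the vl_len packing done above, under the usual advanced
 * context descriptor layout (IP header length in the low bits, MAC
 * header length and VLAN tag above it; see the NQTXC_VLLEN_*
 * definitions for the authoritative shifts and masks).  For an
 * untagged TCPv4 packet with a 14-byte Ethernet header and a 20-byte
 * IP header:
 *
 *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
 *	    | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
 *
 * A tagged packet would additionally OR in
 * (vlan_get_tag(m0) << NQTXC_VLLEN_VLAN_SHIFT) and set NQTX_CMD_VLE,
 * as done at the top of this function.
 */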
   7731 
   7732 /*
   7733  * wm_nq_start:		[ifnet interface function]
   7734  *
   7735  *	Start packet transmission on the interface for NEWQUEUE devices
   7736  */
   7737 static void
   7738 wm_nq_start(struct ifnet *ifp)
   7739 {
   7740 	struct wm_softc *sc = ifp->if_softc;
   7741 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7742 
   7743 #ifdef WM_MPSAFE
   7744 	KASSERT(if_is_mpsafe(ifp));
   7745 #endif
   7746 	/*
   7747 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7748 	 */
   7749 
   7750 	mutex_enter(txq->txq_lock);
   7751 	if (!txq->txq_stopping)
   7752 		wm_nq_start_locked(ifp);
   7753 	mutex_exit(txq->txq_lock);
   7754 }
   7755 
   7756 static void
   7757 wm_nq_start_locked(struct ifnet *ifp)
   7758 {
   7759 	struct wm_softc *sc = ifp->if_softc;
   7760 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7761 
   7762 	wm_nq_send_common_locked(ifp, txq, false);
   7763 }
   7764 
   7765 static int
   7766 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7767 {
   7768 	int qid;
   7769 	struct wm_softc *sc = ifp->if_softc;
   7770 	struct wm_txqueue *txq;
   7771 
   7772 	qid = wm_select_txqueue(ifp, m);
   7773 	txq = &sc->sc_queue[qid].wmq_txq;
   7774 
   7775 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7776 		m_freem(m);
   7777 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7778 		return ENOBUFS;
   7779 	}
   7780 
   7781 	/*
   7782 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7783 	 */
   7784 	ifp->if_obytes += m->m_pkthdr.len;
   7785 	if (m->m_flags & M_MCAST)
   7786 		ifp->if_omcasts++;
   7787 
   7788 	/*
    7789 	 * There are two situations in which this mutex_tryenter() can
    7790 	 * fail at run time:
    7791 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7792 	 *     (2) contention with the deferred if_start softint
    7793 	 *	   (wm_handle_queue())
    7794 	 * In either case, the last packet enqueued on txq->txq_interq is
    7795 	 * dequeued by wm_deferred_start_locked() (from the interrupt
    7796 	 * handler in case (1), from the softint in case (2)), so it does
    7797 	 * not get stuck.
   7798 	 */
   7799 	if (mutex_tryenter(txq->txq_lock)) {
   7800 		if (!txq->txq_stopping)
   7801 			wm_nq_transmit_locked(ifp, txq);
   7802 		mutex_exit(txq->txq_lock);
   7803 	}
   7804 
   7805 	return 0;
   7806 }
   7807 
   7808 static void
   7809 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7810 {
   7811 
   7812 	wm_nq_send_common_locked(ifp, txq, true);
   7813 }
   7814 
   7815 static void
   7816 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7817     bool is_transmit)
   7818 {
   7819 	struct wm_softc *sc = ifp->if_softc;
   7820 	struct mbuf *m0;
   7821 	struct wm_txsoft *txs;
   7822 	bus_dmamap_t dmamap;
   7823 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7824 	bool do_csum, sent;
   7825 	bool remap = true;
   7826 
   7827 	KASSERT(mutex_owned(txq->txq_lock));
   7828 
   7829 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7830 		return;
   7831 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7832 		return;
   7833 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7834 		return;
   7835 
   7836 	sent = false;
   7837 
   7838 	/*
   7839 	 * Loop through the send queue, setting up transmit descriptors
   7840 	 * until we drain the queue, or use up all available transmit
   7841 	 * descriptors.
   7842 	 */
   7843 	for (;;) {
   7844 		m0 = NULL;
   7845 
   7846 		/* Get a work queue entry. */
   7847 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7848 			wm_txeof(txq, UINT_MAX);
   7849 			if (txq->txq_sfree == 0) {
   7850 				DPRINTF(WM_DEBUG_TX,
   7851 				    ("%s: TX: no free job descriptors\n",
   7852 					device_xname(sc->sc_dev)));
   7853 				WM_Q_EVCNT_INCR(txq, txsstall);
   7854 				break;
   7855 			}
   7856 		}
   7857 
   7858 		/* Grab a packet off the queue. */
   7859 		if (is_transmit)
   7860 			m0 = pcq_get(txq->txq_interq);
   7861 		else
   7862 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7863 		if (m0 == NULL)
   7864 			break;
   7865 
   7866 		DPRINTF(WM_DEBUG_TX,
   7867 		    ("%s: TX: have packet to transmit: %p\n",
   7868 		    device_xname(sc->sc_dev), m0));
   7869 
   7870 		txs = &txq->txq_soft[txq->txq_snext];
   7871 		dmamap = txs->txs_dmamap;
   7872 
   7873 		/*
   7874 		 * Load the DMA map.  If this fails, the packet either
   7875 		 * didn't fit in the allotted number of segments, or we
   7876 		 * were short on resources.  For the too-many-segments
   7877 		 * case, we simply report an error and drop the packet,
   7878 		 * since we can't sanely copy a jumbo packet to a single
   7879 		 * buffer.
   7880 		 */
   7881 retry:
   7882 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7883 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7884 		if (__predict_false(error)) {
   7885 			if (error == EFBIG) {
   7886 				if (remap == true) {
   7887 					struct mbuf *m;
   7888 
   7889 					remap = false;
   7890 					m = m_defrag(m0, M_NOWAIT);
   7891 					if (m != NULL) {
   7892 						WM_Q_EVCNT_INCR(txq, defrag);
   7893 						m0 = m;
   7894 						goto retry;
   7895 					}
   7896 				}
   7897 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7898 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7899 				    "DMA segments, dropping...\n",
   7900 				    device_xname(sc->sc_dev));
   7901 				wm_dump_mbuf_chain(sc, m0);
   7902 				m_freem(m0);
   7903 				continue;
   7904 			}
   7905 			/* Short on resources, just stop for now. */
   7906 			DPRINTF(WM_DEBUG_TX,
   7907 			    ("%s: TX: dmamap load failed: %d\n",
   7908 				device_xname(sc->sc_dev), error));
   7909 			break;
   7910 		}
   7911 
   7912 		segs_needed = dmamap->dm_nsegs;
   7913 
   7914 		/*
   7915 		 * Ensure we have enough descriptors free to describe
   7916 		 * the packet. Note, we always reserve one descriptor
   7917 		 * at the end of the ring due to the semantics of the
   7918 		 * TDT register, plus one more in the event we need
   7919 		 * to load offload context.
   7920 		 */
   7921 		if (segs_needed > txq->txq_free - 2) {
   7922 			/*
   7923 			 * Not enough free descriptors to transmit this
   7924 			 * packet.  We haven't committed anything yet,
   7925 			 * so just unload the DMA map, put the packet
    7926 			 * back on the queue, and punt. Notify the upper
   7927 			 * layer that there are no more slots left.
   7928 			 */
   7929 			DPRINTF(WM_DEBUG_TX,
   7930 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7931 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7932 				segs_needed, txq->txq_free - 1));
   7933 			if (!is_transmit)
   7934 				ifp->if_flags |= IFF_OACTIVE;
   7935 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7936 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7937 			WM_Q_EVCNT_INCR(txq, txdstall);
   7938 			break;
   7939 		}
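         		/*
         		 * A worked example of the check above: with txq_free ==
         		 * 10, a packet with segs_needed == 9 is deferred
         		 * (9 > 10 - 2), keeping one descriptor spare for the TDT
         		 * semantics and one for a possible offload context
         		 * descriptor.
         		 */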
   7940 
   7941 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7942 
   7943 		DPRINTF(WM_DEBUG_TX,
   7944 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7945 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7946 
   7947 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7948 
   7949 		/*
   7950 		 * Store a pointer to the packet so that we can free it
   7951 		 * later.
   7952 		 *
    7953 		 * Initially, we consider the number of descriptors the
    7954 		 * packet uses to be the number of DMA segments. This may be
   7955 		 * incremented by 1 if we do checksum offload (a descriptor
   7956 		 * is used to set the checksum context).
   7957 		 */
   7958 		txs->txs_mbuf = m0;
   7959 		txs->txs_firstdesc = txq->txq_next;
   7960 		txs->txs_ndesc = segs_needed;
   7961 
   7962 		/* Set up offload parameters for this packet. */
   7963 		uint32_t cmdlen, fields, dcmdlen;
   7964 		if (m0->m_pkthdr.csum_flags &
   7965 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7966 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7967 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7968 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7969 			    &do_csum) != 0) {
   7970 				/* Error message already displayed. */
   7971 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7972 				continue;
   7973 			}
   7974 		} else {
   7975 			do_csum = false;
   7976 			cmdlen = 0;
   7977 			fields = 0;
   7978 		}
   7979 
   7980 		/* Sync the DMA map. */
   7981 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7982 		    BUS_DMASYNC_PREWRITE);
   7983 
   7984 		/* Initialize the first transmit descriptor. */
   7985 		nexttx = txq->txq_next;
   7986 		if (!do_csum) {
   7987 			/* setup a legacy descriptor */
   7988 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7989 			    dmamap->dm_segs[0].ds_addr);
   7990 			txq->txq_descs[nexttx].wtx_cmdlen =
   7991 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7992 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7993 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7994 			if (vlan_has_tag(m0)) {
   7995 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7996 				    htole32(WTX_CMD_VLE);
   7997 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7998 				    htole16(vlan_get_tag(m0));
   7999 			} else
    8000 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8001 
   8002 			dcmdlen = 0;
   8003 		} else {
   8004 			/* setup an advanced data descriptor */
   8005 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8006 			    htole64(dmamap->dm_segs[0].ds_addr);
   8007 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8008 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8009 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8010 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8011 			    htole32(fields);
   8012 			DPRINTF(WM_DEBUG_TX,
   8013 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8014 				device_xname(sc->sc_dev), nexttx,
   8015 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8016 			DPRINTF(WM_DEBUG_TX,
   8017 			    ("\t 0x%08x%08x\n", fields,
   8018 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8019 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8020 		}
   8021 
   8022 		lasttx = nexttx;
   8023 		nexttx = WM_NEXTTX(txq, nexttx);
    8024 		/*
    8025 		 * Fill in the next descriptors. The legacy and advanced
    8026 		 * formats are the same here.
    8027 		 */
   8028 		for (seg = 1; seg < dmamap->dm_nsegs;
   8029 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8030 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8031 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8032 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8033 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8034 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8035 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8036 			lasttx = nexttx;
   8037 
   8038 			DPRINTF(WM_DEBUG_TX,
   8039 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8040 				device_xname(sc->sc_dev), nexttx,
   8041 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8042 				dmamap->dm_segs[seg].ds_len));
   8043 		}
   8044 
   8045 		KASSERT(lasttx != -1);
   8046 
   8047 		/*
   8048 		 * Set up the command byte on the last descriptor of
   8049 		 * the packet. If we're in the interrupt delay window,
   8050 		 * delay the interrupt.
   8051 		 */
   8052 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8053 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8054 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8055 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8056 
   8057 		txs->txs_lastdesc = lasttx;
   8058 
   8059 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8060 		    device_xname(sc->sc_dev),
   8061 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8062 
   8063 		/* Sync the descriptors we're using. */
   8064 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8065 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8066 
   8067 		/* Give the packet to the chip. */
   8068 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8069 		sent = true;
   8070 
   8071 		DPRINTF(WM_DEBUG_TX,
   8072 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8073 
   8074 		DPRINTF(WM_DEBUG_TX,
   8075 		    ("%s: TX: finished transmitting packet, job %d\n",
   8076 			device_xname(sc->sc_dev), txq->txq_snext));
   8077 
   8078 		/* Advance the tx pointer. */
   8079 		txq->txq_free -= txs->txs_ndesc;
   8080 		txq->txq_next = nexttx;
   8081 
   8082 		txq->txq_sfree--;
   8083 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8084 
   8085 		/* Pass the packet to any BPF listeners. */
   8086 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8087 	}
   8088 
   8089 	if (m0 != NULL) {
   8090 		if (!is_transmit)
   8091 			ifp->if_flags |= IFF_OACTIVE;
   8092 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8093 		WM_Q_EVCNT_INCR(txq, descdrop);
   8094 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8095 			__func__));
   8096 		m_freem(m0);
   8097 	}
   8098 
   8099 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8100 		/* No more slots; notify upper layer. */
   8101 		if (!is_transmit)
   8102 			ifp->if_flags |= IFF_OACTIVE;
   8103 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8104 	}
   8105 
   8106 	if (sent) {
   8107 		/* Set a watchdog timer in case the chip flakes out. */
   8108 		txq->txq_lastsent = time_uptime;
   8109 		txq->txq_sending = true;
   8110 	}
   8111 }
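         /*
          * The txq_lastsent/txq_sending pair set at the end of
          * wm_nq_send_common_locked() arms a software Tx watchdog. A minimal
          * sketch of the corresponding check, assuming it runs from a
          * periodic tick handler ("timo" is an illustrative timeout in
          * seconds, not a name taken from this driver):
          *
          *	if (txq->txq_sending &&
          *	    time_uptime - txq->txq_lastsent > timo) {
          *		// No descriptor has completed for too long; the Tx
          *		// path is considered hung and the chip is reset.
          *	}
          */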
   8112 
   8113 static void
   8114 wm_deferred_start_locked(struct wm_txqueue *txq)
   8115 {
   8116 	struct wm_softc *sc = txq->txq_sc;
   8117 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8118 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8119 	int qid = wmq->wmq_id;
   8120 
   8121 	KASSERT(mutex_owned(txq->txq_lock));
   8122 
   8123 	if (txq->txq_stopping) {
   8124 		mutex_exit(txq->txq_lock);
   8125 		return;
   8126 	}
   8127 
   8128 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8129 		/* XXX needed for ALTQ or single-CPU systems */
   8130 		if (qid == 0)
   8131 			wm_nq_start_locked(ifp);
   8132 		wm_nq_transmit_locked(ifp, txq);
   8133 	} else {
    8134 		/* XXX needed for ALTQ or single-CPU systems */
   8135 		if (qid == 0)
   8136 			wm_start_locked(ifp);
   8137 		wm_transmit_locked(ifp, txq);
   8138 	}
   8139 }
   8140 
   8141 /* Interrupt */
   8142 
   8143 /*
   8144  * wm_txeof:
   8145  *
   8146  *	Helper; handle transmit interrupts.
   8147  */
   8148 static bool
   8149 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8150 {
   8151 	struct wm_softc *sc = txq->txq_sc;
   8152 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8153 	struct wm_txsoft *txs;
   8154 	int count = 0;
   8155 	int i;
   8156 	uint8_t status;
   8157 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8158 	bool more = false;
   8159 
   8160 	KASSERT(mutex_owned(txq->txq_lock));
   8161 
   8162 	if (txq->txq_stopping)
   8163 		return false;
   8164 
   8165 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8166 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8167 	if (wmq->wmq_id == 0)
   8168 		ifp->if_flags &= ~IFF_OACTIVE;
   8169 
   8170 	/*
   8171 	 * Go through the Tx list and free mbufs for those
   8172 	 * frames which have been transmitted.
   8173 	 */
   8174 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8175 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8176 		if (limit-- == 0) {
   8177 			more = true;
   8178 			DPRINTF(WM_DEBUG_TX,
   8179 			    ("%s: TX: loop limited, job %d is not processed\n",
   8180 				device_xname(sc->sc_dev), i));
   8181 			break;
   8182 		}
   8183 
   8184 		txs = &txq->txq_soft[i];
   8185 
   8186 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8187 			device_xname(sc->sc_dev), i));
   8188 
   8189 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8190 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8191 
   8192 		status =
   8193 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8194 		if ((status & WTX_ST_DD) == 0) {
   8195 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8196 			    BUS_DMASYNC_PREREAD);
   8197 			break;
   8198 		}
   8199 
   8200 		count++;
   8201 		DPRINTF(WM_DEBUG_TX,
   8202 		    ("%s: TX: job %d done: descs %d..%d\n",
   8203 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8204 		    txs->txs_lastdesc));
   8205 
   8206 		/*
   8207 		 * XXX We should probably be using the statistics
   8208 		 * XXX registers, but I don't know if they exist
   8209 		 * XXX on chips before the i82544.
   8210 		 */
   8211 
   8212 #ifdef WM_EVENT_COUNTERS
   8213 		if (status & WTX_ST_TU)
   8214 			WM_Q_EVCNT_INCR(txq, underrun);
   8215 #endif /* WM_EVENT_COUNTERS */
   8216 
    8217 		/*
    8218 		 * The documents for the 82574 and newer say the status field
    8219 		 * has neither the EC (Excessive Collision) bit nor the LC
    8220 		 * (Late Collision) bit (both are reserved). See the "PCIe GbE
    8221 		 * Controller Open Source Software Developer's Manual", the
    8222 		 * 82574 datasheet, and newer.
    8223 		 *
    8224 		 * XXX I saw the LC bit set on an I218 even though the media was
    8225 		 * full duplex, so the bit might mean something else (no document).
    8226 		 */
   8227 
   8228 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8229 		    && ((sc->sc_type < WM_T_82574)
   8230 			|| (sc->sc_type == WM_T_80003))) {
   8231 			ifp->if_oerrors++;
   8232 			if (status & WTX_ST_LC)
   8233 				log(LOG_WARNING, "%s: late collision\n",
   8234 				    device_xname(sc->sc_dev));
   8235 			else if (status & WTX_ST_EC) {
   8236 				ifp->if_collisions +=
   8237 				    TX_COLLISION_THRESHOLD + 1;
   8238 				log(LOG_WARNING, "%s: excessive collisions\n",
   8239 				    device_xname(sc->sc_dev));
   8240 			}
   8241 		} else
   8242 			ifp->if_opackets++;
   8243 
   8244 		txq->txq_packets++;
   8245 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8246 
   8247 		txq->txq_free += txs->txs_ndesc;
   8248 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8249 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8250 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8251 		m_freem(txs->txs_mbuf);
   8252 		txs->txs_mbuf = NULL;
   8253 	}
   8254 
   8255 	/* Update the dirty transmit buffer pointer. */
   8256 	txq->txq_sdirty = i;
   8257 	DPRINTF(WM_DEBUG_TX,
   8258 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8259 
   8260 	if (count != 0)
   8261 		rnd_add_uint32(&sc->rnd_source, count);
   8262 
   8263 	/*
   8264 	 * If there are no more pending transmissions, cancel the watchdog
   8265 	 * timer.
   8266 	 */
   8267 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8268 		txq->txq_sending = false;
   8269 
   8270 	return more;
   8271 }
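         /*
          * The contract of wm_txeof() (and of wm_rxeof() below) is: process
          * at most "limit" entries while holding the queue lock and return
          * true if work remains, so that the caller can reschedule itself
          * (see wm_txrxintr_msix() and wm_handle_queue()) instead of holding
          * the lock for an unbounded amount of time.
          */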
   8272 
   8273 static inline uint32_t
   8274 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8275 {
   8276 	struct wm_softc *sc = rxq->rxq_sc;
   8277 
   8278 	if (sc->sc_type == WM_T_82574)
   8279 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8280 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8281 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8282 	else
   8283 		return rxq->rxq_descs[idx].wrx_status;
   8284 }
   8285 
   8286 static inline uint32_t
   8287 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8288 {
   8289 	struct wm_softc *sc = rxq->rxq_sc;
   8290 
   8291 	if (sc->sc_type == WM_T_82574)
   8292 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8293 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8294 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8295 	else
   8296 		return rxq->rxq_descs[idx].wrx_errors;
   8297 }
   8298 
   8299 static inline uint16_t
   8300 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8301 {
   8302 	struct wm_softc *sc = rxq->rxq_sc;
   8303 
   8304 	if (sc->sc_type == WM_T_82574)
   8305 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8306 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8307 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8308 	else
   8309 		return rxq->rxq_descs[idx].wrx_special;
   8310 }
   8311 
   8312 static inline int
   8313 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8314 {
   8315 	struct wm_softc *sc = rxq->rxq_sc;
   8316 
   8317 	if (sc->sc_type == WM_T_82574)
   8318 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8319 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8320 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8321 	else
   8322 		return rxq->rxq_descs[idx].wrx_len;
   8323 }
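         /*
          * The wm_rxdesc_get_*() accessors above all dispatch on the same
          * rule, matching the three Rx descriptor formats this driver
          * handles:
          *
          *	sc_type == WM_T_82574		-> extended descriptors (erx_*)
          *	sc_flags & WM_F_NEWQUEUE	-> advanced descriptors (nqrx_*)
          *	otherwise			-> legacy descriptors (wrx_*)
          *
          * This lets the Rx path stay format-agnostic and simply pass the
          * per-format bit for each field it tests (see
          * wm_rxdesc_is_set_status() and friends below).
          */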
   8324 
   8325 #ifdef WM_DEBUG
   8326 static inline uint32_t
   8327 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8328 {
   8329 	struct wm_softc *sc = rxq->rxq_sc;
   8330 
   8331 	if (sc->sc_type == WM_T_82574)
   8332 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8333 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8334 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8335 	else
   8336 		return 0;
   8337 }
   8338 
   8339 static inline uint8_t
   8340 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8341 {
   8342 	struct wm_softc *sc = rxq->rxq_sc;
   8343 
   8344 	if (sc->sc_type == WM_T_82574)
   8345 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8346 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8347 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8348 	else
   8349 		return 0;
   8350 }
   8351 #endif /* WM_DEBUG */
   8352 
   8353 static inline bool
   8354 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8355     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8356 {
   8357 
   8358 	if (sc->sc_type == WM_T_82574)
   8359 		return (status & ext_bit) != 0;
   8360 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8361 		return (status & nq_bit) != 0;
   8362 	else
   8363 		return (status & legacy_bit) != 0;
   8364 }
   8365 
   8366 static inline bool
   8367 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8368     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8369 {
   8370 
   8371 	if (sc->sc_type == WM_T_82574)
   8372 		return (error & ext_bit) != 0;
   8373 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8374 		return (error & nq_bit) != 0;
   8375 	else
   8376 		return (error & legacy_bit) != 0;
   8377 }
   8378 
   8379 static inline bool
   8380 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8381 {
   8382 
   8383 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8384 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8385 		return true;
   8386 	else
   8387 		return false;
   8388 }
   8389 
   8390 static inline bool
   8391 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8392 {
   8393 	struct wm_softc *sc = rxq->rxq_sc;
   8394 
    8395 	/* XXX missing error bits for newqueue? */
   8396 	if (wm_rxdesc_is_set_error(sc, errors,
   8397 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8398 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8399 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8400 		NQRXC_ERROR_RXE)) {
   8401 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8402 		    EXTRXC_ERROR_SE, 0))
   8403 			log(LOG_WARNING, "%s: symbol error\n",
   8404 			    device_xname(sc->sc_dev));
   8405 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8406 		    EXTRXC_ERROR_SEQ, 0))
   8407 			log(LOG_WARNING, "%s: receive sequence error\n",
   8408 			    device_xname(sc->sc_dev));
   8409 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8410 		    EXTRXC_ERROR_CE, 0))
   8411 			log(LOG_WARNING, "%s: CRC error\n",
   8412 			    device_xname(sc->sc_dev));
   8413 		return true;
   8414 	}
   8415 
   8416 	return false;
   8417 }
   8418 
   8419 static inline bool
   8420 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8421 {
   8422 	struct wm_softc *sc = rxq->rxq_sc;
   8423 
   8424 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8425 		NQRXC_STATUS_DD)) {
   8426 		/* We have processed all of the receive descriptors. */
   8427 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8428 		return false;
   8429 	}
   8430 
   8431 	return true;
   8432 }
   8433 
   8434 static inline bool
   8435 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8436     uint16_t vlantag, struct mbuf *m)
   8437 {
   8438 
   8439 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8440 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8441 		vlan_set_tag(m, le16toh(vlantag));
   8442 	}
   8443 
   8444 	return true;
   8445 }
   8446 
   8447 static inline void
   8448 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8449     uint32_t errors, struct mbuf *m)
   8450 {
   8451 	struct wm_softc *sc = rxq->rxq_sc;
   8452 
   8453 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8454 		if (wm_rxdesc_is_set_status(sc, status,
   8455 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8456 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8457 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8458 			if (wm_rxdesc_is_set_error(sc, errors,
   8459 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8460 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8461 		}
   8462 		if (wm_rxdesc_is_set_status(sc, status,
   8463 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8464 			/*
   8465 			 * Note: we don't know if this was TCP or UDP,
   8466 			 * so we just set both bits, and expect the
   8467 			 * upper layers to deal.
   8468 			 */
   8469 			WM_Q_EVCNT_INCR(rxq, tusum);
   8470 			m->m_pkthdr.csum_flags |=
   8471 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8472 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8473 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8474 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8475 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8476 		}
   8477 	}
   8478 }
   8479 
   8480 /*
   8481  * wm_rxeof:
   8482  *
   8483  *	Helper; handle receive interrupts.
   8484  */
   8485 static bool
   8486 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8487 {
   8488 	struct wm_softc *sc = rxq->rxq_sc;
   8489 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8490 	struct wm_rxsoft *rxs;
   8491 	struct mbuf *m;
   8492 	int i, len;
   8493 	int count = 0;
   8494 	uint32_t status, errors;
   8495 	uint16_t vlantag;
   8496 	bool more = false;
   8497 
   8498 	KASSERT(mutex_owned(rxq->rxq_lock));
   8499 
   8500 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8501 		if (limit-- == 0) {
   8502 			rxq->rxq_ptr = i;
   8503 			more = true;
   8504 			DPRINTF(WM_DEBUG_RX,
   8505 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8506 				device_xname(sc->sc_dev), i));
   8507 			break;
   8508 		}
   8509 
   8510 		rxs = &rxq->rxq_soft[i];
   8511 
   8512 		DPRINTF(WM_DEBUG_RX,
   8513 		    ("%s: RX: checking descriptor %d\n",
   8514 			device_xname(sc->sc_dev), i));
   8515 		wm_cdrxsync(rxq, i,
   8516 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8517 
   8518 		status = wm_rxdesc_get_status(rxq, i);
   8519 		errors = wm_rxdesc_get_errors(rxq, i);
   8520 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8521 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8522 #ifdef WM_DEBUG
   8523 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8524 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8525 #endif
   8526 
   8527 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8528 			/*
    8529 			 * Update the receive pointer while holding rxq_lock,
    8530 			 * keeping it consistent with the incremented counters.
    8531 			 */
   8532 			rxq->rxq_ptr = i;
   8533 			break;
   8534 		}
   8535 
   8536 		count++;
   8537 		if (__predict_false(rxq->rxq_discard)) {
   8538 			DPRINTF(WM_DEBUG_RX,
   8539 			    ("%s: RX: discarding contents of descriptor %d\n",
   8540 				device_xname(sc->sc_dev), i));
   8541 			wm_init_rxdesc(rxq, i);
   8542 			if (wm_rxdesc_is_eop(rxq, status)) {
   8543 				/* Reset our state. */
   8544 				DPRINTF(WM_DEBUG_RX,
   8545 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8546 					device_xname(sc->sc_dev)));
   8547 				rxq->rxq_discard = 0;
   8548 			}
   8549 			continue;
   8550 		}
   8551 
   8552 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8553 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8554 
   8555 		m = rxs->rxs_mbuf;
   8556 
   8557 		/*
   8558 		 * Add a new receive buffer to the ring, unless of
   8559 		 * course the length is zero. Treat the latter as a
   8560 		 * failed mapping.
   8561 		 */
   8562 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8563 			/*
   8564 			 * Failed, throw away what we've done so
   8565 			 * far, and discard the rest of the packet.
   8566 			 */
   8567 			ifp->if_ierrors++;
   8568 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8569 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8570 			wm_init_rxdesc(rxq, i);
   8571 			if (!wm_rxdesc_is_eop(rxq, status))
   8572 				rxq->rxq_discard = 1;
   8573 			if (rxq->rxq_head != NULL)
   8574 				m_freem(rxq->rxq_head);
   8575 			WM_RXCHAIN_RESET(rxq);
   8576 			DPRINTF(WM_DEBUG_RX,
   8577 			    ("%s: RX: Rx buffer allocation failed, "
   8578 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8579 				rxq->rxq_discard ? " (discard)" : ""));
   8580 			continue;
   8581 		}
   8582 
   8583 		m->m_len = len;
   8584 		rxq->rxq_len += len;
   8585 		DPRINTF(WM_DEBUG_RX,
   8586 		    ("%s: RX: buffer at %p len %d\n",
   8587 			device_xname(sc->sc_dev), m->m_data, len));
   8588 
   8589 		/* If this is not the end of the packet, keep looking. */
   8590 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8591 			WM_RXCHAIN_LINK(rxq, m);
   8592 			DPRINTF(WM_DEBUG_RX,
   8593 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8594 				device_xname(sc->sc_dev), rxq->rxq_len));
   8595 			continue;
   8596 		}
   8597 
    8598 		/*
    8599 		 * Okay, we have the entire packet now. The chip is
    8600 		 * configured to include the FCS except on the I350 and
    8601 		 * I21[01] (not all chips can be configured to strip it),
    8602 		 * so we need to trim it. We may also need to adjust the
    8603 		 * length of the previous mbuf in the chain if the current
    8604 		 * mbuf is too short.
    8605 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    8606 		 * is always set on the I350, so we don't trim there.
    8607 		 */
   8608 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8609 		    && (sc->sc_type != WM_T_I210)
   8610 		    && (sc->sc_type != WM_T_I211)) {
   8611 			if (m->m_len < ETHER_CRC_LEN) {
   8612 				rxq->rxq_tail->m_len
   8613 				    -= (ETHER_CRC_LEN - m->m_len);
   8614 				m->m_len = 0;
   8615 			} else
   8616 				m->m_len -= ETHER_CRC_LEN;
   8617 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8618 		} else
   8619 			len = rxq->rxq_len;
   8620 
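         		/*
         		 * Example of the adjustment above when the 4-byte FCS
         		 * straddles mbufs: if the final mbuf holds only 2 bytes
         		 * (m->m_len == 2), the remaining 2 CRC bytes are at the
         		 * end of the previous mbuf, so rxq_tail->m_len is reduced
         		 * by 2 and m->m_len is set to 0.
         		 */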
   8621 		WM_RXCHAIN_LINK(rxq, m);
   8622 
   8623 		*rxq->rxq_tailp = NULL;
   8624 		m = rxq->rxq_head;
   8625 
   8626 		WM_RXCHAIN_RESET(rxq);
   8627 
   8628 		DPRINTF(WM_DEBUG_RX,
   8629 		    ("%s: RX: have entire packet, len -> %d\n",
   8630 			device_xname(sc->sc_dev), len));
   8631 
   8632 		/* If an error occurred, update stats and drop the packet. */
   8633 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8634 			m_freem(m);
   8635 			continue;
   8636 		}
   8637 
   8638 		/* No errors.  Receive the packet. */
   8639 		m_set_rcvif(m, ifp);
   8640 		m->m_pkthdr.len = len;
    8641 		/*
    8642 		 * TODO: we should save the rsshash and rsstype in this
    8643 		 * mbuf.
    8644 		 */
   8645 		DPRINTF(WM_DEBUG_RX,
   8646 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8647 			device_xname(sc->sc_dev), rsstype, rsshash));
   8648 
   8649 		/*
   8650 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8651 		 * for us.  Associate the tag with the packet.
   8652 		 */
   8653 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8654 			continue;
   8655 
   8656 		/* Set up checksum info for this packet. */
   8657 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8658 		/*
    8659 		 * Update the receive pointer while holding rxq_lock, keeping
    8660 		 * it consistent with the incremented counters.
    8661 		 */
   8662 		rxq->rxq_ptr = i;
   8663 		rxq->rxq_packets++;
   8664 		rxq->rxq_bytes += len;
   8665 		mutex_exit(rxq->rxq_lock);
   8666 
   8667 		/* Pass it on. */
   8668 		if_percpuq_enqueue(sc->sc_ipq, m);
   8669 
   8670 		mutex_enter(rxq->rxq_lock);
   8671 
   8672 		if (rxq->rxq_stopping)
   8673 			break;
   8674 	}
   8675 
   8676 	if (count != 0)
   8677 		rnd_add_uint32(&sc->rnd_source, count);
   8678 
   8679 	DPRINTF(WM_DEBUG_RX,
   8680 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8681 
   8682 	return more;
   8683 }
   8684 
   8685 /*
   8686  * wm_linkintr_gmii:
   8687  *
   8688  *	Helper; handle link interrupts for GMII.
   8689  */
   8690 static void
   8691 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8692 {
   8693 
   8694 	KASSERT(WM_CORE_LOCKED(sc));
   8695 
   8696 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8697 		__func__));
   8698 
   8699 	if (icr & ICR_LSC) {
   8700 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8701 		uint32_t reg;
   8702 		bool link;
   8703 
   8704 		link = status & STATUS_LU;
   8705 		if (link) {
   8706 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8707 				device_xname(sc->sc_dev),
   8708 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8709 		} else {
   8710 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8711 				device_xname(sc->sc_dev)));
   8712 		}
   8713 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8714 			wm_gig_downshift_workaround_ich8lan(sc);
   8715 
   8716 		if ((sc->sc_type == WM_T_ICH8)
   8717 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8718 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8719 		}
   8720 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8721 			device_xname(sc->sc_dev)));
   8722 		mii_pollstat(&sc->sc_mii);
   8723 		if (sc->sc_type == WM_T_82543) {
   8724 			int miistatus, active;
   8725 
   8726 			/*
   8727 			 * With 82543, we need to force speed and
   8728 			 * duplex on the MAC equal to what the PHY
   8729 			 * speed and duplex configuration is.
   8730 			 */
   8731 			miistatus = sc->sc_mii.mii_media_status;
   8732 
   8733 			if (miistatus & IFM_ACTIVE) {
   8734 				active = sc->sc_mii.mii_media_active;
   8735 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8736 				switch (IFM_SUBTYPE(active)) {
   8737 				case IFM_10_T:
   8738 					sc->sc_ctrl |= CTRL_SPEED_10;
   8739 					break;
   8740 				case IFM_100_TX:
   8741 					sc->sc_ctrl |= CTRL_SPEED_100;
   8742 					break;
   8743 				case IFM_1000_T:
   8744 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8745 					break;
   8746 				default:
   8747 					/*
   8748 					 * fiber?
    8749 					 * Should not enter here.
   8750 					 */
   8751 					printf("unknown media (%x)\n", active);
   8752 					break;
   8753 				}
   8754 				if (active & IFM_FDX)
   8755 					sc->sc_ctrl |= CTRL_FD;
   8756 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8757 			}
   8758 		} else if (sc->sc_type == WM_T_PCH) {
   8759 			wm_k1_gig_workaround_hv(sc,
   8760 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8761 		}
   8762 
   8763 		if ((sc->sc_phytype == WMPHY_82578)
   8764 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8765 			== IFM_1000_T)) {
   8766 
   8767 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8768 				delay(200*1000); /* XXX too big */
   8769 
   8770 				/* Link stall fix for link up */
   8771 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8772 				    HV_MUX_DATA_CTRL,
   8773 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8774 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8775 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8776 				    HV_MUX_DATA_CTRL,
   8777 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8778 			}
   8779 		}
   8780 		/*
   8781 		 * I217 Packet Loss issue:
   8782 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8783 		 * on power up.
    8784 		 * Set the Beacon Duration for the I217 to 8 usec.
   8785 		 */
   8786 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8787 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8788 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8789 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8790 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8791 		}
   8792 
   8793 		/* Work-around I218 hang issue */
   8794 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   8795 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   8796 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   8797 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   8798 			wm_k1_workaround_lpt_lp(sc, link);
   8799 
   8800 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8801 			/*
   8802 			 * Set platform power management values for Latency
   8803 			 * Tolerance Reporting (LTR)
   8804 			 */
   8805 			wm_platform_pm_pch_lpt(sc,
   8806 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8807 		}
   8808 
   8809 		/* FEXTNVM6 K1-off workaround */
   8810 		if (sc->sc_type == WM_T_PCH_SPT) {
   8811 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8812 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8813 			    & FEXTNVM6_K1_OFF_ENABLE)
   8814 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8815 			else
   8816 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8817 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8818 		}
   8819 	} else if (icr & ICR_RXSEQ) {
   8820 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8821 			device_xname(sc->sc_dev)));
   8822 	}
   8823 }
   8824 
   8825 /*
   8826  * wm_linkintr_tbi:
   8827  *
   8828  *	Helper; handle link interrupts for TBI mode.
   8829  */
   8830 static void
   8831 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8832 {
   8833 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8834 	uint32_t status;
   8835 
   8836 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8837 		__func__));
   8838 
   8839 	status = CSR_READ(sc, WMREG_STATUS);
   8840 	if (icr & ICR_LSC) {
   8841 		wm_check_for_link(sc);
   8842 		if (status & STATUS_LU) {
   8843 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8844 				device_xname(sc->sc_dev),
   8845 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8846 			/*
   8847 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8848 			 * so we should update sc->sc_ctrl
   8849 			 */
   8850 
   8851 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8852 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8853 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8854 			if (status & STATUS_FD)
   8855 				sc->sc_tctl |=
   8856 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8857 			else
   8858 				sc->sc_tctl |=
   8859 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8860 			if (sc->sc_ctrl & CTRL_TFCE)
   8861 				sc->sc_fcrtl |= FCRTL_XONE;
   8862 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8863 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8864 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   8865 			sc->sc_tbi_linkup = 1;
   8866 			if_link_state_change(ifp, LINK_STATE_UP);
   8867 		} else {
   8868 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8869 				device_xname(sc->sc_dev)));
   8870 			sc->sc_tbi_linkup = 0;
   8871 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8872 		}
   8873 		/* Update LED */
   8874 		wm_tbi_serdes_set_linkled(sc);
   8875 	} else if (icr & ICR_RXSEQ) {
   8876 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8877 			device_xname(sc->sc_dev)));
   8878 	}
   8879 }
   8880 
   8881 /*
   8882  * wm_linkintr_serdes:
   8883  *
    8884  *	Helper; handle link interrupts for SERDES mode.
   8885  */
   8886 static void
   8887 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8888 {
   8889 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8890 	struct mii_data *mii = &sc->sc_mii;
   8891 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8892 	uint32_t pcs_adv, pcs_lpab, reg;
   8893 
   8894 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8895 		__func__));
   8896 
   8897 	if (icr & ICR_LSC) {
   8898 		/* Check PCS */
   8899 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8900 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8901 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8902 				device_xname(sc->sc_dev)));
   8903 			mii->mii_media_status |= IFM_ACTIVE;
   8904 			sc->sc_tbi_linkup = 1;
   8905 			if_link_state_change(ifp, LINK_STATE_UP);
   8906 		} else {
   8907 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8908 				device_xname(sc->sc_dev)));
   8909 			mii->mii_media_status |= IFM_NONE;
   8910 			sc->sc_tbi_linkup = 0;
   8911 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8912 			wm_tbi_serdes_set_linkled(sc);
   8913 			return;
   8914 		}
   8915 		mii->mii_media_active |= IFM_1000_SX;
   8916 		if ((reg & PCS_LSTS_FDX) != 0)
   8917 			mii->mii_media_active |= IFM_FDX;
   8918 		else
   8919 			mii->mii_media_active |= IFM_HDX;
   8920 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8921 			/* Check flow */
   8922 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8923 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8924 				DPRINTF(WM_DEBUG_LINK,
   8925 				    ("XXX LINKOK but not ACOMP\n"));
   8926 				return;
   8927 			}
   8928 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8929 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8930 			DPRINTF(WM_DEBUG_LINK,
   8931 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8932 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8933 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8934 				mii->mii_media_active |= IFM_FLOW
   8935 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8936 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8937 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8938 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8939 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8940 				mii->mii_media_active |= IFM_FLOW
   8941 				    | IFM_ETH_TXPAUSE;
   8942 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8943 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8944 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8945 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8946 				mii->mii_media_active |= IFM_FLOW
   8947 				    | IFM_ETH_RXPAUSE;
   8948 		}
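         		/*
         		 * The chain of tests above implements the standard pause
         		 * resolution from autonegotiation (cf. IEEE 802.3 Annex
         		 * 28B) using the advertised (pcs_adv) and link partner
         		 * (pcs_lpab) Sym/Asym pause bits:
         		 *
         		 *	local Sym/Asym	partner Sym/Asym	result
         		 *	    1 / -	     1 / -		Tx and Rx pause
         		 *	    0 / 1	     1 / 1		Tx pause only
         		 *	    1 / 1	     0 / 1		Rx pause only
         		 *	(any other combination)			no pause
         		 */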
   8949 		/* Update LED */
   8950 		wm_tbi_serdes_set_linkled(sc);
   8951 	} else {
   8952 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8953 		    device_xname(sc->sc_dev)));
   8954 	}
   8955 }
   8956 
   8957 /*
   8958  * wm_linkintr:
   8959  *
   8960  *	Helper; handle link interrupts.
   8961  */
   8962 static void
   8963 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8964 {
   8965 
   8966 	KASSERT(WM_CORE_LOCKED(sc));
   8967 
   8968 	if (sc->sc_flags & WM_F_HAS_MII)
   8969 		wm_linkintr_gmii(sc, icr);
   8970 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8971 	    && (sc->sc_type >= WM_T_82575))
   8972 		wm_linkintr_serdes(sc, icr);
   8973 	else
   8974 		wm_linkintr_tbi(sc, icr);
   8975 }
   8976 
   8977 /*
   8978  * wm_intr_legacy:
   8979  *
   8980  *	Interrupt service routine for INTx and MSI.
   8981  */
   8982 static int
   8983 wm_intr_legacy(void *arg)
   8984 {
   8985 	struct wm_softc *sc = arg;
   8986 	struct wm_queue *wmq = &sc->sc_queue[0];
   8987 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8988 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8989 	uint32_t icr, rndval = 0;
   8990 	int handled = 0;
   8991 
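         	/*
         	 * Note: on these controllers, reading ICR acknowledges (clears)
         	 * the asserted interrupt causes, so each iteration of the loop
         	 * below consumes the causes it observed; the loop exits once no
         	 * enabled cause remains set.
         	 */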
   8992 	while (1 /* CONSTCOND */) {
   8993 		icr = CSR_READ(sc, WMREG_ICR);
   8994 		if ((icr & sc->sc_icr) == 0)
   8995 			break;
   8996 		if (handled == 0) {
   8997 			DPRINTF(WM_DEBUG_TX,
   8998 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8999 		}
   9000 		if (rndval == 0)
   9001 			rndval = icr;
   9002 
   9003 		mutex_enter(rxq->rxq_lock);
   9004 
   9005 		if (rxq->rxq_stopping) {
   9006 			mutex_exit(rxq->rxq_lock);
   9007 			break;
   9008 		}
   9009 
   9010 		handled = 1;
   9011 
   9012 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9013 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9014 			DPRINTF(WM_DEBUG_RX,
   9015 			    ("%s: RX: got Rx intr 0x%08x\n",
   9016 				device_xname(sc->sc_dev),
   9017 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9018 			WM_Q_EVCNT_INCR(rxq, intr);
   9019 		}
   9020 #endif
   9021 		/*
   9022 		 * wm_rxeof() does *not* call upper layer functions directly,
    9023 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9024 		 * So, we can call wm_rxeof() in interrupt context.
   9025 		 */
   9026 		wm_rxeof(rxq, UINT_MAX);
   9027 
   9028 		mutex_exit(rxq->rxq_lock);
   9029 		mutex_enter(txq->txq_lock);
   9030 
   9031 		if (txq->txq_stopping) {
   9032 			mutex_exit(txq->txq_lock);
   9033 			break;
   9034 		}
   9035 
   9036 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9037 		if (icr & ICR_TXDW) {
   9038 			DPRINTF(WM_DEBUG_TX,
   9039 			    ("%s: TX: got TXDW interrupt\n",
   9040 				device_xname(sc->sc_dev)));
   9041 			WM_Q_EVCNT_INCR(txq, txdw);
   9042 		}
   9043 #endif
   9044 		wm_txeof(txq, UINT_MAX);
   9045 
   9046 		mutex_exit(txq->txq_lock);
   9047 		WM_CORE_LOCK(sc);
   9048 
   9049 		if (sc->sc_core_stopping) {
   9050 			WM_CORE_UNLOCK(sc);
   9051 			break;
   9052 		}
   9053 
   9054 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9055 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9056 			wm_linkintr(sc, icr);
   9057 		}
   9058 
   9059 		WM_CORE_UNLOCK(sc);
   9060 
   9061 		if (icr & ICR_RXO) {
   9062 #if defined(WM_DEBUG)
   9063 			log(LOG_WARNING, "%s: Receive overrun\n",
   9064 			    device_xname(sc->sc_dev));
   9065 #endif /* defined(WM_DEBUG) */
   9066 		}
   9067 	}
   9068 
   9069 	rnd_add_uint32(&sc->rnd_source, rndval);
   9070 
   9071 	if (handled) {
   9072 		/* Try to get more packets going. */
   9073 		softint_schedule(wmq->wmq_si);
   9074 	}
   9075 
   9076 	return handled;
   9077 }
   9078 
   9079 static inline void
   9080 wm_txrxintr_disable(struct wm_queue *wmq)
   9081 {
   9082 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9083 
   9084 	if (sc->sc_type == WM_T_82574)
   9085 		CSR_WRITE(sc, WMREG_IMC,
   9086 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9087 	else if (sc->sc_type == WM_T_82575)
   9088 		CSR_WRITE(sc, WMREG_EIMC,
   9089 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9090 	else
   9091 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9092 }
   9093 
   9094 static inline void
   9095 wm_txrxintr_enable(struct wm_queue *wmq)
   9096 {
   9097 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9098 
   9099 	wm_itrs_calculate(sc, wmq);
   9100 
    9101 	/*
    9102 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    9103 	 * here. There is no need to care which of RXQ(0) and RXQ(1)
    9104 	 * enables ICR_OTHER first, because each RXQ/TXQ interrupt is
    9105 	 * disabled while its wm_handle_queue(wmq) is running.
    9106 	 */
   9107 	if (sc->sc_type == WM_T_82574)
   9108 		CSR_WRITE(sc, WMREG_IMS,
   9109 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9110 	else if (sc->sc_type == WM_T_82575)
   9111 		CSR_WRITE(sc, WMREG_EIMS,
   9112 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9113 	else
   9114 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9115 }
   9116 
   9117 static int
   9118 wm_txrxintr_msix(void *arg)
   9119 {
   9120 	struct wm_queue *wmq = arg;
   9121 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9122 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9123 	struct wm_softc *sc = txq->txq_sc;
   9124 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9125 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9126 	bool txmore;
   9127 	bool rxmore;
   9128 
   9129 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9130 
   9131 	DPRINTF(WM_DEBUG_TX,
   9132 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9133 
   9134 	wm_txrxintr_disable(wmq);
   9135 
   9136 	mutex_enter(txq->txq_lock);
   9137 
   9138 	if (txq->txq_stopping) {
   9139 		mutex_exit(txq->txq_lock);
   9140 		return 0;
   9141 	}
   9142 
   9143 	WM_Q_EVCNT_INCR(txq, txdw);
   9144 	txmore = wm_txeof(txq, txlimit);
    9145 	/* wm_deferred_start_locked() is called in wm_handle_queue(). */
   9146 	mutex_exit(txq->txq_lock);
   9147 
   9148 	DPRINTF(WM_DEBUG_RX,
   9149 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9150 	mutex_enter(rxq->rxq_lock);
   9151 
   9152 	if (rxq->rxq_stopping) {
   9153 		mutex_exit(rxq->rxq_lock);
   9154 		return 0;
   9155 	}
   9156 
   9157 	WM_Q_EVCNT_INCR(rxq, intr);
   9158 	rxmore = wm_rxeof(rxq, rxlimit);
   9159 	mutex_exit(rxq->rxq_lock);
   9160 
   9161 	wm_itrs_writereg(sc, wmq);
   9162 
   9163 	if (txmore || rxmore)
   9164 		softint_schedule(wmq->wmq_si);
   9165 	else
   9166 		wm_txrxintr_enable(wmq);
   9167 
   9168 	return 1;
   9169 }
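         /*
          * A design note on the limits used above: wm_txrxintr_msix()
          * processes at most sc_{tx,rx}_intr_process_limit entries with the
          * queue interrupt masked; when more work remains it defers to the
          * wm_handle_queue() softint (which applies its own limits) instead
          * of re-enabling the interrupt, bounding the time spent in hard
          * interrupt context under load.
          */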
   9170 
   9171 static void
   9172 wm_handle_queue(void *arg)
   9173 {
   9174 	struct wm_queue *wmq = arg;
   9175 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9176 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9177 	struct wm_softc *sc = txq->txq_sc;
   9178 	u_int txlimit = sc->sc_tx_process_limit;
   9179 	u_int rxlimit = sc->sc_rx_process_limit;
   9180 	bool txmore;
   9181 	bool rxmore;
   9182 
   9183 	mutex_enter(txq->txq_lock);
   9184 	if (txq->txq_stopping) {
   9185 		mutex_exit(txq->txq_lock);
   9186 		return;
   9187 	}
   9188 	txmore = wm_txeof(txq, txlimit);
   9189 	wm_deferred_start_locked(txq);
   9190 	mutex_exit(txq->txq_lock);
   9191 
   9192 	mutex_enter(rxq->rxq_lock);
   9193 	if (rxq->rxq_stopping) {
   9194 		mutex_exit(rxq->rxq_lock);
   9195 		return;
   9196 	}
   9197 	WM_Q_EVCNT_INCR(rxq, defer);
   9198 	rxmore = wm_rxeof(rxq, rxlimit);
   9199 	mutex_exit(rxq->rxq_lock);
   9200 
   9201 	if (txmore || rxmore)
   9202 		softint_schedule(wmq->wmq_si);
   9203 	else
   9204 		wm_txrxintr_enable(wmq);
   9205 }
   9206 
   9207 /*
   9208  * wm_linkintr_msix:
   9209  *
   9210  *	Interrupt service routine for link status change for MSI-X.
   9211  */
   9212 static int
   9213 wm_linkintr_msix(void *arg)
   9214 {
   9215 	struct wm_softc *sc = arg;
   9216 	uint32_t reg;
    9217 	bool has_rxo = false;
   9218 
   9219 	DPRINTF(WM_DEBUG_LINK,
   9220 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9221 
   9222 	reg = CSR_READ(sc, WMREG_ICR);
   9223 	WM_CORE_LOCK(sc);
   9224 	if (sc->sc_core_stopping)
   9225 		goto out;
   9226 
   9227 	if ((reg & ICR_LSC) != 0) {
   9228 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9229 		wm_linkintr(sc, ICR_LSC);
   9230 	}
   9231 
    9232 	/*
    9233 	 * XXX 82574 MSI-X mode workaround
    9234 	 *
    9235 	 * In 82574 MSI-X mode, a receive overrun (RXO) is signalled on the
    9236 	 * ICR_OTHER MSI-X vector, and neither the ICR_RXQ(0) nor the
    9237 	 * ICR_RXQ(1) vector is raised. So, we generate ICR_RXQ(0) and
    9238 	 * ICR_RXQ(1) interrupts by writing WMREG_ICS to process receive packets.
    9239 	 */
   9240 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9241 #if defined(WM_DEBUG)
   9242 		log(LOG_WARNING, "%s: Receive overrun\n",
   9243 		    device_xname(sc->sc_dev));
   9244 #endif /* defined(WM_DEBUG) */
   9245 
   9246 		has_rxo = true;
    9247 		/*
    9248 		 * The RXO interrupt fires at a very high rate when the
    9249 		 * receive traffic rate is high. We use polling mode for
    9250 		 * ICR_OTHER, as for the Tx/Rx interrupts. ICR_OTHER will be
    9251 		 * re-enabled at the end of wm_txrxintr_msix(), which is
    9252 		 * kicked by both the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
    9253 		 */
   9254 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9255 
   9256 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9257 	}
    9258 
   9261 out:
   9262 	WM_CORE_UNLOCK(sc);
   9263 
   9264 	if (sc->sc_type == WM_T_82574) {
   9265 		if (!has_rxo)
   9266 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9267 		else
   9268 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9269 	} else if (sc->sc_type == WM_T_82575)
   9270 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9271 	else
   9272 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9273 
   9274 	return 1;
   9275 }
   9276 
   9277 /*
   9278  * Media related.
   9279  * GMII, SGMII, TBI (and SERDES)
   9280  */
   9281 
   9282 /* Common */
   9283 
   9284 /*
   9285  * wm_tbi_serdes_set_linkled:
   9286  *
   9287  *	Update the link LED on TBI and SERDES devices.
   9288  */
   9289 static void
   9290 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9291 {
   9292 
   9293 	if (sc->sc_tbi_linkup)
   9294 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9295 	else
   9296 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9297 
   9298 	/* 82540 or newer devices are active low */
   9299 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9300 
   9301 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9302 }
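         /*
          * Example of the polarity handling above: on an 82540 or newer the
          * LED is active low, so on link up CTRL_SWDPIN(0) is first set and
          * the XOR then clears it, driving the pin low to light the LED; on
          * older chips the XOR is a no-op and the pin is driven high instead.
          */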
   9303 
   9304 /* GMII related */
   9305 
   9306 /*
   9307  * wm_gmii_reset:
   9308  *
   9309  *	Reset the PHY.
   9310  */
   9311 static void
   9312 wm_gmii_reset(struct wm_softc *sc)
   9313 {
   9314 	uint32_t reg;
   9315 	int rv;
   9316 
   9317 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9318 		device_xname(sc->sc_dev), __func__));
   9319 
   9320 	rv = sc->phy.acquire(sc);
   9321 	if (rv != 0) {
   9322 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9323 		    __func__);
   9324 		return;
   9325 	}
   9326 
   9327 	switch (sc->sc_type) {
   9328 	case WM_T_82542_2_0:
   9329 	case WM_T_82542_2_1:
   9330 		/* null */
   9331 		break;
   9332 	case WM_T_82543:
   9333 		/*
   9334 		 * With 82543, we need to force speed and duplex on the MAC
   9335 		 * equal to what the PHY speed and duplex configuration is.
   9336 		 * In addition, we need to perform a hardware reset on the PHY
   9337 		 * to take it out of reset.
   9338 		 */
   9339 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9340 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9341 
   9342 		/* The PHY reset pin is active-low. */
   9343 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9344 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9345 		    CTRL_EXT_SWDPIN(4));
   9346 		reg |= CTRL_EXT_SWDPIO(4);
   9347 
   9348 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9349 		CSR_WRITE_FLUSH(sc);
   9350 		delay(10*1000);
   9351 
   9352 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9353 		CSR_WRITE_FLUSH(sc);
   9354 		delay(150);
   9355 #if 0
   9356 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9357 #endif
   9358 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9359 		break;
   9360 	case WM_T_82544:	/* reset 10000us */
   9361 	case WM_T_82540:
   9362 	case WM_T_82545:
   9363 	case WM_T_82545_3:
   9364 	case WM_T_82546:
   9365 	case WM_T_82546_3:
   9366 	case WM_T_82541:
   9367 	case WM_T_82541_2:
   9368 	case WM_T_82547:
   9369 	case WM_T_82547_2:
   9370 	case WM_T_82571:	/* reset 100us */
   9371 	case WM_T_82572:
   9372 	case WM_T_82573:
   9373 	case WM_T_82574:
   9374 	case WM_T_82575:
   9375 	case WM_T_82576:
   9376 	case WM_T_82580:
   9377 	case WM_T_I350:
   9378 	case WM_T_I354:
   9379 	case WM_T_I210:
   9380 	case WM_T_I211:
   9381 	case WM_T_82583:
   9382 	case WM_T_80003:
   9383 		/* generic reset */
   9384 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9385 		CSR_WRITE_FLUSH(sc);
   9386 		delay(20000);
   9387 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9388 		CSR_WRITE_FLUSH(sc);
   9389 		delay(20000);
   9390 
   9391 		if ((sc->sc_type == WM_T_82541)
   9392 		    || (sc->sc_type == WM_T_82541_2)
   9393 		    || (sc->sc_type == WM_T_82547)
   9394 		    || (sc->sc_type == WM_T_82547_2)) {
    9395 			/* Workarounds for IGP are done in igp_reset() */
   9396 			/* XXX add code to set LED after phy reset */
   9397 		}
   9398 		break;
   9399 	case WM_T_ICH8:
   9400 	case WM_T_ICH9:
   9401 	case WM_T_ICH10:
   9402 	case WM_T_PCH:
   9403 	case WM_T_PCH2:
   9404 	case WM_T_PCH_LPT:
   9405 	case WM_T_PCH_SPT:
   9406 	case WM_T_PCH_CNP:
   9407 		/* generic reset */
   9408 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9409 		CSR_WRITE_FLUSH(sc);
   9410 		delay(100);
   9411 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9412 		CSR_WRITE_FLUSH(sc);
   9413 		delay(150);
   9414 		break;
   9415 	default:
   9416 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9417 		    __func__);
   9418 		break;
   9419 	}
   9420 
   9421 	sc->phy.release(sc);
   9422 
   9423 	/* get_cfg_done */
   9424 	wm_get_cfg_done(sc);
   9425 
   9426 	/* extra setup */
   9427 	switch (sc->sc_type) {
   9428 	case WM_T_82542_2_0:
   9429 	case WM_T_82542_2_1:
   9430 	case WM_T_82543:
   9431 	case WM_T_82544:
   9432 	case WM_T_82540:
   9433 	case WM_T_82545:
   9434 	case WM_T_82545_3:
   9435 	case WM_T_82546:
   9436 	case WM_T_82546_3:
   9437 	case WM_T_82541_2:
   9438 	case WM_T_82547_2:
   9439 	case WM_T_82571:
   9440 	case WM_T_82572:
   9441 	case WM_T_82573:
   9442 	case WM_T_82574:
   9443 	case WM_T_82583:
   9444 	case WM_T_82575:
   9445 	case WM_T_82576:
   9446 	case WM_T_82580:
   9447 	case WM_T_I350:
   9448 	case WM_T_I354:
   9449 	case WM_T_I210:
   9450 	case WM_T_I211:
   9451 	case WM_T_80003:
   9452 		/* null */
   9453 		break;
   9454 	case WM_T_82541:
   9455 	case WM_T_82547:
    9456 		/* XXX Actively configure the LED after PHY reset */
   9457 		break;
   9458 	case WM_T_ICH8:
   9459 	case WM_T_ICH9:
   9460 	case WM_T_ICH10:
   9461 	case WM_T_PCH:
   9462 	case WM_T_PCH2:
   9463 	case WM_T_PCH_LPT:
   9464 	case WM_T_PCH_SPT:
   9465 	case WM_T_PCH_CNP:
   9466 		wm_phy_post_reset(sc);
   9467 		break;
   9468 	default:
   9469 		panic("%s: unknown type\n", __func__);
   9470 		break;
   9471 	}
   9472 }
   9473 
    9474 /*
    9475  * Set up sc_phytype and mii_{read|write}reg.
    9476  *
    9477  *  To identify the PHY type, the correct read/write functions must be
    9478  * selected. To select the correct read/write functions, the PCI ID or
    9479  * MAC type is required, without accessing the PHY registers.
    9480  *
    9481  *  On the first call of this function, the PHY ID is not known yet.
    9482  * Check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    9483  * so the result might be incorrect.
    9484  *
    9485  *  On the second call, the PHY OUI and model are used to identify the
    9486  * PHY type. It might not be perfect because some entries are missing
    9487  * from the comparison, but it is better than the first call.
    9488  *
    9489  *  If the newly detected result differs from the previous assumption,
    9490  * a diagnostic message is printed.
    9491  */
   9492 static void
   9493 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9494     uint16_t phy_model)
   9495 {
   9496 	device_t dev = sc->sc_dev;
   9497 	struct mii_data *mii = &sc->sc_mii;
   9498 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9499 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9500 	mii_readreg_t new_readreg;
   9501 	mii_writereg_t new_writereg;
   9502 
   9503 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9504 		device_xname(sc->sc_dev), __func__));
   9505 
   9506 	if (mii->mii_readreg == NULL) {
   9507 		/*
   9508 		 *  This is the first call of this function. For ICH and PCH
   9509 		 * variants, it's difficult to determine the PHY access method
   9510 		 * by sc_type, so use the PCI product ID for some devices.
   9511 		 */
   9512 
   9513 		switch (sc->sc_pcidevid) {
   9514 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9515 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9516 			/* 82577 */
   9517 			new_phytype = WMPHY_82577;
   9518 			break;
   9519 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9520 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9521 			/* 82578 */
   9522 			new_phytype = WMPHY_82578;
   9523 			break;
   9524 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9525 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9526 			/* 82579 */
   9527 			new_phytype = WMPHY_82579;
   9528 			break;
   9529 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9530 		case PCI_PRODUCT_INTEL_82801I_BM:
   9531 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9532 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9533 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9534 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9535 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9536 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9537 			/* ICH8, 9, 10 with 82567 */
   9538 			new_phytype = WMPHY_BM;
   9539 			break;
   9540 		default:
   9541 			break;
   9542 		}
   9543 	} else {
   9544 		/* It's not the first call. Use PHY OUI and model */
   9545 		switch (phy_oui) {
   9546 		case MII_OUI_ATHEROS: /* XXX ??? */
   9547 			switch (phy_model) {
   9548 			case 0x0004: /* XXX */
   9549 				new_phytype = WMPHY_82578;
   9550 				break;
   9551 			default:
   9552 				break;
   9553 			}
   9554 			break;
   9555 		case MII_OUI_xxMARVELL:
   9556 			switch (phy_model) {
   9557 			case MII_MODEL_xxMARVELL_I210:
   9558 				new_phytype = WMPHY_I210;
   9559 				break;
   9560 			case MII_MODEL_xxMARVELL_E1011:
   9561 			case MII_MODEL_xxMARVELL_E1000_3:
   9562 			case MII_MODEL_xxMARVELL_E1000_5:
   9563 			case MII_MODEL_xxMARVELL_E1112:
   9564 				new_phytype = WMPHY_M88;
   9565 				break;
   9566 			case MII_MODEL_xxMARVELL_E1149:
   9567 				new_phytype = WMPHY_BM;
   9568 				break;
   9569 			case MII_MODEL_xxMARVELL_E1111:
   9570 			case MII_MODEL_xxMARVELL_I347:
   9571 			case MII_MODEL_xxMARVELL_E1512:
   9572 			case MII_MODEL_xxMARVELL_E1340M:
   9573 			case MII_MODEL_xxMARVELL_E1543:
   9574 				new_phytype = WMPHY_M88;
   9575 				break;
   9576 			case MII_MODEL_xxMARVELL_I82563:
   9577 				new_phytype = WMPHY_GG82563;
   9578 				break;
   9579 			default:
   9580 				break;
   9581 			}
   9582 			break;
   9583 		case MII_OUI_INTEL:
   9584 			switch (phy_model) {
   9585 			case MII_MODEL_INTEL_I82577:
   9586 				new_phytype = WMPHY_82577;
   9587 				break;
   9588 			case MII_MODEL_INTEL_I82579:
   9589 				new_phytype = WMPHY_82579;
   9590 				break;
   9591 			case MII_MODEL_INTEL_I217:
   9592 				new_phytype = WMPHY_I217;
   9593 				break;
   9594 			case MII_MODEL_INTEL_I82580:
   9595 			case MII_MODEL_INTEL_I350:
   9596 				new_phytype = WMPHY_82580;
   9597 				break;
   9598 			default:
   9599 				break;
   9600 			}
   9601 			break;
   9602 		case MII_OUI_yyINTEL:
   9603 			switch (phy_model) {
   9604 			case MII_MODEL_yyINTEL_I82562G:
   9605 			case MII_MODEL_yyINTEL_I82562EM:
   9606 			case MII_MODEL_yyINTEL_I82562ET:
   9607 				new_phytype = WMPHY_IFE;
   9608 				break;
   9609 			case MII_MODEL_yyINTEL_IGP01E1000:
   9610 				new_phytype = WMPHY_IGP;
   9611 				break;
   9612 			case MII_MODEL_yyINTEL_I82566:
   9613 				new_phytype = WMPHY_IGP_3;
   9614 				break;
   9615 			default:
   9616 				break;
   9617 			}
   9618 			break;
   9619 		default:
   9620 			break;
   9621 		}
   9622 		if (new_phytype == WMPHY_UNKNOWN)
   9623 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9624 			    __func__);
   9625 
   9626 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
			    " was incorrect. PHY type from PHY ID = %u\n",
   9630 			    sc->sc_phytype, new_phytype);
   9631 		}
   9632 	}
   9633 
   9634 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9635 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9636 		/* SGMII */
   9637 		new_readreg = wm_sgmii_readreg;
   9638 		new_writereg = wm_sgmii_writereg;
   9639 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9640 		/* BM2 (phyaddr == 1) */
   9641 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9642 		    && (new_phytype != WMPHY_BM)
   9643 		    && (new_phytype != WMPHY_UNKNOWN))
   9644 			doubt_phytype = new_phytype;
   9645 		new_phytype = WMPHY_BM;
   9646 		new_readreg = wm_gmii_bm_readreg;
   9647 		new_writereg = wm_gmii_bm_writereg;
   9648 	} else if (sc->sc_type >= WM_T_PCH) {
   9649 		/* All PCH* use _hv_ */
   9650 		new_readreg = wm_gmii_hv_readreg;
   9651 		new_writereg = wm_gmii_hv_writereg;
   9652 	} else if (sc->sc_type >= WM_T_ICH8) {
   9653 		/* non-82567 ICH8, 9 and 10 */
   9654 		new_readreg = wm_gmii_i82544_readreg;
   9655 		new_writereg = wm_gmii_i82544_writereg;
   9656 	} else if (sc->sc_type >= WM_T_80003) {
   9657 		/* 80003 */
   9658 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9659 		    && (new_phytype != WMPHY_GG82563)
   9660 		    && (new_phytype != WMPHY_UNKNOWN))
   9661 			doubt_phytype = new_phytype;
   9662 		new_phytype = WMPHY_GG82563;
   9663 		new_readreg = wm_gmii_i80003_readreg;
   9664 		new_writereg = wm_gmii_i80003_writereg;
   9665 	} else if (sc->sc_type >= WM_T_I210) {
   9666 		/* I210 and I211 */
   9667 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9668 		    && (new_phytype != WMPHY_I210)
   9669 		    && (new_phytype != WMPHY_UNKNOWN))
   9670 			doubt_phytype = new_phytype;
   9671 		new_phytype = WMPHY_I210;
   9672 		new_readreg = wm_gmii_gs40g_readreg;
   9673 		new_writereg = wm_gmii_gs40g_writereg;
   9674 	} else if (sc->sc_type >= WM_T_82580) {
   9675 		/* 82580, I350 and I354 */
   9676 		new_readreg = wm_gmii_82580_readreg;
   9677 		new_writereg = wm_gmii_82580_writereg;
   9678 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9680 		new_readreg = wm_gmii_i82544_readreg;
   9681 		new_writereg = wm_gmii_i82544_writereg;
   9682 	} else {
   9683 		new_readreg = wm_gmii_i82543_readreg;
   9684 		new_writereg = wm_gmii_i82543_writereg;
   9685 	}
   9686 
   9687 	if (new_phytype == WMPHY_BM) {
   9688 		/* All BM use _bm_ */
   9689 		new_readreg = wm_gmii_bm_readreg;
   9690 		new_writereg = wm_gmii_bm_writereg;
   9691 	}
   9692 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9693 		/* All PCH* use _hv_ */
   9694 		new_readreg = wm_gmii_hv_readreg;
   9695 		new_writereg = wm_gmii_hv_writereg;
   9696 	}
   9697 
   9698 	/* Diag output */
   9699 	if (doubt_phytype != WMPHY_UNKNOWN)
   9700 		aprint_error_dev(dev, "Assumed new PHY type was "
   9701 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9702 		    new_phytype);
   9703 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
		    " was incorrect. New PHY type = %u\n",
   9707 		    sc->sc_phytype, new_phytype);
   9708 
   9709 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9710 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9711 
   9712 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9713 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9714 		    "function was incorrect.\n");
   9715 
   9716 	/* Update now */
   9717 	sc->sc_phytype = new_phytype;
   9718 	mii->mii_readreg = new_readreg;
   9719 	mii->mii_writereg = new_writereg;
   9720 	if (new_readreg == wm_gmii_hv_readreg) {
   9721 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   9722 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   9723 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   9724 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   9725 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   9726 	}
   9727 }
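
/*
 * For reference, a minimal sketch of the two-phase call pattern described
 * above. The OUI/model arguments of the first call are placeholders here;
 * the first-call branch selects by PCI ID or MAC type and ignores them.
 * The second call is visible near the bottom of wm_gmii_mediainit() below.
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);	(first call: guess)
 *	...probe the PHY with mii_attach()...
 *	child = LIST_FIRST(&mii->mii_phys);
 *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
 *	    child->mii_mpd_model);		(second call: refine)
 */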
   9728 
   9729 /*
   9730  * wm_get_phy_id_82575:
   9731  *
   9732  * Return PHY ID. Return -1 if it failed.
   9733  */
   9734 static int
   9735 wm_get_phy_id_82575(struct wm_softc *sc)
   9736 {
   9737 	uint32_t reg;
   9738 	int phyid = -1;
   9739 
   9740 	/* XXX */
   9741 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9742 		return -1;
   9743 
   9744 	if (wm_sgmii_uses_mdio(sc)) {
   9745 		switch (sc->sc_type) {
   9746 		case WM_T_82575:
   9747 		case WM_T_82576:
   9748 			reg = CSR_READ(sc, WMREG_MDIC);
   9749 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9750 			break;
   9751 		case WM_T_82580:
   9752 		case WM_T_I350:
   9753 		case WM_T_I354:
   9754 		case WM_T_I210:
   9755 		case WM_T_I211:
   9756 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9757 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9758 			break;
   9759 		default:
   9760 			return -1;
   9761 		}
   9762 	}
   9763 
   9764 	return phyid;
   9765 }
   9766 
   9767 
   9768 /*
   9769  * wm_gmii_mediainit:
   9770  *
   9771  *	Initialize media for use on 1000BASE-T devices.
   9772  */
   9773 static void
   9774 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9775 {
   9776 	device_t dev = sc->sc_dev;
   9777 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9778 	struct mii_data *mii = &sc->sc_mii;
   9779 	uint32_t reg;
   9780 
   9781 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9782 		device_xname(sc->sc_dev), __func__));
   9783 
   9784 	/* We have GMII. */
   9785 	sc->sc_flags |= WM_F_HAS_MII;
   9786 
   9787 	if (sc->sc_type == WM_T_80003)
   9788 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9789 	else
   9790 		sc->sc_tipg = TIPG_1000T_DFLT;
   9791 
   9792 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9793 	if ((sc->sc_type == WM_T_82580)
   9794 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9795 	    || (sc->sc_type == WM_T_I211)) {
   9796 		reg = CSR_READ(sc, WMREG_PHPM);
   9797 		reg &= ~PHPM_GO_LINK_D;
   9798 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9799 	}
   9800 
   9801 	/*
   9802 	 * Let the chip set speed/duplex on its own based on
   9803 	 * signals from the PHY.
   9804 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9805 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9806 	 */
   9807 	sc->sc_ctrl |= CTRL_SLU;
   9808 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9809 
   9810 	/* Initialize our media structures and probe the GMII. */
   9811 	mii->mii_ifp = ifp;
   9812 
   9813 	mii->mii_statchg = wm_gmii_statchg;
   9814 
   9815 	/* get PHY control from SMBus to PCIe */
   9816 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9817 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9818 	    || (sc->sc_type == WM_T_PCH_CNP))
   9819 		wm_smbustopci(sc);
   9820 
   9821 	wm_gmii_reset(sc);
   9822 
   9823 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9824 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9825 	    wm_gmii_mediastatus);
   9826 
   9827 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9828 	    || (sc->sc_type == WM_T_82580)
   9829 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9830 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9831 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9832 			/* Attach only one port */
   9833 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9834 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9835 		} else {
   9836 			int i, id;
   9837 			uint32_t ctrl_ext;
   9838 
   9839 			id = wm_get_phy_id_82575(sc);
   9840 			if (id != -1) {
   9841 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9842 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9843 			}
   9844 			if ((id == -1)
   9845 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   9847 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9848 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9849 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9850 				CSR_WRITE_FLUSH(sc);
   9851 				delay(300*1000); /* XXX too long */
   9852 
				/* Try PHY addresses from 1 to 7 */
   9854 				for (i = 1; i < 8; i++)
   9855 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9856 					    0xffffffff, i, MII_OFFSET_ANY,
   9857 					    MIIF_DOPAUSE);
   9858 
				/* Restore the previous SFP cage power state */
   9860 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9861 			}
   9862 		}
   9863 	} else
   9864 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9865 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9866 
	/*
	 * If the MAC is PCH2 or newer (PCH_LPT, PCH_SPT, PCH_CNP) and failed
	 * to detect an MII PHY, call wm_set_mdio_slow_mode_hv() as a
	 * workaround and retry.
	 */
   9871 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9872 		|| (sc->sc_type == WM_T_PCH_SPT)
   9873 		|| (sc->sc_type == WM_T_PCH_CNP))
   9874 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9875 		wm_set_mdio_slow_mode_hv(sc);
   9876 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9877 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9878 	}
   9879 
   9880 	/*
   9881 	 * (For ICH8 variants)
   9882 	 * If PHY detection failed, use BM's r/w function and retry.
   9883 	 */
   9884 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9885 		/* if failed, retry with *_bm_* */
   9886 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9887 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9888 		    sc->sc_phytype);
   9889 		sc->sc_phytype = WMPHY_BM;
   9890 		mii->mii_readreg = wm_gmii_bm_readreg;
   9891 		mii->mii_writereg = wm_gmii_bm_writereg;
   9892 
   9893 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9894 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9895 	}
   9896 
   9897 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   9899 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9900 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9901 		sc->sc_phytype = WMPHY_NONE;
   9902 	} else {
   9903 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9904 
		/*
		 * PHY found! Check the PHY type again with the second call
		 * of wm_gmii_setup_phytype().
		 */
   9909 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9910 		    child->mii_mpd_model);
   9911 
   9912 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9913 	}
   9914 }
   9915 
   9916 /*
   9917  * wm_gmii_mediachange:	[ifmedia interface function]
   9918  *
   9919  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9920  */
   9921 static int
   9922 wm_gmii_mediachange(struct ifnet *ifp)
   9923 {
   9924 	struct wm_softc *sc = ifp->if_softc;
   9925 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9926 	int rc;
   9927 
   9928 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9929 		device_xname(sc->sc_dev), __func__));
   9930 	if ((ifp->if_flags & IFF_UP) == 0)
   9931 		return 0;
   9932 
   9933 	/* Disable D0 LPLU. */
   9934 	wm_lplu_d0_disable(sc);
   9935 
   9936 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9937 	sc->sc_ctrl |= CTRL_SLU;
   9938 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9939 	    || (sc->sc_type > WM_T_82543)) {
   9940 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9941 	} else {
   9942 		sc->sc_ctrl &= ~CTRL_ASDE;
   9943 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9944 		if (ife->ifm_media & IFM_FDX)
   9945 			sc->sc_ctrl |= CTRL_FD;
   9946 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9947 		case IFM_10_T:
   9948 			sc->sc_ctrl |= CTRL_SPEED_10;
   9949 			break;
   9950 		case IFM_100_TX:
   9951 			sc->sc_ctrl |= CTRL_SPEED_100;
   9952 			break;
   9953 		case IFM_1000_T:
   9954 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9955 			break;
   9956 		default:
   9957 			panic("wm_gmii_mediachange: bad media 0x%x",
   9958 			    ife->ifm_media);
   9959 		}
   9960 	}
   9961 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9962 	CSR_WRITE_FLUSH(sc);
   9963 	if (sc->sc_type <= WM_T_82543)
   9964 		wm_gmii_reset(sc);
   9965 
   9966 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9967 		return 0;
   9968 	return rc;
   9969 }
   9970 
   9971 /*
   9972  * wm_gmii_mediastatus:	[ifmedia interface function]
   9973  *
   9974  *	Get the current interface media status on a 1000BASE-T device.
   9975  */
   9976 static void
   9977 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9978 {
   9979 	struct wm_softc *sc = ifp->if_softc;
   9980 
   9981 	ether_mediastatus(ifp, ifmr);
   9982 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9983 	    | sc->sc_flowflags;
   9984 }
   9985 
   9986 #define	MDI_IO		CTRL_SWDPIN(2)
   9987 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9988 #define	MDI_CLK		CTRL_SWDPIN(3)
   9989 
   9990 static void
   9991 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9992 {
   9993 	uint32_t i, v;
   9994 
   9995 	v = CSR_READ(sc, WMREG_CTRL);
   9996 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9997 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9998 
   9999 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10000 		if (data & i)
   10001 			v |= MDI_IO;
   10002 		else
   10003 			v &= ~MDI_IO;
   10004 		CSR_WRITE(sc, WMREG_CTRL, v);
   10005 		CSR_WRITE_FLUSH(sc);
   10006 		delay(10);
   10007 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10008 		CSR_WRITE_FLUSH(sc);
   10009 		delay(10);
   10010 		CSR_WRITE(sc, WMREG_CTRL, v);
   10011 		CSR_WRITE_FLUSH(sc);
   10012 		delay(10);
   10013 	}
   10014 }
   10015 
   10016 static uint32_t
   10017 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10018 {
   10019 	uint32_t v, i, data = 0;
   10020 
   10021 	v = CSR_READ(sc, WMREG_CTRL);
   10022 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10023 	v |= CTRL_SWDPIO(3);
   10024 
   10025 	CSR_WRITE(sc, WMREG_CTRL, v);
   10026 	CSR_WRITE_FLUSH(sc);
   10027 	delay(10);
   10028 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10029 	CSR_WRITE_FLUSH(sc);
   10030 	delay(10);
   10031 	CSR_WRITE(sc, WMREG_CTRL, v);
   10032 	CSR_WRITE_FLUSH(sc);
   10033 	delay(10);
   10034 
   10035 	for (i = 0; i < 16; i++) {
   10036 		data <<= 1;
   10037 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10038 		CSR_WRITE_FLUSH(sc);
   10039 		delay(10);
   10040 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10041 			data |= 1;
   10042 		CSR_WRITE(sc, WMREG_CTRL, v);
   10043 		CSR_WRITE_FLUSH(sc);
   10044 		delay(10);
   10045 	}
   10046 
   10047 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10048 	CSR_WRITE_FLUSH(sc);
   10049 	delay(10);
   10050 	CSR_WRITE(sc, WMREG_CTRL, v);
   10051 	CSR_WRITE_FLUSH(sc);
   10052 	delay(10);
   10053 
   10054 	return data;
   10055 }
   10056 
   10057 #undef MDI_IO
   10058 #undef MDI_DIR
   10059 #undef MDI_CLK
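
/*
 * For reference, wm_gmii_i82543_readreg() below bit-bangs a standard
 * IEEE 802.3 clause 22 management frame through the software-defined
 * pins above: a 32-bit preamble of all ones, then the 14-bit command
 * assembled as
 *
 *	(MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
 *	    (phy << 5) | reg
 *
 * followed by the turnaround and 16 data bits clocked in by
 * wm_i82543_mii_recvbits(). A worked example, assuming the standard
 * clause 22 encodings (start = 01, read op = 10): phy 1, reg 1 yields
 * the command word 0x1821.
 */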
   10060 
   10061 /*
   10062  * wm_gmii_i82543_readreg:	[mii interface function]
   10063  *
   10064  *	Read a PHY register on the GMII (i82543 version).
   10065  */
   10066 static int
   10067 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10068 {
   10069 	struct wm_softc *sc = device_private(dev);
   10070 	int rv;
   10071 
   10072 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10073 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10074 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10075 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10076 
   10077 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10078 		device_xname(dev), phy, reg, rv));
   10079 
   10080 	return rv;
   10081 }
   10082 
   10083 /*
   10084  * wm_gmii_i82543_writereg:	[mii interface function]
   10085  *
   10086  *	Write a PHY register on the GMII (i82543 version).
   10087  */
   10088 static void
   10089 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10090 {
   10091 	struct wm_softc *sc = device_private(dev);
   10092 
   10093 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10094 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10095 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10096 	    (MII_COMMAND_START << 30), 32);
   10097 }
   10098 
   10099 /*
   10100  * wm_gmii_mdic_readreg:	[mii interface function]
   10101  *
   10102  *	Read a PHY register on the GMII.
   10103  */
   10104 static int
   10105 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10106 {
   10107 	struct wm_softc *sc = device_private(dev);
   10108 	uint32_t mdic = 0;
   10109 	int i, rv;
   10110 
   10111 	if (reg > MII_ADDRMASK) {
   10112 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10113 		    __func__, sc->sc_phytype, reg);
   10114 		reg &= MII_ADDRMASK;
   10115 	}
   10116 
   10117 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10118 	    MDIC_REGADD(reg));
   10119 
   10120 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10121 		delay(50);
   10122 		mdic = CSR_READ(sc, WMREG_MDIC);
   10123 		if (mdic & MDIC_READY)
   10124 			break;
   10125 	}
   10126 
   10127 	if ((mdic & MDIC_READY) == 0) {
   10128 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10129 		    device_xname(dev), phy, reg);
   10130 		return 0;
   10131 	} else if (mdic & MDIC_E) {
   10132 #if 0 /* This is normal if no PHY is present. */
   10133 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10134 		    device_xname(dev), phy, reg);
   10135 #endif
   10136 		return 0;
   10137 	} else {
   10138 		rv = MDIC_DATA(mdic);
   10139 		if (rv == 0xffff)
   10140 			rv = 0;
   10141 	}
   10142 
   10143 	/*
   10144 	 * Allow some time after each MDIC transaction to avoid
   10145 	 * reading duplicate data in the next MDIC transaction.
   10146 	 */
   10147 	if (sc->sc_type == WM_T_PCH2)
   10148 		delay(100);
   10149 
   10150 	return rv;
   10151 }
   10152 
   10153 /*
   10154  * wm_gmii_mdic_writereg:	[mii interface function]
   10155  *
   10156  *	Write a PHY register on the GMII.
   10157  */
   10158 static void
   10159 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10160 {
   10161 	struct wm_softc *sc = device_private(dev);
   10162 	uint32_t mdic = 0;
   10163 	int i;
   10164 
   10165 	if (reg > MII_ADDRMASK) {
   10166 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10167 		    __func__, sc->sc_phytype, reg);
   10168 		reg &= MII_ADDRMASK;
   10169 	}
   10170 
   10171 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10172 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10173 
   10174 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10175 		delay(50);
   10176 		mdic = CSR_READ(sc, WMREG_MDIC);
   10177 		if (mdic & MDIC_READY)
   10178 			break;
   10179 	}
   10180 
   10181 	if ((mdic & MDIC_READY) == 0) {
   10182 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10183 		    device_xname(dev), phy, reg);
   10184 		return;
   10185 	} else if (mdic & MDIC_E) {
   10186 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10187 		    device_xname(dev), phy, reg);
   10188 		return;
   10189 	}
   10190 
   10191 	/*
   10192 	 * Allow some time after each MDIC transaction to avoid
   10193 	 * reading duplicate data in the next MDIC transaction.
   10194 	 */
   10195 	if (sc->sc_type == WM_T_PCH2)
   10196 		delay(100);
   10197 }
   10198 
   10199 /*
   10200  * wm_gmii_i82544_readreg:	[mii interface function]
   10201  *
   10202  *	Read a PHY register on the GMII.
   10203  */
   10204 static int
   10205 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10206 {
   10207 	struct wm_softc *sc = device_private(dev);
   10208 	uint16_t val;
   10209 
   10210 	if (sc->phy.acquire(sc)) {
   10211 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10212 		return 0;
   10213 	}
   10214 
   10215 	wm_gmii_i82544_readreg_locked(dev, phy, reg, &val);
   10216 
   10217 	sc->phy.release(sc);
   10218 
   10219 	return val;
   10220 }
   10221 
   10222 static int
   10223 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10224 {
   10225 	struct wm_softc *sc = device_private(dev);
   10226 
   10227 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10228 		switch (sc->sc_phytype) {
   10229 		case WMPHY_IGP:
   10230 		case WMPHY_IGP_2:
   10231 		case WMPHY_IGP_3:
   10232 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10233 			    reg);
   10234 			break;
   10235 		default:
   10236 #ifdef WM_DEBUG
   10237 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10238 			    __func__, sc->sc_phytype, reg);
   10239 #endif
   10240 			break;
   10241 		}
   10242 	}
   10243 
   10244 	*val = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10245 
   10246 	return 0;
   10247 }
   10248 
   10249 /*
   10250  * wm_gmii_i82544_writereg:	[mii interface function]
   10251  *
   10252  *	Write a PHY register on the GMII.
   10253  */
   10254 static void
   10255 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10256 {
   10257 	struct wm_softc *sc = device_private(dev);
   10258 
   10259 	if (sc->phy.acquire(sc)) {
   10260 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10261 		return;
   10262 	}
   10263 
   10264 	wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10265 	sc->phy.release(sc);
   10266 }
   10267 
   10268 static int
   10269 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10270 {
   10271 	struct wm_softc *sc = device_private(dev);
   10272 
   10273 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10274 		switch (sc->sc_phytype) {
   10275 		case WMPHY_IGP:
   10276 		case WMPHY_IGP_2:
   10277 		case WMPHY_IGP_3:
   10278 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10279 			    reg);
   10280 			break;
   10281 		default:
   10282 #ifdef WM_DEBUG
   10283 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10284 			    __func__, sc->sc_phytype, reg);
   10285 #endif
   10286 			break;
   10287 		}
   10288 	}
   10289 
   10290 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10291 
   10292 	return 0;
   10293 }
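
/*
 * For reference, a sketch of the IGP page-select handling above (the
 * register number is illustrative): a register whose number exceeds
 * BME1000_MAX_MULTI_PAGE_REG carries its page in the upper bits, so the
 * full value is first written to MII_IGPHY_PAGE_SELECT and only the low
 * five bits then select the register within that page:
 *
 *	wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
 *	data = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
 */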
   10294 
   10295 /*
   10296  * wm_gmii_i80003_readreg:	[mii interface function]
   10297  *
 *	Read a PHY register on the kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10301  */
   10302 static int
   10303 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10304 {
   10305 	struct wm_softc *sc = device_private(dev);
   10306 	int page_select, temp;
   10307 	int rv;
   10308 
   10309 	if (phy != 1) /* only one PHY on kumeran bus */
   10310 		return 0;
   10311 
   10312 	if (sc->phy.acquire(sc)) {
   10313 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10314 		return 0;
   10315 	}
   10316 
   10317 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10318 		page_select = GG82563_PHY_PAGE_SELECT;
   10319 	else {
   10320 		/*
   10321 		 * Use Alternative Page Select register to access registers
   10322 		 * 30 and 31.
   10323 		 */
   10324 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10325 	}
   10326 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10327 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10328 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10329 		/*
		 * Wait an additional 200us to work around a bug with the
		 * ready bit in the MDIC register.
   10332 		 */
   10333 		delay(200);
   10334 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10335 			device_printf(dev, "%s failed\n", __func__);
   10336 			rv = 0; /* XXX */
   10337 			goto out;
   10338 		}
   10339 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10340 		delay(200);
   10341 	} else
   10342 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10343 
   10344 out:
   10345 	sc->phy.release(sc);
   10346 	return rv;
   10347 }
   10348 
   10349 /*
   10350  * wm_gmii_i80003_writereg:	[mii interface function]
   10351  *
 *	Write a PHY register on the kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10355  */
   10356 static void
   10357 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10358 {
   10359 	struct wm_softc *sc = device_private(dev);
   10360 	int page_select, temp;
   10361 
   10362 	if (phy != 1) /* only one PHY on kumeran bus */
   10363 		return;
   10364 
   10365 	if (sc->phy.acquire(sc)) {
   10366 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10367 		return;
   10368 	}
   10369 
   10370 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10371 		page_select = GG82563_PHY_PAGE_SELECT;
   10372 	else {
   10373 		/*
   10374 		 * Use Alternative Page Select register to access registers
   10375 		 * 30 and 31.
   10376 		 */
   10377 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10378 	}
   10379 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10380 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10381 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10382 		/*
		 * Wait an additional 200us to work around a bug with the
		 * ready bit in the MDIC register.
   10385 		 */
   10386 		delay(200);
   10387 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10388 			device_printf(dev, "%s failed\n", __func__);
   10389 			goto out;
   10390 		}
   10391 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10392 		delay(200);
   10393 	} else
   10394 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10395 
   10396 out:
   10397 	sc->phy.release(sc);
   10398 }
   10399 
   10400 /*
   10401  * wm_gmii_bm_readreg:	[mii interface function]
   10402  *
 *	Read a PHY register on a BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10406  */
   10407 static int
   10408 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10409 {
   10410 	struct wm_softc *sc = device_private(dev);
   10411 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10412 	uint16_t val;
   10413 	int rv;
   10414 
   10415 	if (sc->phy.acquire(sc)) {
   10416 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10417 		return 0;
   10418 	}
   10419 
   10420 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10421 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10422 		    || (reg == 31)) ? 1 : phy;
   10423 	/* Page 800 works differently than the rest so it has its own func */
   10424 	if (page == BM_WUC_PAGE) {
   10425 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10426 		rv = val;
   10427 		goto release;
   10428 	}
   10429 
   10430 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10431 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10432 		    && (sc->sc_type != WM_T_82583))
   10433 			wm_gmii_mdic_writereg(dev, phy,
   10434 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10435 		else
   10436 			wm_gmii_mdic_writereg(dev, phy,
   10437 			    BME1000_PHY_PAGE_SELECT, page);
   10438 	}
   10439 
   10440 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10441 
   10442 release:
   10443 	sc->phy.release(sc);
   10444 	return rv;
   10445 }
   10446 
   10447 /*
   10448  * wm_gmii_bm_writereg:	[mii interface function]
   10449  *
 *	Write a PHY register on a BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10453  */
   10454 static void
   10455 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10456 {
   10457 	struct wm_softc *sc = device_private(dev);
   10458 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10459 
   10460 	if (sc->phy.acquire(sc)) {
   10461 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10462 		return;
   10463 	}
   10464 
   10465 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10466 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10467 		    || (reg == 31)) ? 1 : phy;
   10468 	/* Page 800 works differently than the rest so it has its own func */
   10469 	if (page == BM_WUC_PAGE) {
   10470 		uint16_t tmp;
   10471 
   10472 		tmp = val;
   10473 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10474 		goto release;
   10475 	}
   10476 
   10477 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10478 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10479 		    && (sc->sc_type != WM_T_82583))
   10480 			wm_gmii_mdic_writereg(dev, phy,
   10481 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10482 		else
   10483 			wm_gmii_mdic_writereg(dev, phy,
   10484 			    BME1000_PHY_PAGE_SELECT, page);
   10485 	}
   10486 
   10487 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10488 
   10489 release:
   10490 	sc->phy.release(sc);
   10491 }
   10492 
   10493 static void
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10495 {
   10496 	struct wm_softc *sc = device_private(dev);
   10497 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10498 	uint16_t wuce, reg;
   10499 
   10500 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10501 		device_xname(dev), __func__));
   10502 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10503 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   10505 	}
   10506 
   10507 	/*
   10508 	 * 1) Enable PHY wakeup register first.
   10509 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10510 	 */
   10511 
   10512 	/* Set page 769 */
   10513 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10514 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10515 
   10516 	/* Read WUCE and save it */
   10517 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10518 
   10519 	reg = wuce | BM_WUC_ENABLE_BIT;
   10520 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10521 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10522 
   10523 	/* Select page 800 */
   10524 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10525 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10526 
   10527 	/*
   10528 	 * 2) Access PHY wakeup register.
   10529 	 * See e1000_access_phy_wakeup_reg_bm.
   10530 	 */
   10531 
   10532 	/* Write page 800 */
   10533 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10534 
   10535 	if (rd)
   10536 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10537 	else
   10538 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10539 
   10540 	/*
   10541 	 * 3) Disable PHY wakeup register.
   10542 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10543 	 */
   10544 	/* Set page 769 */
   10545 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10546 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10547 
   10548 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10549 }
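
/*
 * A minimal usage sketch: any access whose decoded page is BM_WUC_PAGE
 * (page 800) is routed here by the BM and HV read/write functions, e.g.
 * (with "reg" an encoded page-800 register number):
 *
 *	uint16_t val;
 *
 *	wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);	(read)
 *	wm_access_phy_wakeup_reg_bm(dev, reg, &val, 0);	(write)
 *
 * The enable/disable bracketing above restores the saved WUC enable
 * state after the access.
 */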
   10550 
   10551 /*
   10552  * wm_gmii_hv_readreg:	[mii interface function]
   10553  *
 *	Read a PHY register on an HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10557  */
   10558 static int
   10559 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10560 {
   10561 	struct wm_softc *sc = device_private(dev);
   10562 	uint16_t val;
   10563 
   10564 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10565 		device_xname(dev), __func__));
   10566 	if (sc->phy.acquire(sc)) {
   10567 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10568 		return 0;
   10569 	}
   10570 
   10571 	wm_gmii_hv_readreg_locked(dev, phy, reg, &val);
   10572 	sc->phy.release(sc);
   10573 	return val;
   10574 }
   10575 
   10576 static int
   10577 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10578 {
   10579 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10580 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10581 
   10582 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10583 
   10584 	/* Page 800 works differently than the rest so it has its own func */
   10585 	if (page == BM_WUC_PAGE) {
   10586 		wm_access_phy_wakeup_reg_bm(dev, reg, val, 1);
   10587 		return 0;
   10588 	}
   10589 
   10590 	/*
	 * Pages lower than 768 work differently from the rest and would
	 * need their own handling, which is not implemented yet.
   10593 	 */
   10594 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10595 		printf("gmii_hv_readreg!!!\n");
   10596 		return 0;
   10597 	}
   10598 
   10599 	/*
   10600 	 * XXX I21[789] documents say that the SMBus Address register is at
   10601 	 * PHY address 01, Page 0 (not 768), Register 26.
   10602 	 */
   10603 	if (page == HV_INTC_FC_PAGE_START)
   10604 		page = 0;
   10605 
   10606 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10607 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10608 		    page << BME1000_PAGE_SHIFT);
   10609 	}
   10610 
   10611 	*val = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10612 	return 0;
   10613 }
   10614 
   10615 /*
   10616  * wm_gmii_hv_writereg:	[mii interface function]
   10617  *
 *	Write a PHY register on an HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10621  */
   10622 static void
   10623 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10624 {
   10625 	struct wm_softc *sc = device_private(dev);
   10626 
   10627 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10628 		device_xname(dev), __func__));
   10629 
   10630 	if (sc->phy.acquire(sc)) {
   10631 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10632 		return;
   10633 	}
   10634 
   10635 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10636 	sc->phy.release(sc);
   10637 }
   10638 
   10639 static int
   10640 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10641 {
   10642 	struct wm_softc *sc = device_private(dev);
   10643 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10644 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10645 
   10646 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10647 
   10648 	/* Page 800 works differently than the rest so it has its own func */
   10649 	if (page == BM_WUC_PAGE) {
   10650 		uint16_t tmp;
   10651 
   10652 		tmp = val;
   10653 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10654 		return 0;
   10655 	}
   10656 
   10657 	/*
	 * Pages lower than 768 work differently from the rest and would
	 * need their own handling, which is not implemented yet.
   10660 	 */
   10661 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10662 		printf("gmii_hv_writereg!!!\n");
   10663 		return -1;
   10664 	}
   10665 
   10666 	{
   10667 		/*
   10668 		 * XXX I21[789] documents say that the SMBus Address register
   10669 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10670 		 */
   10671 		if (page == HV_INTC_FC_PAGE_START)
   10672 			page = 0;
   10673 
   10674 		/*
   10675 		 * XXX Workaround MDIO accesses being disabled after entering
   10676 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10677 		 * register is set)
   10678 		 */
   10679 		if (sc->sc_phytype == WMPHY_82578) {
   10680 			struct mii_softc *child;
   10681 
   10682 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10683 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10684 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10685 			    && ((val & (1 << 11)) != 0)) {
   10686 				printf("XXX need workaround\n");
   10687 			}
   10688 		}
   10689 
   10690 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10691 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10692 			    page << BME1000_PAGE_SHIFT);
   10693 		}
   10694 	}
   10695 
   10696 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10697 
   10698 	return 0;
   10699 }
   10700 
   10701 /*
   10702  * wm_gmii_82580_readreg:	[mii interface function]
   10703  *
   10704  *	Read a PHY register on the 82580 and I350.
   10705  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10707  */
   10708 static int
   10709 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10710 {
   10711 	struct wm_softc *sc = device_private(dev);
   10712 	int rv;
   10713 
   10714 	if (sc->phy.acquire(sc) != 0) {
   10715 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10716 		return 0;
   10717 	}
   10718 
   10719 #ifdef DIAGNOSTIC
   10720 	if (reg > MII_ADDRMASK) {
   10721 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10722 		    __func__, sc->sc_phytype, reg);
   10723 		reg &= MII_ADDRMASK;
   10724 	}
   10725 #endif
   10726 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10727 
   10728 	sc->phy.release(sc);
   10729 	return rv;
   10730 }
   10731 
   10732 /*
   10733  * wm_gmii_82580_writereg:	[mii interface function]
   10734  *
   10735  *	Write a PHY register on the 82580 and I350.
   10736  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10738  */
   10739 static void
   10740 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10741 {
   10742 	struct wm_softc *sc = device_private(dev);
   10743 
   10744 	if (sc->phy.acquire(sc) != 0) {
   10745 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10746 		return;
   10747 	}
   10748 
   10749 #ifdef DIAGNOSTIC
   10750 	if (reg > MII_ADDRMASK) {
   10751 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10752 		    __func__, sc->sc_phytype, reg);
   10753 		reg &= MII_ADDRMASK;
   10754 	}
   10755 #endif
   10756 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10757 
   10758 	sc->phy.release(sc);
   10759 }
   10760 
   10761 /*
   10762  * wm_gmii_gs40g_readreg:	[mii interface function]
   10763  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10767  */
   10768 static int
   10769 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10770 {
   10771 	struct wm_softc *sc = device_private(dev);
   10772 	int page, offset;
   10773 	int rv;
   10774 
   10775 	/* Acquire semaphore */
   10776 	if (sc->phy.acquire(sc)) {
   10777 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10778 		return 0;
   10779 	}
   10780 
   10781 	/* Page select */
   10782 	page = reg >> GS40G_PAGE_SHIFT;
   10783 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10784 
   10785 	/* Read reg */
   10786 	offset = reg & GS40G_OFFSET_MASK;
   10787 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10788 
   10789 	sc->phy.release(sc);
   10790 	return rv;
   10791 }
   10792 
   10793 /*
   10794  * wm_gmii_gs40g_writereg:	[mii interface function]
   10795  *
   10796  *	Write a PHY register on the I210 and I211.
   10797  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10799  */
   10800 static void
   10801 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10802 {
   10803 	struct wm_softc *sc = device_private(dev);
   10804 	int page, offset;
   10805 
   10806 	/* Acquire semaphore */
   10807 	if (sc->phy.acquire(sc)) {
   10808 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10809 		return;
   10810 	}
   10811 
   10812 	/* Page select */
   10813 	page = reg >> GS40G_PAGE_SHIFT;
   10814 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10815 
   10816 	/* Write reg */
   10817 	offset = reg & GS40G_OFFSET_MASK;
   10818 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10819 
   10820 	/* Release semaphore */
   10821 	sc->phy.release(sc);
   10822 }
   10823 
   10824 /*
   10825  * wm_gmii_statchg:	[mii interface function]
   10826  *
   10827  *	Callback from MII layer when media changes.
   10828  */
   10829 static void
   10830 wm_gmii_statchg(struct ifnet *ifp)
   10831 {
   10832 	struct wm_softc *sc = ifp->if_softc;
   10833 	struct mii_data *mii = &sc->sc_mii;
   10834 
   10835 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10836 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10837 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10838 
   10839 	/*
   10840 	 * Get flow control negotiation result.
   10841 	 */
   10842 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10843 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10844 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10845 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10846 	}
   10847 
   10848 	if (sc->sc_flowflags & IFM_FLOW) {
   10849 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10850 			sc->sc_ctrl |= CTRL_TFCE;
   10851 			sc->sc_fcrtl |= FCRTL_XONE;
   10852 		}
   10853 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10854 			sc->sc_ctrl |= CTRL_RFCE;
   10855 	}
   10856 
   10857 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10858 		DPRINTF(WM_DEBUG_LINK,
   10859 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10860 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10861 	} else {
   10862 		DPRINTF(WM_DEBUG_LINK,
   10863 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10864 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10865 	}
   10866 
   10867 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10868 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10869 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10870 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10871 	if (sc->sc_type == WM_T_80003) {
   10872 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10873 		case IFM_1000_T:
   10874 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10875 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10876 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10877 			break;
   10878 		default:
   10879 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10880 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10881 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10882 			break;
   10883 		}
   10884 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10885 	}
   10886 }
   10887 
   10888 /* kumeran related (80003, ICH* and PCH*) */
   10889 
   10890 /*
   10891  * wm_kmrn_readreg:
   10892  *
   10893  *	Read a kumeran register
   10894  */
   10895 static int
   10896 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10897 {
   10898 	int rv;
   10899 
   10900 	if (sc->sc_type == WM_T_80003)
   10901 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10902 	else
   10903 		rv = sc->phy.acquire(sc);
   10904 	if (rv != 0) {
   10905 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10906 		    __func__);
   10907 		return rv;
   10908 	}
   10909 
   10910 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10911 
   10912 	if (sc->sc_type == WM_T_80003)
   10913 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10914 	else
   10915 		sc->phy.release(sc);
   10916 
   10917 	return rv;
   10918 }
   10919 
   10920 static int
   10921 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10922 {
   10923 
   10924 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10925 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10926 	    KUMCTRLSTA_REN);
   10927 	CSR_WRITE_FLUSH(sc);
   10928 	delay(2);
   10929 
   10930 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10931 
   10932 	return 0;
   10933 }
   10934 
   10935 /*
   10936  * wm_kmrn_writereg:
   10937  *
   10938  *	Write a kumeran register
   10939  */
   10940 static int
   10941 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10942 {
   10943 	int rv;
   10944 
   10945 	if (sc->sc_type == WM_T_80003)
   10946 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10947 	else
   10948 		rv = sc->phy.acquire(sc);
   10949 	if (rv != 0) {
   10950 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10951 		    __func__);
   10952 		return rv;
   10953 	}
   10954 
   10955 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10956 
   10957 	if (sc->sc_type == WM_T_80003)
   10958 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10959 	else
   10960 		sc->phy.release(sc);
   10961 
   10962 	return rv;
   10963 }
   10964 
   10965 static int
   10966 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10967 {
   10968 
   10969 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10970 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10971 
   10972 	return 0;
   10973 }
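
/*
 * A read-modify-write sketch using the locked variants above (the
 * caller must already hold the MAC-CSR semaphore on the 80003 or the
 * PHY lock on other chips; the modified bit is illustrative):
 *
 *	uint16_t ctrl;
 *
 *	wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &ctrl);
 *	ctrl |= some_bit;
 *	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, ctrl);
 */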
   10974 
   10975 /* SGMII related */
   10976 
   10977 /*
   10978  * wm_sgmii_uses_mdio
   10979  *
   10980  * Check whether the transaction is to the internal PHY or the external
   10981  * MDIO interface. Return true if it's MDIO.
   10982  */
   10983 static bool
   10984 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10985 {
   10986 	uint32_t reg;
   10987 	bool ismdio = false;
   10988 
   10989 	switch (sc->sc_type) {
   10990 	case WM_T_82575:
   10991 	case WM_T_82576:
   10992 		reg = CSR_READ(sc, WMREG_MDIC);
   10993 		ismdio = ((reg & MDIC_DEST) != 0);
   10994 		break;
   10995 	case WM_T_82580:
   10996 	case WM_T_I350:
   10997 	case WM_T_I354:
   10998 	case WM_T_I210:
   10999 	case WM_T_I211:
   11000 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11001 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11002 		break;
   11003 	default:
   11004 		break;
   11005 	}
   11006 
   11007 	return ismdio;
   11008 }
   11009 
   11010 /*
   11011  * wm_sgmii_readreg:	[mii interface function]
   11012  *
 *	Read a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11016  */
   11017 static int
   11018 wm_sgmii_readreg(device_t dev, int phy, int reg)
   11019 {
   11020 	struct wm_softc *sc = device_private(dev);
   11021 	uint32_t i2ccmd;
   11022 	int i, rv;
   11023 
   11024 	if (sc->phy.acquire(sc)) {
   11025 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11026 		return 0;
   11027 	}
   11028 
   11029 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11030 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11031 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11032 
   11033 	/* Poll the ready bit */
   11034 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11035 		delay(50);
   11036 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11037 		if (i2ccmd & I2CCMD_READY)
   11038 			break;
   11039 	}
   11040 	if ((i2ccmd & I2CCMD_READY) == 0)
   11041 		device_printf(dev, "I2CCMD Read did not complete\n");
   11042 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11043 		device_printf(dev, "I2CCMD Error bit set\n");
   11044 
   11045 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11046 
   11047 	sc->phy.release(sc);
   11048 	return rv;
   11049 }
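
/*
 * Note on the byte swap above: the 16-bit PHY data travels over the I2C
 * interface most-significant byte first, so the two data bytes in I2CCMD
 * are reversed relative to host order. A worked example with an
 * illustrative value: if the I2CCMD data field reads 0x3412, the returned
 * register value is 0x1234. wm_sgmii_writereg() below applies the inverse
 * swap before issuing I2CCMD_OPCODE_WRITE.
 */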
   11050 
   11051 /*
   11052  * wm_sgmii_writereg:	[mii interface function]
   11053  *
   11054  *	Write a PHY register on the SGMII.
   11055  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11057  */
   11058 static void
   11059 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   11060 {
   11061 	struct wm_softc *sc = device_private(dev);
   11062 	uint32_t i2ccmd;
   11063 	int i;
   11064 	int swapdata;
   11065 
   11066 	if (sc->phy.acquire(sc) != 0) {
   11067 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11068 		return;
   11069 	}
   11070 	/* Swap the data bytes for the I2C interface */
   11071 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11072 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11073 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11074 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11075 
   11076 	/* Poll the ready bit */
   11077 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11078 		delay(50);
   11079 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11080 		if (i2ccmd & I2CCMD_READY)
   11081 			break;
   11082 	}
   11083 	if ((i2ccmd & I2CCMD_READY) == 0)
   11084 		device_printf(dev, "I2CCMD Write did not complete\n");
   11085 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11086 		device_printf(dev, "I2CCMD Error bit set\n");
   11087 
   11088 	sc->phy.release(sc);
   11089 }
   11090 
   11091 /* TBI related */
   11092 
   11093 static bool
   11094 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11095 {
   11096 	bool sig;
   11097 
   11098 	sig = ctrl & CTRL_SWDPIN(1);
   11099 
   11100 	/*
   11101 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11102 	 * detect a signal, 1 if they don't.
   11103 	 */
   11104 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11105 		sig = !sig;
   11106 
   11107 	return sig;
   11108 }
   11109 
   11110 /*
   11111  * wm_tbi_mediainit:
   11112  *
   11113  *	Initialize media for use on 1000BASE-X devices.
   11114  */
   11115 static void
   11116 wm_tbi_mediainit(struct wm_softc *sc)
   11117 {
   11118 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11119 	const char *sep = "";
   11120 
   11121 	if (sc->sc_type < WM_T_82543)
   11122 		sc->sc_tipg = TIPG_WM_DFLT;
   11123 	else
   11124 		sc->sc_tipg = TIPG_LG_DFLT;
   11125 
   11126 	sc->sc_tbi_serdes_anegticks = 5;
   11127 
   11128 	/* Initialize our media structures */
   11129 	sc->sc_mii.mii_ifp = ifp;
   11130 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11131 
   11132 	if ((sc->sc_type >= WM_T_82575)
   11133 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11134 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11135 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11136 	else
   11137 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11138 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11139 
   11140 	/*
   11141 	 * SWD Pins:
   11142 	 *
   11143 	 *	0 = Link LED (output)
   11144 	 *	1 = Loss Of Signal (input)
   11145 	 */
   11146 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11147 
   11148 	/* XXX Perhaps this is only for TBI */
   11149 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11150 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11151 
   11152 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11153 		sc->sc_ctrl &= ~CTRL_LRST;
   11154 
   11155 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11156 
   11157 #define	ADD(ss, mm, dd)							\
   11158 do {									\
   11159 	aprint_normal("%s%s", sep, ss);					\
   11160 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11161 	sep = ", ";							\
   11162 } while (/*CONSTCOND*/0)
   11163 
   11164 	aprint_normal_dev(sc->sc_dev, "");
   11165 
   11166 	if (sc->sc_type == WM_T_I354) {
   11167 		uint32_t status;
   11168 
   11169 		status = CSR_READ(sc, WMREG_STATUS);
   11170 		if (((status & STATUS_2P5_SKU) != 0)
   11171 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11172 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11173 		} else
   11174 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11175 	} else if (sc->sc_type == WM_T_82545) {
   11176 		/* Only 82545 is LX (XXX except SFP) */
   11177 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11178 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11179 	} else {
   11180 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11181 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11182 	}
   11183 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11184 	aprint_normal("\n");
   11185 
   11186 #undef ADD
   11187 
   11188 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11189 }
   11190 
   11191 /*
   11192  * wm_tbi_mediachange:	[ifmedia interface function]
   11193  *
   11194  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11195  */
   11196 static int
   11197 wm_tbi_mediachange(struct ifnet *ifp)
   11198 {
   11199 	struct wm_softc *sc = ifp->if_softc;
   11200 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11201 	uint32_t status, ctrl;
   11202 	bool signal;
   11203 	int i;
   11204 
   11205 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11206 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11207 		/* XXX need some work for >= 82571 and < 82575 */
   11208 		if (sc->sc_type < WM_T_82575)
   11209 			return 0;
   11210 	}
   11211 
   11212 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11213 	    || (sc->sc_type >= WM_T_82575))
   11214 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11215 
   11216 	sc->sc_ctrl &= ~CTRL_LRST;
   11217 	sc->sc_txcw = TXCW_ANE;
   11218 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11219 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11220 	else if (ife->ifm_media & IFM_FDX)
   11221 		sc->sc_txcw |= TXCW_FD;
   11222 	else
   11223 		sc->sc_txcw |= TXCW_HD;
   11224 
   11225 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11226 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11227 
   11228 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11229 		device_xname(sc->sc_dev), sc->sc_txcw));
   11230 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11231 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11232 	CSR_WRITE_FLUSH(sc);
   11233 	delay(1000);
   11234 
    11235 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11236 	signal = wm_tbi_havesignal(sc, ctrl);
   11237 
   11238 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11239 		signal));
   11240 
   11241 	if (signal) {
   11242 		/* Have signal; wait for the link to come up. */
   11243 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11244 			delay(10000);
   11245 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11246 				break;
   11247 		}
   11248 
    11249 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
    11250 			device_xname(sc->sc_dev), i));
   11251 
   11252 		status = CSR_READ(sc, WMREG_STATUS);
   11253 		DPRINTF(WM_DEBUG_LINK,
   11254 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
    11255 			device_xname(sc->sc_dev), status, STATUS_LU));
   11256 		if (status & STATUS_LU) {
   11257 			/* Link is up. */
   11258 			DPRINTF(WM_DEBUG_LINK,
   11259 			    ("%s: LINK: set media -> link up %s\n",
   11260 				device_xname(sc->sc_dev),
   11261 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11262 
    11263 			/*
    11264 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    11265 			 * automatically, so refresh sc->sc_ctrl from it.
    11266 			 */
   11267 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11268 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11269 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11270 			if (status & STATUS_FD)
   11271 				sc->sc_tctl |=
   11272 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11273 			else
   11274 				sc->sc_tctl |=
   11275 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11276 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11277 				sc->sc_fcrtl |= FCRTL_XONE;
   11278 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11279 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11280 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11281 			sc->sc_tbi_linkup = 1;
   11282 		} else {
   11283 			if (i == WM_LINKUP_TIMEOUT)
   11284 				wm_check_for_link(sc);
   11285 			/* Link is down. */
   11286 			DPRINTF(WM_DEBUG_LINK,
   11287 			    ("%s: LINK: set media -> link down\n",
   11288 				device_xname(sc->sc_dev)));
   11289 			sc->sc_tbi_linkup = 0;
   11290 		}
   11291 	} else {
   11292 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11293 			device_xname(sc->sc_dev)));
   11294 		sc->sc_tbi_linkup = 0;
   11295 	}
   11296 
   11297 	wm_tbi_serdes_set_linkled(sc);
   11298 
   11299 	return 0;
   11300 }
   11301 
   11302 /*
   11303  * wm_tbi_mediastatus:	[ifmedia interface function]
   11304  *
   11305  *	Get the current interface media status on a 1000BASE-X device.
   11306  */
   11307 static void
   11308 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11309 {
   11310 	struct wm_softc *sc = ifp->if_softc;
   11311 	uint32_t ctrl, status;
   11312 
   11313 	ifmr->ifm_status = IFM_AVALID;
   11314 	ifmr->ifm_active = IFM_ETHER;
   11315 
   11316 	status = CSR_READ(sc, WMREG_STATUS);
   11317 	if ((status & STATUS_LU) == 0) {
   11318 		ifmr->ifm_active |= IFM_NONE;
   11319 		return;
   11320 	}
   11321 
   11322 	ifmr->ifm_status |= IFM_ACTIVE;
   11323 	/* Only 82545 is LX */
   11324 	if (sc->sc_type == WM_T_82545)
   11325 		ifmr->ifm_active |= IFM_1000_LX;
   11326 	else
   11327 		ifmr->ifm_active |= IFM_1000_SX;
    11328 	if (status & STATUS_FD)
   11329 		ifmr->ifm_active |= IFM_FDX;
   11330 	else
   11331 		ifmr->ifm_active |= IFM_HDX;
   11332 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11333 	if (ctrl & CTRL_RFCE)
   11334 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11335 	if (ctrl & CTRL_TFCE)
   11336 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11337 }
   11338 
   11339 /* XXX TBI only */
   11340 static int
   11341 wm_check_for_link(struct wm_softc *sc)
   11342 {
   11343 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11344 	uint32_t rxcw;
   11345 	uint32_t ctrl;
   11346 	uint32_t status;
   11347 	bool signal;
   11348 
   11349 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11350 		device_xname(sc->sc_dev), __func__));
   11351 
   11352 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11353 		/* XXX need some work for >= 82571 */
   11354 		if (sc->sc_type >= WM_T_82571) {
   11355 			sc->sc_tbi_linkup = 1;
   11356 			return 0;
   11357 		}
   11358 	}
   11359 
   11360 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11361 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11362 	status = CSR_READ(sc, WMREG_STATUS);
   11363 	signal = wm_tbi_havesignal(sc, ctrl);
   11364 
   11365 	DPRINTF(WM_DEBUG_LINK,
   11366 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11367 		device_xname(sc->sc_dev), __func__, signal,
   11368 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11369 
    11370 	/*
    11371 	 * SWDPIN   LU RXCW
    11372 	 *	0    0	  0
    11373 	 *	0    0	  1	(should not happen)
    11374 	 *	0    1	  0	(should not happen)
    11375 	 *	0    1	  1	(should not happen)
    11376 	 *	1    0	  0	Disable autonegotiation and force link up
    11377 	 *	1    0	  1	got /C/ but no link up yet
    11378 	 *	1    1	  0	(link up)
    11379 	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
    11380 	 *
    11381 	 */
   11382 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11383 		DPRINTF(WM_DEBUG_LINK,
   11384 		    ("%s: %s: force linkup and fullduplex\n",
   11385 			device_xname(sc->sc_dev), __func__));
   11386 		sc->sc_tbi_linkup = 0;
   11387 		/* Disable auto-negotiation in the TXCW register */
   11388 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11389 
   11390 		/*
   11391 		 * Force link-up and also force full-duplex.
   11392 		 *
    11393 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
    11394 		 * automatically, so we must refresh sc->sc_ctrl here.
   11395 		 */
   11396 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11397 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11398 	} else if (((status & STATUS_LU) != 0)
   11399 	    && ((rxcw & RXCW_C) != 0)
   11400 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11401 		sc->sc_tbi_linkup = 1;
   11402 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11403 			device_xname(sc->sc_dev),
   11404 			__func__));
   11405 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11406 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11407 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    11408 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   11409 			device_xname(sc->sc_dev), __func__));
   11410 	} else {
   11411 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11412 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11413 			status));
   11414 	}
   11415 
   11416 	return 0;
   11417 }
   11418 
   11419 /*
   11420  * wm_tbi_tick:
   11421  *
   11422  *	Check the link on TBI devices.
   11423  *	This function acts as mii_tick().
   11424  */
   11425 static void
   11426 wm_tbi_tick(struct wm_softc *sc)
   11427 {
   11428 	struct mii_data *mii = &sc->sc_mii;
   11429 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11430 	uint32_t status;
   11431 
   11432 	KASSERT(WM_CORE_LOCKED(sc));
   11433 
   11434 	status = CSR_READ(sc, WMREG_STATUS);
   11435 
   11436 	/* XXX is this needed? */
   11437 	(void)CSR_READ(sc, WMREG_RXCW);
   11438 	(void)CSR_READ(sc, WMREG_CTRL);
   11439 
   11440 	/* set link status */
   11441 	if ((status & STATUS_LU) == 0) {
   11442 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11443 			device_xname(sc->sc_dev)));
   11444 		sc->sc_tbi_linkup = 0;
   11445 	} else if (sc->sc_tbi_linkup == 0) {
   11446 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11447 			device_xname(sc->sc_dev),
   11448 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11449 		sc->sc_tbi_linkup = 1;
   11450 		sc->sc_tbi_serdes_ticks = 0;
   11451 	}
   11452 
   11453 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11454 		goto setled;
   11455 
   11456 	if ((status & STATUS_LU) == 0) {
   11457 		sc->sc_tbi_linkup = 0;
   11458 		/* If the timer expired, retry autonegotiation */
   11459 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11460 		    && (++sc->sc_tbi_serdes_ticks
   11461 			>= sc->sc_tbi_serdes_anegticks)) {
   11462 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11463 			sc->sc_tbi_serdes_ticks = 0;
   11464 			/*
   11465 			 * Reset the link, and let autonegotiation do
   11466 			 * its thing
   11467 			 */
   11468 			sc->sc_ctrl |= CTRL_LRST;
   11469 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11470 			CSR_WRITE_FLUSH(sc);
   11471 			delay(1000);
   11472 			sc->sc_ctrl &= ~CTRL_LRST;
   11473 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11474 			CSR_WRITE_FLUSH(sc);
   11475 			delay(1000);
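			/*
			 * Writing TXCW with ANE cleared and then again with
			 * ANE set forces a fresh round of autonegotiation.
			 */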
   11476 			CSR_WRITE(sc, WMREG_TXCW,
   11477 			    sc->sc_txcw & ~TXCW_ANE);
   11478 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11479 		}
   11480 	}
   11481 
   11482 setled:
   11483 	wm_tbi_serdes_set_linkled(sc);
   11484 }
   11485 
   11486 /* SERDES related */
   11487 static void
   11488 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11489 {
   11490 	uint32_t reg;
   11491 
   11492 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11493 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11494 		return;
   11495 
   11496 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11497 	reg |= PCS_CFG_PCS_EN;
   11498 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11499 
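	/*
	 * Clearing SWDPIN(3) in CTRL_EXT is what actually powers the link
	 * up here; on these boards that pin presumably gates the
	 * SFP/serdes transmitter.
	 */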
   11500 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11501 	reg &= ~CTRL_EXT_SWDPIN(3);
   11502 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11503 	CSR_WRITE_FLUSH(sc);
   11504 }
   11505 
   11506 static int
   11507 wm_serdes_mediachange(struct ifnet *ifp)
   11508 {
   11509 	struct wm_softc *sc = ifp->if_softc;
   11510 	bool pcs_autoneg = true; /* XXX */
   11511 	uint32_t ctrl_ext, pcs_lctl, reg;
   11512 
   11513 	/* XXX Currently, this function is not called on 8257[12] */
   11514 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11515 	    || (sc->sc_type >= WM_T_82575))
   11516 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11517 
   11518 	wm_serdes_power_up_link_82575(sc);
   11519 
   11520 	sc->sc_ctrl |= CTRL_SLU;
   11521 
   11522 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11523 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11524 
   11525 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11526 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11527 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11528 	case CTRL_EXT_LINK_MODE_SGMII:
   11529 		pcs_autoneg = true;
   11530 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11531 		break;
   11532 	case CTRL_EXT_LINK_MODE_1000KX:
   11533 		pcs_autoneg = false;
   11534 		/* FALLTHROUGH */
   11535 	default:
   11536 		if ((sc->sc_type == WM_T_82575)
   11537 		    || (sc->sc_type == WM_T_82576)) {
   11538 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11539 				pcs_autoneg = false;
   11540 		}
   11541 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11542 		    | CTRL_FRCFDX;
   11543 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11544 	}
   11545 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11546 
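	/*
	 * Either let the PCS autonegotiate (enable and restart AN, with
	 * both pause bits advertised unconditionally in PCS_ANADV) or
	 * force the speed/duplex/flow-control settings chosen above.
	 */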
   11547 	if (pcs_autoneg) {
   11548 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11549 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11550 
   11551 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11552 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11553 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11554 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11555 	} else
   11556 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11557 
   11558 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    11559 
   11561 	return 0;
   11562 }
   11563 
   11564 static void
   11565 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11566 {
   11567 	struct wm_softc *sc = ifp->if_softc;
   11568 	struct mii_data *mii = &sc->sc_mii;
   11569 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11570 	uint32_t pcs_adv, pcs_lpab, reg;
   11571 
   11572 	ifmr->ifm_status = IFM_AVALID;
   11573 	ifmr->ifm_active = IFM_ETHER;
   11574 
   11575 	/* Check PCS */
   11576 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11577 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11578 		ifmr->ifm_active |= IFM_NONE;
   11579 		sc->sc_tbi_linkup = 0;
   11580 		goto setled;
   11581 	}
   11582 
   11583 	sc->sc_tbi_linkup = 1;
   11584 	ifmr->ifm_status |= IFM_ACTIVE;
   11585 	if (sc->sc_type == WM_T_I354) {
   11586 		uint32_t status;
   11587 
   11588 		status = CSR_READ(sc, WMREG_STATUS);
   11589 		if (((status & STATUS_2P5_SKU) != 0)
   11590 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11591 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11592 		} else
   11593 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11594 	} else {
   11595 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11596 		case PCS_LSTS_SPEED_10:
   11597 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11598 			break;
   11599 		case PCS_LSTS_SPEED_100:
   11600 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11601 			break;
   11602 		case PCS_LSTS_SPEED_1000:
   11603 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11604 			break;
   11605 		default:
   11606 			device_printf(sc->sc_dev, "Unknown speed\n");
   11607 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11608 			break;
   11609 		}
   11610 	}
   11611 	if ((reg & PCS_LSTS_FDX) != 0)
   11612 		ifmr->ifm_active |= IFM_FDX;
   11613 	else
   11614 		ifmr->ifm_active |= IFM_HDX;
   11615 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11616 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11617 		/* Check flow */
   11618 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11619 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11620 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11621 			goto setled;
   11622 		}
   11623 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11624 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11625 		DPRINTF(WM_DEBUG_LINK,
   11626 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
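		/*
		 * Resolve flow control from our advertisement (PCS_ANADV)
		 * and the link partner's ability (PCS_LPAB) in the style of
		 * IEEE 802.3 Annex 28B: symmetric pause on both sides
		 * enables both directions; the asymmetric combinations
		 * enable TX-only or RX-only pause.
		 */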
   11627 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11628 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11629 			mii->mii_media_active |= IFM_FLOW
   11630 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11631 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11632 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11633 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11634 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11635 			mii->mii_media_active |= IFM_FLOW
   11636 			    | IFM_ETH_TXPAUSE;
   11637 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11638 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11639 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11640 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11641 			mii->mii_media_active |= IFM_FLOW
   11642 			    | IFM_ETH_RXPAUSE;
   11643 		}
   11644 	}
   11645 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11646 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11647 setled:
   11648 	wm_tbi_serdes_set_linkled(sc);
   11649 }
   11650 
   11651 /*
   11652  * wm_serdes_tick:
   11653  *
   11654  *	Check the link on serdes devices.
   11655  */
   11656 static void
   11657 wm_serdes_tick(struct wm_softc *sc)
   11658 {
   11659 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11660 	struct mii_data *mii = &sc->sc_mii;
   11661 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11662 	uint32_t reg;
   11663 
   11664 	KASSERT(WM_CORE_LOCKED(sc));
   11665 
   11666 	mii->mii_media_status = IFM_AVALID;
   11667 	mii->mii_media_active = IFM_ETHER;
   11668 
   11669 	/* Check PCS */
   11670 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11671 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11672 		mii->mii_media_status |= IFM_ACTIVE;
   11673 		sc->sc_tbi_linkup = 1;
   11674 		sc->sc_tbi_serdes_ticks = 0;
   11675 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11676 		if ((reg & PCS_LSTS_FDX) != 0)
   11677 			mii->mii_media_active |= IFM_FDX;
   11678 		else
   11679 			mii->mii_media_active |= IFM_HDX;
   11680 	} else {
    11681 		mii->mii_media_active |= IFM_NONE;
   11682 		sc->sc_tbi_linkup = 0;
   11683 		/* If the timer expired, retry autonegotiation */
   11684 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11685 		    && (++sc->sc_tbi_serdes_ticks
   11686 			>= sc->sc_tbi_serdes_anegticks)) {
   11687 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11688 			sc->sc_tbi_serdes_ticks = 0;
   11689 			/* XXX */
   11690 			wm_serdes_mediachange(ifp);
   11691 		}
   11692 	}
   11693 
   11694 	wm_tbi_serdes_set_linkled(sc);
   11695 }
   11696 
   11697 /* SFP related */
   11698 
   11699 static int
   11700 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11701 {
   11702 	uint32_t i2ccmd;
   11703 	int i;
   11704 
   11705 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11706 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11707 
   11708 	/* Poll the ready bit */
   11709 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11710 		delay(50);
   11711 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11712 		if (i2ccmd & I2CCMD_READY)
   11713 			break;
   11714 	}
   11715 	if ((i2ccmd & I2CCMD_READY) == 0)
   11716 		return -1;
   11717 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11718 		return -1;
   11719 
   11720 	*data = i2ccmd & 0x00ff;
   11721 
   11722 	return 0;
   11723 }
   11724 
   11725 static uint32_t
   11726 wm_sfp_get_media_type(struct wm_softc *sc)
   11727 {
   11728 	uint32_t ctrl_ext;
   11729 	uint8_t val = 0;
   11730 	int timeout = 3;
   11731 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11732 	int rv = -1;
   11733 
   11734 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11735 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11736 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11737 	CSR_WRITE_FLUSH(sc);
   11738 
   11739 	/* Read SFP module data */
   11740 	while (timeout) {
   11741 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11742 		if (rv == 0)
   11743 			break;
   11744 		delay(100*1000); /* XXX too big */
   11745 		timeout--;
   11746 	}
   11747 	if (rv != 0)
   11748 		goto out;
   11749 	switch (val) {
   11750 	case SFF_SFP_ID_SFF:
   11751 		aprint_normal_dev(sc->sc_dev,
   11752 		    "Module/Connector soldered to board\n");
   11753 		break;
   11754 	case SFF_SFP_ID_SFP:
   11755 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11756 		break;
   11757 	case SFF_SFP_ID_UNKNOWN:
   11758 		goto out;
   11759 	default:
   11760 		break;
   11761 	}
   11762 
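	/*
	 * Read the Ethernet compliance codes (byte 6 of the SFF-8472 ID
	 * block, presumably what SFF_SFP_ETH_FLAGS_OFF names): SX/LX
	 * modules run over SERDES, while 1000BASE-T and 100BASE-FX
	 * modules are driven through SGMII.
	 */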
   11763 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11764 	if (rv != 0) {
   11765 		goto out;
   11766 	}
   11767 
   11768 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11769 		mediatype = WM_MEDIATYPE_SERDES;
   11770 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11771 		sc->sc_flags |= WM_F_SGMII;
   11772 		mediatype = WM_MEDIATYPE_COPPER;
   11773 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11774 		sc->sc_flags |= WM_F_SGMII;
   11775 		mediatype = WM_MEDIATYPE_SERDES;
   11776 	}
   11777 
   11778 out:
   11779 	/* Restore I2C interface setting */
   11780 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11781 
   11782 	return mediatype;
   11783 }
   11784 
   11785 /*
   11786  * NVM related.
   11787  * Microwire, SPI (w/wo EERD) and Flash.
   11788  */
   11789 
   11790 /* Both spi and uwire */
   11791 
   11792 /*
   11793  * wm_eeprom_sendbits:
   11794  *
   11795  *	Send a series of bits to the EEPROM.
   11796  */
   11797 static void
   11798 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11799 {
   11800 	uint32_t reg;
   11801 	int x;
   11802 
   11803 	reg = CSR_READ(sc, WMREG_EECD);
   11804 
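	/*
	 * Bit-bang each bit MSB first: present it on DI, then pulse SK
	 * high and low so the EEPROM samples it.
	 */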
   11805 	for (x = nbits; x > 0; x--) {
   11806 		if (bits & (1U << (x - 1)))
   11807 			reg |= EECD_DI;
   11808 		else
   11809 			reg &= ~EECD_DI;
   11810 		CSR_WRITE(sc, WMREG_EECD, reg);
   11811 		CSR_WRITE_FLUSH(sc);
   11812 		delay(2);
   11813 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11814 		CSR_WRITE_FLUSH(sc);
   11815 		delay(2);
   11816 		CSR_WRITE(sc, WMREG_EECD, reg);
   11817 		CSR_WRITE_FLUSH(sc);
   11818 		delay(2);
   11819 	}
   11820 }
   11821 
   11822 /*
   11823  * wm_eeprom_recvbits:
   11824  *
   11825  *	Receive a series of bits from the EEPROM.
   11826  */
   11827 static void
   11828 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11829 {
   11830 	uint32_t reg, val;
   11831 	int x;
   11832 
   11833 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11834 
   11835 	val = 0;
   11836 	for (x = nbits; x > 0; x--) {
   11837 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11838 		CSR_WRITE_FLUSH(sc);
   11839 		delay(2);
   11840 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11841 			val |= (1U << (x - 1));
   11842 		CSR_WRITE(sc, WMREG_EECD, reg);
   11843 		CSR_WRITE_FLUSH(sc);
   11844 		delay(2);
   11845 	}
   11846 	*valp = val;
   11847 }
   11848 
   11849 /* Microwire */
   11850 
   11851 /*
   11852  * wm_nvm_read_uwire:
   11853  *
   11854  *	Read a word from the EEPROM using the MicroWire protocol.
   11855  */
   11856 static int
   11857 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11858 {
   11859 	uint32_t reg, val;
   11860 	int i;
   11861 
   11862 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11863 		device_xname(sc->sc_dev), __func__));
   11864 
   11865 	if (sc->nvm.acquire(sc) != 0)
   11866 		return -1;
   11867 
   11868 	for (i = 0; i < wordcnt; i++) {
   11869 		/* Clear SK and DI. */
   11870 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11871 		CSR_WRITE(sc, WMREG_EECD, reg);
   11872 
   11873 		/*
   11874 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11875 		 * and Xen.
   11876 		 *
    11877 		 * We use this workaround only for 82540 because qemu's
    11878 		 * e1000 acts as an 82540.
   11879 		 */
   11880 		if (sc->sc_type == WM_T_82540) {
   11881 			reg |= EECD_SK;
   11882 			CSR_WRITE(sc, WMREG_EECD, reg);
   11883 			reg &= ~EECD_SK;
   11884 			CSR_WRITE(sc, WMREG_EECD, reg);
   11885 			CSR_WRITE_FLUSH(sc);
   11886 			delay(2);
   11887 		}
   11888 		/* XXX: end of workaround */
   11889 
   11890 		/* Set CHIP SELECT. */
   11891 		reg |= EECD_CS;
   11892 		CSR_WRITE(sc, WMREG_EECD, reg);
   11893 		CSR_WRITE_FLUSH(sc);
   11894 		delay(2);
   11895 
   11896 		/* Shift in the READ command. */
   11897 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11898 
   11899 		/* Shift in address. */
   11900 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11901 
   11902 		/* Shift out the data. */
   11903 		wm_eeprom_recvbits(sc, &val, 16);
   11904 		data[i] = val & 0xffff;
   11905 
   11906 		/* Clear CHIP SELECT. */
   11907 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11908 		CSR_WRITE(sc, WMREG_EECD, reg);
   11909 		CSR_WRITE_FLUSH(sc);
   11910 		delay(2);
   11911 	}
   11912 
   11913 	sc->nvm.release(sc);
   11914 	return 0;
   11915 }
   11916 
   11917 /* SPI */
   11918 
   11919 /*
   11920  * Set SPI and FLASH related information from the EECD register.
   11921  * For 82541 and 82547, the word size is taken from EEPROM.
   11922  */
   11923 static int
   11924 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11925 {
   11926 	int size;
   11927 	uint32_t reg;
   11928 	uint16_t data;
   11929 
   11930 	reg = CSR_READ(sc, WMREG_EECD);
   11931 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11932 
   11933 	/* Read the size of NVM from EECD by default */
   11934 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
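	/*
	 * The size field is an exponent: the word count set at the end
	 * is 1 << size after the per-device adjustments below.
	 */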
   11935 	switch (sc->sc_type) {
   11936 	case WM_T_82541:
   11937 	case WM_T_82541_2:
   11938 	case WM_T_82547:
   11939 	case WM_T_82547_2:
   11940 		/* Set dummy value to access EEPROM */
   11941 		sc->sc_nvm_wordsize = 64;
   11942 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11943 			aprint_error_dev(sc->sc_dev,
   11944 			    "%s: failed to read EEPROM size\n", __func__);
   11945 		}
   11946 		reg = data;
   11947 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11948 		if (size == 0)
   11949 			size = 6; /* 64 word size */
   11950 		else
   11951 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11952 		break;
   11953 	case WM_T_80003:
   11954 	case WM_T_82571:
   11955 	case WM_T_82572:
   11956 	case WM_T_82573: /* SPI case */
   11957 	case WM_T_82574: /* SPI case */
   11958 	case WM_T_82583: /* SPI case */
   11959 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11960 		if (size > 14)
   11961 			size = 14;
   11962 		break;
   11963 	case WM_T_82575:
   11964 	case WM_T_82576:
   11965 	case WM_T_82580:
   11966 	case WM_T_I350:
   11967 	case WM_T_I354:
   11968 	case WM_T_I210:
   11969 	case WM_T_I211:
   11970 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11971 		if (size > 15)
   11972 			size = 15;
   11973 		break;
   11974 	default:
   11975 		aprint_error_dev(sc->sc_dev,
   11976 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11977 		return -1;
   11978 		break;
   11979 	}
   11980 
   11981 	sc->sc_nvm_wordsize = 1 << size;
   11982 
   11983 	return 0;
   11984 }
   11985 
   11986 /*
   11987  * wm_nvm_ready_spi:
   11988  *
   11989  *	Wait for a SPI EEPROM to be ready for commands.
   11990  */
   11991 static int
   11992 wm_nvm_ready_spi(struct wm_softc *sc)
   11993 {
   11994 	uint32_t val;
   11995 	int usec;
   11996 
   11997 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11998 		device_xname(sc->sc_dev), __func__));
   11999 
   12000 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12001 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12002 		wm_eeprom_recvbits(sc, &val, 8);
   12003 		if ((val & SPI_SR_RDY) == 0)
   12004 			break;
   12005 	}
   12006 	if (usec >= SPI_MAX_RETRIES) {
    12007 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12008 		return -1;
   12009 	}
   12010 	return 0;
   12011 }
   12012 
   12013 /*
   12014  * wm_nvm_read_spi:
   12015  *
    12016  *	Read a word from the EEPROM using the SPI protocol.
   12017  */
   12018 static int
   12019 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12020 {
   12021 	uint32_t reg, val;
   12022 	int i;
   12023 	uint8_t opc;
   12024 	int rv = 0;
   12025 
   12026 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12027 		device_xname(sc->sc_dev), __func__));
   12028 
   12029 	if (sc->nvm.acquire(sc) != 0)
   12030 		return -1;
   12031 
   12032 	/* Clear SK and CS. */
   12033 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12034 	CSR_WRITE(sc, WMREG_EECD, reg);
   12035 	CSR_WRITE_FLUSH(sc);
   12036 	delay(2);
   12037 
   12038 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12039 		goto out;
   12040 
   12041 	/* Toggle CS to flush commands. */
   12042 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12043 	CSR_WRITE_FLUSH(sc);
   12044 	delay(2);
   12045 	CSR_WRITE(sc, WMREG_EECD, reg);
   12046 	CSR_WRITE_FLUSH(sc);
   12047 	delay(2);
   12048 
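	/*
	 * 8-bit-address SPI parts need the A8 bit in the opcode as a
	 * ninth address bit for words at 128 and above.
	 */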
   12049 	opc = SPI_OPC_READ;
   12050 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12051 		opc |= SPI_OPC_A8;
   12052 
   12053 	wm_eeprom_sendbits(sc, opc, 8);
   12054 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12055 
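	/*
	 * The bits shift in MSB first, which leaves each 16-bit word
	 * byte-swapped; swap it back into host order.
	 */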
   12056 	for (i = 0; i < wordcnt; i++) {
   12057 		wm_eeprom_recvbits(sc, &val, 16);
   12058 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12059 	}
   12060 
   12061 	/* Raise CS and clear SK. */
   12062 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12063 	CSR_WRITE(sc, WMREG_EECD, reg);
   12064 	CSR_WRITE_FLUSH(sc);
   12065 	delay(2);
   12066 
   12067 out:
   12068 	sc->nvm.release(sc);
   12069 	return rv;
   12070 }
   12071 
    12072 /* Reading with EERD */
   12073 
   12074 static int
   12075 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12076 {
   12077 	uint32_t attempts = 100000;
   12078 	uint32_t i, reg = 0;
   12079 	int32_t done = -1;
   12080 
   12081 	for (i = 0; i < attempts; i++) {
   12082 		reg = CSR_READ(sc, rw);
   12083 
   12084 		if (reg & EERD_DONE) {
   12085 			done = 0;
   12086 			break;
   12087 		}
   12088 		delay(5);
   12089 	}
   12090 
   12091 	return done;
   12092 }
   12093 
   12094 static int
   12095 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12096 {
   12097 	int i, eerd = 0;
   12098 	int rv = 0;
   12099 
   12100 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12101 		device_xname(sc->sc_dev), __func__));
   12102 
   12103 	if (sc->nvm.acquire(sc) != 0)
   12104 		return -1;
   12105 
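	/*
	 * For each word: write the address and the START bit to EERD,
	 * poll for DONE, then pull the result out of the data field.
	 */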
   12106 	for (i = 0; i < wordcnt; i++) {
   12107 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12108 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12109 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12110 		if (rv != 0) {
   12111 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12112 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12113 			break;
   12114 		}
   12115 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12116 	}
   12117 
   12118 	sc->nvm.release(sc);
   12119 	return rv;
   12120 }
   12121 
   12122 /* Flash */
   12123 
   12124 static int
   12125 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12126 {
   12127 	uint32_t eecd;
   12128 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12129 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12130 	uint32_t nvm_dword = 0;
   12131 	uint8_t sig_byte = 0;
   12132 	int rv;
   12133 
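	/*
	 * Each flash bank carries a signature; a bank is valid when the
	 * masked signature byte matches ICH_NVM_SIG_VALUE.  Check bank 0
	 * first, then bank 1.
	 */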
   12134 	switch (sc->sc_type) {
   12135 	case WM_T_PCH_SPT:
   12136 	case WM_T_PCH_CNP:
   12137 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12138 		act_offset = ICH_NVM_SIG_WORD * 2;
   12139 
   12140 		/* set bank to 0 in case flash read fails. */
   12141 		*bank = 0;
   12142 
   12143 		/* Check bank 0 */
   12144 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12145 		if (rv != 0)
   12146 			return rv;
   12147 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12148 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12149 			*bank = 0;
   12150 			return 0;
   12151 		}
   12152 
   12153 		/* Check bank 1 */
    12154 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12155 		    &nvm_dword);
		if (rv != 0)
			return rv;
   12156 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12157 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12158 			*bank = 1;
   12159 			return 0;
   12160 		}
   12161 		aprint_error_dev(sc->sc_dev,
   12162 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12163 		return -1;
   12164 	case WM_T_ICH8:
   12165 	case WM_T_ICH9:
   12166 		eecd = CSR_READ(sc, WMREG_EECD);
   12167 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12168 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12169 			return 0;
   12170 		}
   12171 		/* FALLTHROUGH */
   12172 	default:
   12173 		/* Default to 0 */
   12174 		*bank = 0;
   12175 
   12176 		/* Check bank 0 */
   12177 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12178 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12179 			*bank = 0;
   12180 			return 0;
   12181 		}
   12182 
   12183 		/* Check bank 1 */
   12184 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12185 		    &sig_byte);
   12186 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12187 			*bank = 1;
   12188 			return 0;
   12189 		}
   12190 	}
   12191 
   12192 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12193 		device_xname(sc->sc_dev)));
   12194 	return -1;
   12195 }
   12196 
   12197 /******************************************************************************
   12198  * This function does initial flash setup so that a new read/write/erase cycle
   12199  * can be started.
   12200  *
   12201  * sc - The pointer to the hw structure
   12202  ****************************************************************************/
   12203 static int32_t
   12204 wm_ich8_cycle_init(struct wm_softc *sc)
   12205 {
   12206 	uint16_t hsfsts;
   12207 	int32_t error = 1;
   12208 	int32_t i     = 0;
   12209 
   12210 	if (sc->sc_type >= WM_T_PCH_SPT)
   12211 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12212 	else
   12213 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12214 
    12215 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   12216 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12217 		return error;
   12218 
   12219 	/* Clear FCERR in Hw status by writing 1 */
   12220 	/* Clear DAEL in Hw status by writing a 1 */
   12221 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12222 
   12223 	if (sc->sc_type >= WM_T_PCH_SPT)
   12224 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12225 	else
   12226 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12227 
    12228 	/*
    12229 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12230 	 * check against in order to start a new cycle, or the FDONE bit
    12231 	 * should be set to 1 by a hardware reset so that it can be used
    12232 	 * to tell whether a cycle is in progress or has completed.  We
    12233 	 * should also have some software semaphore mechanism to guard
    12234 	 * FDONE or the cycle-in-progress bit so that accesses to those
    12235 	 * bits by two threads are serialized and two threads don't
    12236 	 * start a cycle at the same time.
    12237 	 */
   12238 
   12239 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12240 		/*
   12241 		 * There is no cycle running at present, so we can start a
   12242 		 * cycle
   12243 		 */
   12244 
   12245 		/* Begin by setting Flash Cycle Done. */
   12246 		hsfsts |= HSFSTS_DONE;
   12247 		if (sc->sc_type >= WM_T_PCH_SPT)
   12248 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12249 			    hsfsts & 0xffffUL);
   12250 		else
   12251 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12252 		error = 0;
   12253 	} else {
    12254 		/*
    12255 		 * Otherwise poll for some time so the current cycle has a
    12256 		 * chance to end before we give up.
    12257 		 */
   12258 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12259 			if (sc->sc_type >= WM_T_PCH_SPT)
   12260 				hsfsts = ICH8_FLASH_READ32(sc,
   12261 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12262 			else
   12263 				hsfsts = ICH8_FLASH_READ16(sc,
   12264 				    ICH_FLASH_HSFSTS);
   12265 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12266 				error = 0;
   12267 				break;
   12268 			}
   12269 			delay(1);
   12270 		}
   12271 		if (error == 0) {
    12272 			/*
    12273 			 * The previous cycle finished before our timeout,
    12274 			 * so now set Flash Cycle Done.
    12275 			 */
   12276 			hsfsts |= HSFSTS_DONE;
   12277 			if (sc->sc_type >= WM_T_PCH_SPT)
   12278 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12279 				    hsfsts & 0xffffUL);
   12280 			else
   12281 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12282 				    hsfsts);
   12283 		}
   12284 	}
   12285 	return error;
   12286 }
   12287 
   12288 /******************************************************************************
   12289  * This function starts a flash cycle and waits for its completion
   12290  *
   12291  * sc - The pointer to the hw structure
   12292  ****************************************************************************/
   12293 static int32_t
   12294 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12295 {
   12296 	uint16_t hsflctl;
   12297 	uint16_t hsfsts;
   12298 	int32_t error = 1;
   12299 	uint32_t i = 0;
   12300 
   12301 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12302 	if (sc->sc_type >= WM_T_PCH_SPT)
   12303 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12304 	else
   12305 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12306 	hsflctl |= HSFCTL_GO;
   12307 	if (sc->sc_type >= WM_T_PCH_SPT)
   12308 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12309 		    (uint32_t)hsflctl << 16);
   12310 	else
   12311 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12312 
   12313 	/* Wait till FDONE bit is set to 1 */
   12314 	do {
   12315 		if (sc->sc_type >= WM_T_PCH_SPT)
   12316 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12317 			    & 0xffffUL;
   12318 		else
   12319 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12320 		if (hsfsts & HSFSTS_DONE)
   12321 			break;
   12322 		delay(1);
   12323 		i++;
   12324 	} while (i < timeout);
   12325 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   12326 		error = 0;
   12327 
   12328 	return error;
   12329 }
   12330 
   12331 /******************************************************************************
   12332  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12333  *
   12334  * sc - The pointer to the hw structure
   12335  * index - The index of the byte or word to read.
    12336  * size - Size of data to read, 1=byte, 2=word, 4=dword
   12337  * data - Pointer to the word to store the value read.
   12338  *****************************************************************************/
   12339 static int32_t
   12340 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12341     uint32_t size, uint32_t *data)
   12342 {
   12343 	uint16_t hsfsts;
   12344 	uint16_t hsflctl;
   12345 	uint32_t flash_linear_address;
   12346 	uint32_t flash_data = 0;
   12347 	int32_t error = 1;
   12348 	int32_t count = 0;
   12349 
    12350 	if (size < 1 || size > 4 || data == NULL ||
   12351 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12352 		return error;
   12353 
   12354 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12355 	    sc->sc_ich8_flash_base;
   12356 
   12357 	do {
   12358 		delay(1);
   12359 		/* Steps */
   12360 		error = wm_ich8_cycle_init(sc);
   12361 		if (error)
   12362 			break;
   12363 
   12364 		if (sc->sc_type >= WM_T_PCH_SPT)
   12365 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12366 			    >> 16;
   12367 		else
   12368 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12369 		/* The byte count field holds size - 1 (1, 2 or 4 bytes). */
    12370 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12371 		    & HSFCTL_BCOUNT_MASK;
   12372 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12373 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12374 			/*
    12375 			 * In SPT, this register is in LAN memory space, not
    12376 			 * flash.  Therefore, only 32-bit access is supported.
   12377 			 */
   12378 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12379 			    (uint32_t)hsflctl << 16);
   12380 		} else
   12381 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12382 
   12383 		/*
   12384 		 * Write the last 24 bits of index into Flash Linear address
   12385 		 * field in Flash Address
   12386 		 */
   12387 		/* TODO: TBD maybe check the index against the size of flash */
   12388 
   12389 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12390 
   12391 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12392 
    12393 		/*
    12394 		 * If FCERR is set, clear it and retry the whole
    12395 		 * sequence a few more times; otherwise read the
    12396 		 * result out of the Flash Data0 register, least
    12397 		 * significant byte first.
    12398 		 */
   12399 		if (error == 0) {
   12400 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12401 			if (size == 1)
   12402 				*data = (uint8_t)(flash_data & 0x000000FF);
   12403 			else if (size == 2)
   12404 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12405 			else if (size == 4)
   12406 				*data = (uint32_t)flash_data;
   12407 			break;
   12408 		} else {
   12409 			/*
   12410 			 * If we've gotten here, then things are probably
   12411 			 * completely hosed, but if the error condition is
   12412 			 * detected, it won't hurt to give it another try...
   12413 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12414 			 */
   12415 			if (sc->sc_type >= WM_T_PCH_SPT)
   12416 				hsfsts = ICH8_FLASH_READ32(sc,
   12417 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12418 			else
   12419 				hsfsts = ICH8_FLASH_READ16(sc,
   12420 				    ICH_FLASH_HSFSTS);
   12421 
   12422 			if (hsfsts & HSFSTS_ERR) {
   12423 				/* Repeat for some time before giving up. */
   12424 				continue;
   12425 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12426 				break;
   12427 		}
   12428 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12429 
   12430 	return error;
   12431 }
   12432 
   12433 /******************************************************************************
   12434  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12435  *
   12436  * sc - pointer to wm_hw structure
   12437  * index - The index of the byte to read.
   12438  * data - Pointer to a byte to store the value read.
   12439  *****************************************************************************/
   12440 static int32_t
   12441 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12442 {
   12443 	int32_t status;
   12444 	uint32_t word = 0;
   12445 
   12446 	status = wm_read_ich8_data(sc, index, 1, &word);
   12447 	if (status == 0)
   12448 		*data = (uint8_t)word;
   12449 	else
   12450 		*data = 0;
   12451 
   12452 	return status;
   12453 }
   12454 
   12455 /******************************************************************************
   12456  * Reads a word from the NVM using the ICH8 flash access registers.
   12457  *
   12458  * sc - pointer to wm_hw structure
   12459  * index - The starting byte index of the word to read.
   12460  * data - Pointer to a word to store the value read.
   12461  *****************************************************************************/
   12462 static int32_t
   12463 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12464 {
   12465 	int32_t status;
   12466 	uint32_t word = 0;
   12467 
   12468 	status = wm_read_ich8_data(sc, index, 2, &word);
   12469 	if (status == 0)
   12470 		*data = (uint16_t)word;
   12471 	else
   12472 		*data = 0;
   12473 
   12474 	return status;
   12475 }
   12476 
   12477 /******************************************************************************
   12478  * Reads a dword from the NVM using the ICH8 flash access registers.
   12479  *
   12480  * sc - pointer to wm_hw structure
   12481  * index - The starting byte index of the word to read.
   12482  * data - Pointer to a word to store the value read.
   12483  *****************************************************************************/
   12484 static int32_t
   12485 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12486 {
   12487 	int32_t status;
   12488 
   12489 	status = wm_read_ich8_data(sc, index, 4, data);
   12490 	return status;
   12491 }
   12492 
   12493 /******************************************************************************
   12494  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12495  * register.
   12496  *
   12497  * sc - Struct containing variables accessed by shared code
   12498  * offset - offset of word in the EEPROM to read
   12499  * data - word read from the EEPROM
   12500  * words - number of words to read
   12501  *****************************************************************************/
   12502 static int
   12503 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12504 {
   12505 	int32_t	 rv = 0;
   12506 	uint32_t flash_bank = 0;
   12507 	uint32_t act_offset = 0;
   12508 	uint32_t bank_offset = 0;
   12509 	uint16_t word = 0;
   12510 	uint16_t i = 0;
   12511 
   12512 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12513 		device_xname(sc->sc_dev), __func__));
   12514 
   12515 	if (sc->nvm.acquire(sc) != 0)
   12516 		return -1;
   12517 
   12518 	/*
   12519 	 * We need to know which is the valid flash bank.  In the event
   12520 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12521 	 * managing flash_bank. So it cannot be trusted and needs
   12522 	 * to be updated with each read.
   12523 	 */
   12524 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12525 	if (rv) {
   12526 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12527 			device_xname(sc->sc_dev)));
   12528 		flash_bank = 0;
   12529 	}
   12530 
   12531 	/*
   12532 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12533 	 * size
   12534 	 */
   12535 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12536 
   12537 	for (i = 0; i < words; i++) {
   12538 		/* The NVM part needs a byte offset, hence * 2 */
   12539 		act_offset = bank_offset + ((offset + i) * 2);
   12540 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12541 		if (rv) {
   12542 			aprint_error_dev(sc->sc_dev,
   12543 			    "%s: failed to read NVM\n", __func__);
   12544 			break;
   12545 		}
   12546 		data[i] = word;
   12547 	}
   12548 
   12549 	sc->nvm.release(sc);
   12550 	return rv;
   12551 }
   12552 
   12553 /******************************************************************************
   12554  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12555  * register.
   12556  *
   12557  * sc - Struct containing variables accessed by shared code
   12558  * offset - offset of word in the EEPROM to read
   12559  * data - word read from the EEPROM
   12560  * words - number of words to read
   12561  *****************************************************************************/
   12562 static int
   12563 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12564 {
   12565 	int32_t	 rv = 0;
   12566 	uint32_t flash_bank = 0;
   12567 	uint32_t act_offset = 0;
   12568 	uint32_t bank_offset = 0;
   12569 	uint32_t dword = 0;
   12570 	uint16_t i = 0;
   12571 
   12572 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12573 		device_xname(sc->sc_dev), __func__));
   12574 
   12575 	if (sc->nvm.acquire(sc) != 0)
   12576 		return -1;
   12577 
   12578 	/*
   12579 	 * We need to know which is the valid flash bank.  In the event
   12580 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12581 	 * managing flash_bank. So it cannot be trusted and needs
   12582 	 * to be updated with each read.
   12583 	 */
   12584 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12585 	if (rv) {
   12586 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12587 			device_xname(sc->sc_dev)));
   12588 		flash_bank = 0;
   12589 	}
   12590 
   12591 	/*
   12592 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12593 	 * size
   12594 	 */
   12595 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12596 
   12597 	for (i = 0; i < words; i++) {
   12598 		/* The NVM part needs a byte offset, hence * 2 */
   12599 		act_offset = bank_offset + ((offset + i) * 2);
   12600 		/* but we must read dword aligned, so mask ... */
   12601 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12602 		if (rv) {
   12603 			aprint_error_dev(sc->sc_dev,
   12604 			    "%s: failed to read NVM\n", __func__);
   12605 			break;
   12606 		}
   12607 		/* ... and pick out low or high word */
   12608 		if ((act_offset & 0x2) == 0)
   12609 			data[i] = (uint16_t)(dword & 0xFFFF);
   12610 		else
   12611 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12612 	}
   12613 
   12614 	sc->nvm.release(sc);
   12615 	return rv;
   12616 }
   12617 
   12618 /* iNVM */
   12619 
   12620 static int
   12621 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12622 {
   12623 	int32_t	 rv = 0;
   12624 	uint32_t invm_dword;
   12625 	uint16_t i;
   12626 	uint8_t record_type, word_address;
   12627 
   12628 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12629 		device_xname(sc->sc_dev), __func__));
   12630 
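	/*
	 * Walk the iNVM records.  Each dword starts with a record type;
	 * CSR- and RSA-autoload structures are skipped by advancing i
	 * past their payload, and a word-autoload record whose address
	 * matches carries the data we want.
	 */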
   12631 	for (i = 0; i < INVM_SIZE; i++) {
   12632 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12633 		/* Get record type */
   12634 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12635 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12636 			break;
   12637 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12638 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12639 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12640 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12641 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12642 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12643 			if (word_address == address) {
   12644 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12645 				rv = 0;
   12646 				break;
   12647 			}
   12648 		}
   12649 	}
   12650 
   12651 	return rv;
   12652 }
   12653 
   12654 static int
   12655 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12656 {
   12657 	int rv = 0;
   12658 	int i;
   12659 
   12660 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12661 		device_xname(sc->sc_dev), __func__));
   12662 
   12663 	if (sc->nvm.acquire(sc) != 0)
   12664 		return -1;
   12665 
   12666 	for (i = 0; i < words; i++) {
   12667 		switch (offset + i) {
   12668 		case NVM_OFF_MACADDR:
   12669 		case NVM_OFF_MACADDR1:
   12670 		case NVM_OFF_MACADDR2:
   12671 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12672 			if (rv != 0) {
   12673 				data[i] = 0xffff;
   12674 				rv = -1;
   12675 			}
   12676 			break;
   12677 		case NVM_OFF_CFG2:
   12678 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12679 			if (rv != 0) {
   12680 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12681 				rv = 0;
   12682 			}
   12683 			break;
   12684 		case NVM_OFF_CFG4:
   12685 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12686 			if (rv != 0) {
   12687 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12688 				rv = 0;
   12689 			}
   12690 			break;
   12691 		case NVM_OFF_LED_1_CFG:
   12692 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12693 			if (rv != 0) {
   12694 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12695 				rv = 0;
   12696 			}
   12697 			break;
   12698 		case NVM_OFF_LED_0_2_CFG:
   12699 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12700 			if (rv != 0) {
   12701 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12702 				rv = 0;
   12703 			}
   12704 			break;
   12705 		case NVM_OFF_ID_LED_SETTINGS:
   12706 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12707 			if (rv != 0) {
   12708 				*data = ID_LED_RESERVED_FFFF;
   12709 				rv = 0;
   12710 			}
   12711 			break;
   12712 		default:
   12713 			DPRINTF(WM_DEBUG_NVM,
   12714 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12715 			*data = NVM_RESERVED_WORD;
   12716 			break;
   12717 		}
   12718 	}
   12719 
   12720 	sc->nvm.release(sc);
   12721 	return rv;
   12722 }
   12723 
    12724 /* Lock, detect NVM type, validate checksum, check version and read */
   12725 
   12726 static int
   12727 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12728 {
   12729 	uint32_t eecd = 0;
   12730 
   12731 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12732 	    || sc->sc_type == WM_T_82583) {
   12733 		eecd = CSR_READ(sc, WMREG_EECD);
   12734 
   12735 		/* Isolate bits 15 & 16 */
   12736 		eecd = ((eecd >> 15) & 0x03);
   12737 
   12738 		/* If both bits are set, device is Flash type */
   12739 		if (eecd == 0x03)
   12740 			return 0;
   12741 	}
   12742 	return 1;
   12743 }
   12744 
   12745 static int
   12746 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12747 {
   12748 	uint32_t eec;
   12749 
   12750 	eec = CSR_READ(sc, WMREG_EEC);
   12751 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12752 		return 1;
   12753 
   12754 	return 0;
   12755 }
   12756 
   12757 /*
   12758  * wm_nvm_validate_checksum
   12759  *
    12760  * The sum of the first 64 (16 bit) words must equal NVM_CHECKSUM.
   12761  */
   12762 static int
   12763 wm_nvm_validate_checksum(struct wm_softc *sc)
   12764 {
   12765 	uint16_t checksum;
   12766 	uint16_t eeprom_data;
   12767 #ifdef WM_DEBUG
   12768 	uint16_t csum_wordaddr, valid_checksum;
   12769 #endif
   12770 	int i;
   12771 
   12772 	checksum = 0;
   12773 
   12774 	/* Don't check for I211 */
   12775 	if (sc->sc_type == WM_T_I211)
   12776 		return 0;
   12777 
   12778 #ifdef WM_DEBUG
   12779 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12780 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12781 		csum_wordaddr = NVM_OFF_COMPAT;
   12782 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12783 	} else {
   12784 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12785 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12786 	}
   12787 
   12788 	/* Dump EEPROM image for debug */
   12789 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12790 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12791 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12792 		/* XXX PCH_SPT? */
   12793 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12794 		if ((eeprom_data & valid_checksum) == 0) {
   12795 			DPRINTF(WM_DEBUG_NVM,
    12796 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12797 				device_xname(sc->sc_dev), eeprom_data,
   12798 				    valid_checksum));
   12799 		}
   12800 	}
   12801 
   12802 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12803 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12804 		for (i = 0; i < NVM_SIZE; i++) {
   12805 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12806 				printf("XXXX ");
   12807 			else
   12808 				printf("%04hx ", eeprom_data);
   12809 			if (i % 8 == 7)
   12810 				printf("\n");
   12811 		}
   12812 	}
   12813 
   12814 #endif /* WM_DEBUG */
   12815 
   12816 	for (i = 0; i < NVM_SIZE; i++) {
   12817 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12818 			return 1;
   12819 		checksum += eeprom_data;
   12820 	}
   12821 
   12822 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12823 #ifdef WM_DEBUG
   12824 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12825 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12826 #endif
   12827 	}
   12828 
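	/* Note that a checksum mismatch is only logged, never fatal. */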
   12829 	return 0;
   12830 }
   12831 
   12832 static void
   12833 wm_nvm_version_invm(struct wm_softc *sc)
   12834 {
   12835 	uint32_t dword;
   12836 
   12837 	/*
    12838 	 * Linux's code to decode the version is very strange, so we don't
    12839 	 * follow that algorithm and just use word 61 as the documentation
    12840 	 * describes.  Perhaps it's not perfect, though...
   12841 	 *
   12842 	 * Example:
   12843 	 *
   12844 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12845 	 */
   12846 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12847 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12848 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12849 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12850 }
   12851 
   12852 static void
   12853 wm_nvm_version(struct wm_softc *sc)
   12854 {
   12855 	uint16_t major, minor, build, patch;
   12856 	uint16_t uid0, uid1;
   12857 	uint16_t nvm_data;
   12858 	uint16_t off;
   12859 	bool check_version = false;
   12860 	bool check_optionrom = false;
   12861 	bool have_build = false;
   12862 	bool have_uid = true;
   12863 
   12864 	/*
   12865 	 * Version format:
   12866 	 *
   12867 	 * XYYZ
   12868 	 * X0YZ
   12869 	 * X0YY
   12870 	 *
   12871 	 * Example:
   12872 	 *
   12873 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12874 	 *	82571	0x50a6	5.10.6?
   12875 	 *	82572	0x506a	5.6.10?
   12876 	 *	82572EI	0x5069	5.6.9?
   12877 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12878 	 *		0x2013	2.1.3?
    12879 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12880 	 */
   12881 
   12882 	/*
   12883 	 * XXX
    12884 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    12885 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12886 	 */
   12887 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12888 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12889 		have_uid = false;
   12890 
   12891 	switch (sc->sc_type) {
   12892 	case WM_T_82571:
   12893 	case WM_T_82572:
   12894 	case WM_T_82574:
   12895 	case WM_T_82583:
   12896 		check_version = true;
   12897 		check_optionrom = true;
   12898 		have_build = true;
   12899 		break;
   12900 	case WM_T_82575:
   12901 	case WM_T_82576:
   12902 	case WM_T_82580:
   12903 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12904 			check_version = true;
   12905 		break;
   12906 	case WM_T_I211:
   12907 		wm_nvm_version_invm(sc);
   12908 		have_uid = false;
   12909 		goto printver;
   12910 	case WM_T_I210:
   12911 		if (!wm_nvm_flash_presence_i210(sc)) {
   12912 			wm_nvm_version_invm(sc);
   12913 			have_uid = false;
   12914 			goto printver;
   12915 		}
   12916 		/* FALLTHROUGH */
   12917 	case WM_T_I350:
   12918 	case WM_T_I354:
   12919 		check_version = true;
   12920 		check_optionrom = true;
   12921 		break;
   12922 	default:
   12923 		return;
   12924 	}
   12925 	if (check_version
   12926 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12927 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12928 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12929 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12930 			build = nvm_data & NVM_BUILD_MASK;
   12931 			have_build = true;
   12932 		} else
   12933 			minor = nvm_data & 0x00ff;
   12934 
    12935 		/* Convert the BCD-coded minor number to decimal */
   12936 		minor = (minor / 16) * 10 + (minor % 16);
   12937 		sc->sc_nvm_ver_major = major;
   12938 		sc->sc_nvm_ver_minor = minor;
   12939 
   12940 printver:
   12941 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12942 		    sc->sc_nvm_ver_minor);
   12943 		if (have_build) {
   12944 			sc->sc_nvm_ver_build = build;
   12945 			aprint_verbose(".%d", build);
   12946 		}
   12947 	}
   12948 
    12949 	/* Assume the Option ROM area is above NVM_SIZE */
   12950 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12951 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12952 		/* Option ROM Version */
   12953 		if ((off != 0x0000) && (off != 0xffff)) {
   12954 			int rv;
   12955 
   12956 			off += NVM_COMBO_VER_OFF;
   12957 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12958 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12959 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12960 			    && (uid1 != 0) && (uid1 != 0xffff)) {
    12961 				/* Version spans the two 16-bit words */
   12962 				major = uid0 >> 8;
   12963 				build = (uid0 << 8) | (uid1 >> 8);
   12964 				patch = uid1 & 0x00ff;
   12965 				aprint_verbose(", option ROM Version %d.%d.%d",
   12966 				    major, build, patch);
   12967 			}
   12968 		}
   12969 	}
   12970 
   12971 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12972 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12973 }
   12974 
   12975 /*
   12976  * wm_nvm_read:
   12977  *
   12978  *	Read data from the serial EEPROM.
   12979  */
   12980 static int
   12981 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12982 {
   12983 	int rv;
   12984 
   12985 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12986 		device_xname(sc->sc_dev), __func__));
   12987 
   12988 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12989 		return -1;
   12990 
   12991 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12992 
   12993 	return rv;
   12994 }
   12995 
   12996 /*
   12997  * Hardware semaphores.
    12998  * Very complex...
   12999  */
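
/*
 * The wm_get_*()/wm_put_*() pairs below back the sc->phy.acquire/
 * sc->phy.release (and NVM equivalent) hooks used elsewhere in this
 * file; a typical caller brackets register access like this:
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		... access the PHY or NVM ...
 *		sc->phy.release(sc);
 *	}
 */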
   13000 
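/* No-op acquire/release hooks for chips that need no extra locking. */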
   13001 static int
   13002 wm_get_null(struct wm_softc *sc)
   13003 {
   13004 
   13005 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13006 		device_xname(sc->sc_dev), __func__));
   13007 	return 0;
   13008 }
   13009 
   13010 static void
   13011 wm_put_null(struct wm_softc *sc)
   13012 {
   13013 
   13014 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13015 		device_xname(sc->sc_dev), __func__));
   13016 	return;
   13017 }
   13018 
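/*
 * Acquire direct EEPROM access through the EECD request/grant
 * handshake: set EE_REQ and poll for EE_GNT for up to ~5ms.
 */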
   13019 static int
   13020 wm_get_eecd(struct wm_softc *sc)
   13021 {
   13022 	uint32_t reg;
   13023 	int x;
   13024 
   13025 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13026 		device_xname(sc->sc_dev), __func__));
   13027 
   13028 	reg = CSR_READ(sc, WMREG_EECD);
   13029 
   13030 	/* Request EEPROM access. */
   13031 	reg |= EECD_EE_REQ;
   13032 	CSR_WRITE(sc, WMREG_EECD, reg);
   13033 
    13034 	/* ... and wait for it to be granted. */
   13035 	for (x = 0; x < 1000; x++) {
   13036 		reg = CSR_READ(sc, WMREG_EECD);
   13037 		if (reg & EECD_EE_GNT)
   13038 			break;
   13039 		delay(5);
   13040 	}
   13041 	if ((reg & EECD_EE_GNT) == 0) {
   13042 		aprint_error_dev(sc->sc_dev,
   13043 		    "could not acquire EEPROM GNT\n");
   13044 		reg &= ~EECD_EE_REQ;
   13045 		CSR_WRITE(sc, WMREG_EECD, reg);
   13046 		return -1;
   13047 	}
   13048 
   13049 	return 0;
   13050 }
   13051 
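/*
 * Raise and lower the EEPROM clock (the EECD SK bit).  SPI parts need
 * about 1us per edge, Microwire parts about 50us.
 */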
   13052 static void
   13053 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13054 {
   13055 
   13056 	*eecd |= EECD_SK;
   13057 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13058 	CSR_WRITE_FLUSH(sc);
   13059 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13060 		delay(1);
   13061 	else
   13062 		delay(50);
   13063 }
   13064 
   13065 static void
   13066 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13067 {
   13068 
   13069 	*eecd &= ~EECD_SK;
   13070 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13071 	CSR_WRITE_FLUSH(sc);
   13072 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13073 		delay(1);
   13074 	else
   13075 		delay(50);
   13076 }
   13077 
   13078 static void
   13079 wm_put_eecd(struct wm_softc *sc)
   13080 {
   13081 	uint32_t reg;
   13082 
   13083 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13084 		device_xname(sc->sc_dev), __func__));
   13085 
   13086 	/* Stop nvm */
   13087 	reg = CSR_READ(sc, WMREG_EECD);
   13088 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13089 		/* Pull CS high */
   13090 		reg |= EECD_CS;
   13091 		wm_nvm_eec_clock_lower(sc, &reg);
   13092 	} else {
   13093 		/* CS on Microwire is active-high */
   13094 		reg &= ~(EECD_CS | EECD_DI);
   13095 		CSR_WRITE(sc, WMREG_EECD, reg);
   13096 		wm_nvm_eec_clock_raise(sc, &reg);
   13097 		wm_nvm_eec_clock_lower(sc, &reg);
   13098 	}
   13099 
   13100 	reg = CSR_READ(sc, WMREG_EECD);
   13101 	reg &= ~EECD_EE_REQ;
   13102 	CSR_WRITE(sc, WMREG_EECD, reg);
   13103 
   13104 	return;
   13105 }
   13106 
   13107 /*
   13108  * Get hardware semaphore.
   13109  * Same as e1000_get_hw_semaphore_generic()
   13110  */
   13111 static int
   13112 wm_get_swsm_semaphore(struct wm_softc *sc)
   13113 {
   13114 	int32_t timeout;
   13115 	uint32_t swsm;
   13116 
   13117 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13118 		device_xname(sc->sc_dev), __func__));
   13119 	KASSERT(sc->sc_nvm_wordsize > 0);
   13120 
   13121 retry:
   13122 	/* Get the SW semaphore. */
   13123 	timeout = sc->sc_nvm_wordsize + 1;
   13124 	while (timeout) {
   13125 		swsm = CSR_READ(sc, WMREG_SWSM);
   13126 
   13127 		if ((swsm & SWSM_SMBI) == 0)
   13128 			break;
   13129 
   13130 		delay(50);
   13131 		timeout--;
   13132 	}
   13133 
   13134 	if (timeout == 0) {
   13135 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13136 			/*
   13137 			 * In rare circumstances, the SW semaphore may already
   13138 			 * be held unintentionally. Clear the semaphore once
   13139 			 * before giving up.
   13140 			 */
   13141 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13142 			wm_put_swsm_semaphore(sc);
   13143 			goto retry;
   13144 		}
   13145 		aprint_error_dev(sc->sc_dev,
   13146 		    "could not acquire SWSM SMBI\n");
   13147 		return 1;
   13148 	}
   13149 
   13150 	/* Get the FW semaphore. */
   13151 	timeout = sc->sc_nvm_wordsize + 1;
   13152 	while (timeout) {
   13153 		swsm = CSR_READ(sc, WMREG_SWSM);
   13154 		swsm |= SWSM_SWESMBI;
   13155 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13156 		/* If we managed to set the bit we got the semaphore. */
   13157 		swsm = CSR_READ(sc, WMREG_SWSM);
   13158 		if (swsm & SWSM_SWESMBI)
   13159 			break;
   13160 
   13161 		delay(50);
   13162 		timeout--;
   13163 	}
   13164 
   13165 	if (timeout == 0) {
   13166 		aprint_error_dev(sc->sc_dev,
   13167 		    "could not acquire SWSM SWESMBI\n");
   13168 		/* Release semaphores */
   13169 		wm_put_swsm_semaphore(sc);
   13170 		return 1;
   13171 	}
   13172 	return 0;
   13173 }
   13174 
   13175 /*
   13176  * Put hardware semaphore.
   13177  * Same as e1000_put_hw_semaphore_generic()
   13178  */
   13179 static void
   13180 wm_put_swsm_semaphore(struct wm_softc *sc)
   13181 {
   13182 	uint32_t swsm;
   13183 
   13184 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13185 		device_xname(sc->sc_dev), __func__));
   13186 
   13187 	swsm = CSR_READ(sc, WMREG_SWSM);
   13188 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13189 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13190 }
   13191 
   13192 /*
   13193  * Get SW/FW semaphore.
   13194  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13195  */
   13196 static int
   13197 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13198 {
   13199 	uint32_t swfw_sync;
   13200 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13201 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13202 	int timeout;
   13203 
   13204 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13205 		device_xname(sc->sc_dev), __func__));
   13206 
   13207 	if (sc->sc_type == WM_T_80003)
   13208 		timeout = 50;
   13209 	else
   13210 		timeout = 200;
   13211 
   13212 	while (timeout) {
   13213 		if (wm_get_swsm_semaphore(sc)) {
   13214 			aprint_error_dev(sc->sc_dev,
   13215 			    "%s: failed to get semaphore\n",
   13216 			    __func__);
   13217 			return 1;
   13218 		}
   13219 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13220 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13221 			swfw_sync |= swmask;
   13222 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13223 			wm_put_swsm_semaphore(sc);
   13224 			return 0;
   13225 		}
   13226 		wm_put_swsm_semaphore(sc);
   13227 		delay(5000);
   13228 		timeout--;
   13229 	}
   13230 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13231 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13232 	return 1;
   13233 }
   13234 
   13235 static void
   13236 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13237 {
   13238 	uint32_t swfw_sync;
   13239 
   13240 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13241 		device_xname(sc->sc_dev), __func__));
   13242 
   13243 	while (wm_get_swsm_semaphore(sc) != 0)
   13244 		continue;
   13245 
   13246 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13247 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13248 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13249 
   13250 	wm_put_swsm_semaphore(sc);
   13251 }
   13252 
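/*
 * Acquire NVM access on 80003: take the SWFW EEPROM semaphore first
 * and, when the chip also uses the EECD handshake, direct EEPROM
 * access as well.
 */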
   13253 static int
   13254 wm_get_nvm_80003(struct wm_softc *sc)
   13255 {
   13256 	int rv;
   13257 
   13258 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13259 		device_xname(sc->sc_dev), __func__));
   13260 
   13261 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13262 		aprint_error_dev(sc->sc_dev,
   13263 		    "%s: failed to get semaphore(SWFW)\n",
   13264 		    __func__);
   13265 		return rv;
   13266 	}
   13267 
   13268 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13269 	    && (rv = wm_get_eecd(sc)) != 0) {
   13270 		aprint_error_dev(sc->sc_dev,
   13271 		    "%s: failed to get semaphore(EECD)\n",
   13272 		    __func__);
   13273 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13274 		return rv;
   13275 	}
   13276 
   13277 	return 0;
   13278 }
   13279 
   13280 static void
   13281 wm_put_nvm_80003(struct wm_softc *sc)
   13282 {
   13283 
   13284 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13285 		device_xname(sc->sc_dev), __func__));
   13286 
   13287 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13288 		wm_put_eecd(sc);
   13289 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13290 }
   13291 
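/*
 * Acquire NVM access on the 82571 family: take the SWSM semaphore
 * first and, except on the 82573, the EECD handshake when required.
 */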
   13292 static int
   13293 wm_get_nvm_82571(struct wm_softc *sc)
   13294 {
   13295 	int rv;
   13296 
   13297 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13298 		device_xname(sc->sc_dev), __func__));
   13299 
   13300 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13301 		return rv;
   13302 
   13303 	switch (sc->sc_type) {
   13304 	case WM_T_82573:
   13305 		break;
   13306 	default:
   13307 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13308 			rv = wm_get_eecd(sc);
   13309 		break;
   13310 	}
   13311 
   13312 	if (rv != 0) {
   13313 		aprint_error_dev(sc->sc_dev,
   13314 		    "%s: failed to get semaphore\n",
   13315 		    __func__);
   13316 		wm_put_swsm_semaphore(sc);
   13317 	}
   13318 
   13319 	return rv;
   13320 }
   13321 
   13322 static void
   13323 wm_put_nvm_82571(struct wm_softc *sc)
   13324 {
   13325 
   13326 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13327 		device_xname(sc->sc_dev), __func__));
   13328 
   13329 	switch (sc->sc_type) {
   13330 	case WM_T_82573:
   13331 		break;
   13332 	default:
   13333 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13334 			wm_put_eecd(sc);
   13335 		break;
   13336 	}
   13337 
   13338 	wm_put_swsm_semaphore(sc);
   13339 }
   13340 
   13341 static int
   13342 wm_get_phy_82575(struct wm_softc *sc)
   13343 {
   13344 
   13345 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13346 		device_xname(sc->sc_dev), __func__));
   13347 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13348 }
   13349 
   13350 static void
   13351 wm_put_phy_82575(struct wm_softc *sc)
   13352 {
   13353 
   13354 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13355 		device_xname(sc->sc_dev), __func__));
   13356 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13357 }
   13358 
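/*
 * Get the combined SW/FW/HW semaphore by claiming MDIO software
 * ownership in EXTCNFCTR, polling for up to about one second.
 */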
   13359 static int
   13360 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13361 {
   13362 	uint32_t ext_ctrl;
   13363 	int timeout = 200;
   13364 
   13365 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13366 		device_xname(sc->sc_dev), __func__));
   13367 
   13368 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13369 	for (timeout = 0; timeout < 200; timeout++) {
   13370 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13371 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13372 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13373 
   13374 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13375 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13376 			return 0;
   13377 		delay(5000);
   13378 	}
   13379 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13380 	    device_xname(sc->sc_dev), ext_ctrl);
   13381 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13382 	return 1;
   13383 }
   13384 
   13385 static void
   13386 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13387 {
   13388 	uint32_t ext_ctrl;
   13389 
   13390 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13391 		device_xname(sc->sc_dev), __func__));
   13392 
   13393 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13394 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13395 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13396 
   13397 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13398 }
   13399 
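/*
 * Get the ICH8 software flag: wait for any current MDIO software owner
 * to release EXTCNFCTR, then claim and verify the ownership bit.
 */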
   13400 static int
   13401 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13402 {
   13403 	uint32_t ext_ctrl;
   13404 	int timeout;
   13405 
   13406 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13407 		device_xname(sc->sc_dev), __func__));
   13408 	mutex_enter(sc->sc_ich_phymtx);
   13409 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13410 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13411 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13412 			break;
   13413 		delay(1000);
   13414 	}
   13415 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13416 		printf("%s: SW has already locked the resource\n",
   13417 		    device_xname(sc->sc_dev));
   13418 		goto out;
   13419 	}
   13420 
   13421 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13422 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13423 	for (timeout = 0; timeout < 1000; timeout++) {
   13424 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13425 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13426 			break;
   13427 		delay(1000);
   13428 	}
   13429 	if (timeout >= 1000) {
   13430 		printf("%s: failed to acquire semaphore\n",
   13431 		    device_xname(sc->sc_dev));
   13432 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13433 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13434 		goto out;
   13435 	}
   13436 	return 0;
   13437 
   13438 out:
   13439 	mutex_exit(sc->sc_ich_phymtx);
   13440 	return 1;
   13441 }
   13442 
   13443 static void
   13444 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13445 {
   13446 	uint32_t ext_ctrl;
   13447 
   13448 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13449 		device_xname(sc->sc_dev), __func__));
   13450 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13451 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13452 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13453 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13454 	} else {
   13455 		printf("%s: Semaphore unexpectedly released\n",
   13456 		    device_xname(sc->sc_dev));
   13457 	}
   13458 
   13459 	mutex_exit(sc->sc_ich_phymtx);
   13460 }
   13461 
   13462 static int
   13463 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13464 {
   13465 
   13466 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13467 		device_xname(sc->sc_dev), __func__));
   13468 	mutex_enter(sc->sc_ich_nvmmtx);
   13469 
   13470 	return 0;
   13471 }
   13472 
   13473 static void
   13474 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13475 {
   13476 
   13477 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13478 		device_xname(sc->sc_dev), __func__));
   13479 	mutex_exit(sc->sc_ich_nvmmtx);
   13480 }
   13481 
   13482 static int
   13483 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13484 {
   13485 	int i = 0;
   13486 	uint32_t reg;
   13487 
   13488 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13489 		device_xname(sc->sc_dev), __func__));
   13490 
   13491 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13492 	do {
   13493 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13494 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13495 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13496 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13497 			break;
   13498 		delay(2*1000);
   13499 		i++;
   13500 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13501 
   13502 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13503 		wm_put_hw_semaphore_82573(sc);
   13504 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13505 		    device_xname(sc->sc_dev));
   13506 		return -1;
   13507 	}
   13508 
   13509 	return 0;
   13510 }
   13511 
   13512 static void
   13513 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13514 {
   13515 	uint32_t reg;
   13516 
   13517 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13518 		device_xname(sc->sc_dev), __func__));
   13519 
   13520 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13521 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13522 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13523 }
   13524 
   13525 /*
   13526  * Management mode and power management related subroutines.
   13527  * BMC, AMT, suspend/resume and EEE.
   13528  */
   13529 
   13530 #ifdef WM_WOL
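/*
 * Check whether BMC/AMT management mode is enabled, dispatching on the
 * chip family below.
 */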
   13531 static int
   13532 wm_check_mng_mode(struct wm_softc *sc)
   13533 {
   13534 	int rv;
   13535 
   13536 	switch (sc->sc_type) {
   13537 	case WM_T_ICH8:
   13538 	case WM_T_ICH9:
   13539 	case WM_T_ICH10:
   13540 	case WM_T_PCH:
   13541 	case WM_T_PCH2:
   13542 	case WM_T_PCH_LPT:
   13543 	case WM_T_PCH_SPT:
   13544 	case WM_T_PCH_CNP:
   13545 		rv = wm_check_mng_mode_ich8lan(sc);
   13546 		break;
   13547 	case WM_T_82574:
   13548 	case WM_T_82583:
   13549 		rv = wm_check_mng_mode_82574(sc);
   13550 		break;
   13551 	case WM_T_82571:
   13552 	case WM_T_82572:
   13553 	case WM_T_82573:
   13554 	case WM_T_80003:
   13555 		rv = wm_check_mng_mode_generic(sc);
   13556 		break;
   13557 	default:
    13558 		/* nothing to do */
   13559 		rv = 0;
   13560 		break;
   13561 	}
   13562 
   13563 	return rv;
   13564 }
   13565 
   13566 static int
   13567 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13568 {
   13569 	uint32_t fwsm;
   13570 
   13571 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13572 
   13573 	if (((fwsm & FWSM_FW_VALID) != 0)
   13574 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13575 		return 1;
   13576 
   13577 	return 0;
   13578 }
   13579 
   13580 static int
   13581 wm_check_mng_mode_82574(struct wm_softc *sc)
   13582 {
   13583 	uint16_t data;
   13584 
   13585 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13586 
   13587 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13588 		return 1;
   13589 
   13590 	return 0;
   13591 }
   13592 
   13593 static int
   13594 wm_check_mng_mode_generic(struct wm_softc *sc)
   13595 {
   13596 	uint32_t fwsm;
   13597 
   13598 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13599 
   13600 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13601 		return 1;
   13602 
   13603 	return 0;
   13604 }
   13605 #endif /* WM_WOL */
   13606 
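/*
 * Decide whether management pass-through (delivering received TCO
 * management packets to the host) should be enabled; returns 1 if so.
 */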
   13607 static int
   13608 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13609 {
   13610 	uint32_t manc, fwsm, factps;
   13611 
   13612 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13613 		return 0;
   13614 
   13615 	manc = CSR_READ(sc, WMREG_MANC);
   13616 
   13617 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13618 		device_xname(sc->sc_dev), manc));
   13619 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13620 		return 0;
   13621 
   13622 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13623 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13624 		factps = CSR_READ(sc, WMREG_FACTPS);
   13625 		if (((factps & FACTPS_MNGCG) == 0)
   13626 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13627 			return 1;
   13628 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13629 		uint16_t data;
   13630 
   13631 		factps = CSR_READ(sc, WMREG_FACTPS);
   13632 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13633 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13634 			device_xname(sc->sc_dev), factps, data));
   13635 		if (((factps & FACTPS_MNGCG) == 0)
   13636 		    && ((data & NVM_CFG2_MNGM_MASK)
   13637 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13638 			return 1;
   13639 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13640 	    && ((manc & MANC_ASF_EN) == 0))
   13641 		return 1;
   13642 
   13643 	return 0;
   13644 }
   13645 
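/*
 * Check whether firmware currently blocks PHY resets: FWSM_RSPCIPHY on
 * ICH/PCH parts (retried for up to ~300ms), MANC_BLK_PHY_RST_ON_IDE on
 * the 8257x/80003 parts.
 */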
   13646 static bool
   13647 wm_phy_resetisblocked(struct wm_softc *sc)
   13648 {
   13649 	bool blocked = false;
   13650 	uint32_t reg;
   13651 	int i = 0;
   13652 
   13653 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13654 		device_xname(sc->sc_dev), __func__));
   13655 
   13656 	switch (sc->sc_type) {
   13657 	case WM_T_ICH8:
   13658 	case WM_T_ICH9:
   13659 	case WM_T_ICH10:
   13660 	case WM_T_PCH:
   13661 	case WM_T_PCH2:
   13662 	case WM_T_PCH_LPT:
   13663 	case WM_T_PCH_SPT:
   13664 	case WM_T_PCH_CNP:
   13665 		do {
   13666 			reg = CSR_READ(sc, WMREG_FWSM);
   13667 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13668 				blocked = true;
   13669 				delay(10*1000);
   13670 				continue;
   13671 			}
   13672 			blocked = false;
   13673 		} while (blocked && (i++ < 30));
   13674 		return blocked;
   13675 		break;
   13676 	case WM_T_82571:
   13677 	case WM_T_82572:
   13678 	case WM_T_82573:
   13679 	case WM_T_82574:
   13680 	case WM_T_82583:
   13681 	case WM_T_80003:
   13682 		reg = CSR_READ(sc, WMREG_MANC);
   13683 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13684 			return true;
   13685 		else
   13686 			return false;
   13687 		break;
   13688 	default:
   13689 		/* no problem */
   13690 		break;
   13691 	}
   13692 
   13693 	return false;
   13694 }
   13695 
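/*
 * Tell the firmware that the driver has taken over: SWSM_DRV_LOAD on
 * the 82573, CTRL_EXT_DRV_LOAD on 82571 and newer.
 */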
   13696 static void
   13697 wm_get_hw_control(struct wm_softc *sc)
   13698 {
   13699 	uint32_t reg;
   13700 
   13701 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13702 		device_xname(sc->sc_dev), __func__));
   13703 
   13704 	if (sc->sc_type == WM_T_82573) {
   13705 		reg = CSR_READ(sc, WMREG_SWSM);
   13706 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13707 	} else if (sc->sc_type >= WM_T_82571) {
   13708 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13709 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13710 	}
   13711 }
   13712 
   13713 static void
   13714 wm_release_hw_control(struct wm_softc *sc)
   13715 {
   13716 	uint32_t reg;
   13717 
   13718 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13719 		device_xname(sc->sc_dev), __func__));
   13720 
   13721 	if (sc->sc_type == WM_T_82573) {
   13722 		reg = CSR_READ(sc, WMREG_SWSM);
   13723 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13724 	} else if (sc->sc_type >= WM_T_82571) {
   13725 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13726 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13727 	}
   13728 }
   13729 
   13730 static void
   13731 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13732 {
   13733 	uint32_t reg;
   13734 
   13735 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13736 		device_xname(sc->sc_dev), __func__));
   13737 
   13738 	if (sc->sc_type < WM_T_PCH2)
   13739 		return;
   13740 
   13741 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13742 
   13743 	if (gate)
   13744 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13745 	else
   13746 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13747 
   13748 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13749 }
   13750 
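/*
 * Switch PHY access from SMBus to PCIe/MDIO mode on ICH/PCH parts,
 * toggling LANPHYPC and (un)forcing SMBus as needed until the PHY
 * becomes accessible.
 */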
   13751 static void
   13752 wm_smbustopci(struct wm_softc *sc)
   13753 {
   13754 	uint32_t fwsm, reg;
   13755 	int rv = 0;
   13756 
   13757 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13758 		device_xname(sc->sc_dev), __func__));
   13759 
   13760 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13761 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13762 
   13763 	/* Disable ULP */
   13764 	wm_ulp_disable(sc);
   13765 
   13766 	/* Acquire PHY semaphore */
   13767 	sc->phy.acquire(sc);
   13768 
   13769 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13770 	switch (sc->sc_type) {
   13771 	case WM_T_PCH_LPT:
   13772 	case WM_T_PCH_SPT:
   13773 	case WM_T_PCH_CNP:
   13774 		if (wm_phy_is_accessible_pchlan(sc))
   13775 			break;
   13776 
   13777 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13778 		reg |= CTRL_EXT_FORCE_SMBUS;
   13779 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13780 #if 0
   13781 		/* XXX Isn't this required??? */
   13782 		CSR_WRITE_FLUSH(sc);
   13783 #endif
   13784 		delay(50 * 1000);
   13785 		/* FALLTHROUGH */
   13786 	case WM_T_PCH2:
   13787 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13788 			break;
   13789 		/* FALLTHROUGH */
   13790 	case WM_T_PCH:
   13791 		if (sc->sc_type == WM_T_PCH)
   13792 			if ((fwsm & FWSM_FW_VALID) != 0)
   13793 				break;
   13794 
   13795 		if (wm_phy_resetisblocked(sc) == true) {
   13796 			printf("XXX reset is blocked(3)\n");
   13797 			break;
   13798 		}
   13799 
   13800 		wm_toggle_lanphypc_pch_lpt(sc);
   13801 
   13802 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13803 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13804 				break;
   13805 
   13806 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13807 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13808 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13809 
   13810 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13811 				break;
   13812 			rv = -1;
   13813 		}
   13814 		break;
   13815 	default:
   13816 		break;
   13817 	}
   13818 
   13819 	/* Release semaphore */
   13820 	sc->phy.release(sc);
   13821 
   13822 	if (rv == 0) {
   13823 		if (wm_phy_resetisblocked(sc)) {
   13824 			printf("XXX reset is blocked(4)\n");
   13825 			goto out;
   13826 		}
   13827 		wm_reset_phy(sc);
   13828 		if (wm_phy_resetisblocked(sc))
   13829 			printf("XXX reset is blocked(4)\n");
   13830 	}
   13831 
   13832 out:
   13833 	/*
   13834 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13835 	 */
   13836 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13837 		delay(10*1000);
   13838 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13839 	}
   13840 }
   13841 
   13842 static void
   13843 wm_init_manageability(struct wm_softc *sc)
   13844 {
   13845 
   13846 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13847 		device_xname(sc->sc_dev), __func__));
   13848 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13849 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13850 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13851 
   13852 		/* Disable hardware interception of ARP */
   13853 		manc &= ~MANC_ARP_EN;
   13854 
   13855 		/* Enable receiving management packets to the host */
   13856 		if (sc->sc_type >= WM_T_82571) {
   13857 			manc |= MANC_EN_MNG2HOST;
   13858 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13859 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13860 		}
   13861 
   13862 		CSR_WRITE(sc, WMREG_MANC, manc);
   13863 	}
   13864 }
   13865 
   13866 static void
   13867 wm_release_manageability(struct wm_softc *sc)
   13868 {
   13869 
   13870 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13871 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13872 
   13873 		manc |= MANC_ARP_EN;
   13874 		if (sc->sc_type >= WM_T_82571)
   13875 			manc &= ~MANC_EN_MNG2HOST;
   13876 
   13877 		CSR_WRITE(sc, WMREG_MANC, manc);
   13878 	}
   13879 }
   13880 
   13881 static void
   13882 wm_get_wakeup(struct wm_softc *sc)
   13883 {
   13884 
   13885 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13886 	switch (sc->sc_type) {
   13887 	case WM_T_82573:
   13888 	case WM_T_82583:
   13889 		sc->sc_flags |= WM_F_HAS_AMT;
   13890 		/* FALLTHROUGH */
   13891 	case WM_T_80003:
   13892 	case WM_T_82575:
   13893 	case WM_T_82576:
   13894 	case WM_T_82580:
   13895 	case WM_T_I350:
   13896 	case WM_T_I354:
   13897 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13898 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13899 		/* FALLTHROUGH */
   13900 	case WM_T_82541:
   13901 	case WM_T_82541_2:
   13902 	case WM_T_82547:
   13903 	case WM_T_82547_2:
   13904 	case WM_T_82571:
   13905 	case WM_T_82572:
   13906 	case WM_T_82574:
   13907 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13908 		break;
   13909 	case WM_T_ICH8:
   13910 	case WM_T_ICH9:
   13911 	case WM_T_ICH10:
   13912 	case WM_T_PCH:
   13913 	case WM_T_PCH2:
   13914 	case WM_T_PCH_LPT:
   13915 	case WM_T_PCH_SPT:
   13916 	case WM_T_PCH_CNP:
   13917 		sc->sc_flags |= WM_F_HAS_AMT;
   13918 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13919 		break;
   13920 	default:
   13921 		break;
   13922 	}
   13923 
   13924 	/* 1: HAS_MANAGE */
   13925 	if (wm_enable_mng_pass_thru(sc) != 0)
   13926 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13927 
   13928 	/*
    13929 	 * Note that the WOL flags are set after the EEPROM-related
    13930 	 * reset is done.
   13931 	 */
   13932 }
   13933 
   13934 /*
   13935  * Unconfigure Ultra Low Power mode.
   13936  * Only for I217 and newer (see below).
   13937  */
   13938 static int
   13939 wm_ulp_disable(struct wm_softc *sc)
   13940 {
   13941 	uint32_t reg;
   13942 	uint16_t phyreg;
   13943 	int i = 0, rv = 0;
   13944 
   13945 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13946 		device_xname(sc->sc_dev), __func__));
   13947 	/* Exclude old devices */
   13948 	if ((sc->sc_type < WM_T_PCH_LPT)
   13949 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13950 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13951 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13952 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13953 		return 0;
   13954 
   13955 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13956 		/* Request ME un-configure ULP mode in the PHY */
   13957 		reg = CSR_READ(sc, WMREG_H2ME);
   13958 		reg &= ~H2ME_ULP;
   13959 		reg |= H2ME_ENFORCE_SETTINGS;
   13960 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13961 
   13962 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13963 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13964 			if (i++ == 30) {
   13965 				printf("%s timed out\n", __func__);
   13966 				return -1;
   13967 			}
   13968 			delay(10 * 1000);
   13969 		}
   13970 		reg = CSR_READ(sc, WMREG_H2ME);
   13971 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13972 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13973 
   13974 		return 0;
   13975 	}
   13976 
   13977 	/* Acquire semaphore */
   13978 	sc->phy.acquire(sc);
   13979 
   13980 	/* Toggle LANPHYPC */
   13981 	wm_toggle_lanphypc_pch_lpt(sc);
   13982 
   13983 	/* Unforce SMBus mode in PHY */
   13984 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   13985 	if (rv != 0) {
   13986 		uint32_t reg2;
   13987 
   13988 		printf("%s: Force SMBus first.\n", __func__);
   13989 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13990 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13991 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13992 		delay(50 * 1000);
   13993 
   13994 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   13995 		    &phyreg);
   13996 		if (rv != 0)
   13997 			goto release;
   13998 	}
   13999 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14000 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14001 
   14002 	/* Unforce SMBus mode in MAC */
   14003 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14004 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14005 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14006 
   14007 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14008 	if (rv != 0)
   14009 		goto release;
   14010 	phyreg |= HV_PM_CTRL_K1_ENA;
   14011 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14012 
   14013 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14014 		&phyreg);
   14015 	if (rv != 0)
   14016 		goto release;
   14017 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14018 	    | I218_ULP_CONFIG1_STICKY_ULP
   14019 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14020 	    | I218_ULP_CONFIG1_WOL_HOST
   14021 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14022 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14023 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14024 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14025 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14026 	phyreg |= I218_ULP_CONFIG1_START;
   14027 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14028 
   14029 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14030 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14031 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14032 
   14033 release:
   14034 	/* Release semaphore */
   14035 	sc->phy.release(sc);
   14036 	wm_gmii_reset(sc);
   14037 	delay(50 * 1000);
   14038 
   14039 	return rv;
   14040 }
   14041 
   14042 /* WOL in the newer chipset interfaces (pchlan) */
   14043 static void
   14044 wm_enable_phy_wakeup(struct wm_softc *sc)
   14045 {
   14046 #if 0
   14047 	uint16_t preg;
   14048 
   14049 	/* Copy MAC RARs to PHY RARs */
   14050 
   14051 	/* Copy MAC MTA to PHY MTA */
   14052 
   14053 	/* Configure PHY Rx Control register */
   14054 
   14055 	/* Enable PHY wakeup in MAC register */
   14056 
   14057 	/* Configure and enable PHY wakeup in PHY registers */
   14058 
   14059 	/* Activate PHY wakeup */
   14060 
   14061 	/* XXX */
   14062 #endif
   14063 }
   14064 
   14065 /* Power down workaround on D3 */
   14066 static void
   14067 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14068 {
   14069 	uint32_t reg;
   14070 	int i;
   14071 
   14072 	for (i = 0; i < 2; i++) {
   14073 		/* Disable link */
   14074 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14075 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14076 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14077 
   14078 		/*
   14079 		 * Call gig speed drop workaround on Gig disable before
   14080 		 * accessing any PHY registers
   14081 		 */
   14082 		if (sc->sc_type == WM_T_ICH8)
   14083 			wm_gig_downshift_workaround_ich8lan(sc);
   14084 
   14085 		/* Write VR power-down enable */
   14086 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14087 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14088 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14089 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   14090 
   14091 		/* Read it back and test */
   14092 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14093 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14094 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14095 			break;
   14096 
   14097 		/* Issue PHY reset and repeat at most one more time */
   14098 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14099 	}
   14100 }
   14101 
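/*
 * Arm wake-on-LAN: advertise the wakeup capability, apply the ICH/PCH
 * gig-disable and PHY power-down workarounds, and request PME.
 */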
   14102 static void
   14103 wm_enable_wakeup(struct wm_softc *sc)
   14104 {
   14105 	uint32_t reg, pmreg;
   14106 	pcireg_t pmode;
   14107 
   14108 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14109 		device_xname(sc->sc_dev), __func__));
   14110 
   14111 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14112 		&pmreg, NULL) == 0)
   14113 		return;
   14114 
   14115 	/* Advertise the wakeup capability */
   14116 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14117 	    | CTRL_SWDPIN(3));
   14118 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   14119 
   14120 	/* ICH workaround */
   14121 	switch (sc->sc_type) {
   14122 	case WM_T_ICH8:
   14123 	case WM_T_ICH9:
   14124 	case WM_T_ICH10:
   14125 	case WM_T_PCH:
   14126 	case WM_T_PCH2:
   14127 	case WM_T_PCH_LPT:
   14128 	case WM_T_PCH_SPT:
   14129 	case WM_T_PCH_CNP:
   14130 		/* Disable gig during WOL */
   14131 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14132 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   14133 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14134 		if (sc->sc_type == WM_T_PCH)
   14135 			wm_gmii_reset(sc);
   14136 
   14137 		/* Power down workaround */
   14138 		if (sc->sc_phytype == WMPHY_82577) {
   14139 			struct mii_softc *child;
   14140 
   14141 			/* Assume that the PHY is copper */
   14142 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14143 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   14144 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   14145 				    (768 << 5) | 25, 0x0444); /* magic num */
   14146 		}
   14147 		break;
   14148 	default:
   14149 		break;
   14150 	}
   14151 
   14152 	/* Keep the laser running on fiber adapters */
   14153 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14154 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14155 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14156 		reg |= CTRL_EXT_SWDPIN(3);
   14157 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14158 	}
   14159 
   14160 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14161 #if 0	/* for the multicast packet */
   14162 	reg |= WUFC_MC;
   14163 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14164 #endif
   14165 
   14166 	if (sc->sc_type >= WM_T_PCH)
   14167 		wm_enable_phy_wakeup(sc);
   14168 	else {
   14169 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14170 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14171 	}
   14172 
   14173 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14174 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14175 		|| (sc->sc_type == WM_T_PCH2))
   14176 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14177 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14178 
   14179 	/* Request PME */
   14180 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14181 #if 0
   14182 	/* Disable WOL */
   14183 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14184 #else
   14185 	/* For WOL */
   14186 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14187 #endif
   14188 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14189 }
   14190 
   14191 /* Disable ASPM L0s and/or L1 for workaround */
   14192 static void
   14193 wm_disable_aspm(struct wm_softc *sc)
   14194 {
   14195 	pcireg_t reg, mask = 0;
    14196 	const char *str = "";
   14197 
   14198 	/*
    14199 	 * Only for PCIe devices which have the PCIe capability in their
    14200 	 * PCI config space.
   14201 	 */
   14202 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14203 		return;
   14204 
   14205 	switch (sc->sc_type) {
   14206 	case WM_T_82571:
   14207 	case WM_T_82572:
   14208 		/*
   14209 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14210 		 * State Power management L1 State (ASPM L1).
   14211 		 */
   14212 		mask = PCIE_LCSR_ASPM_L1;
   14213 		str = "L1 is";
   14214 		break;
   14215 	case WM_T_82573:
   14216 	case WM_T_82574:
   14217 	case WM_T_82583:
   14218 		/*
   14219 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14220 		 *
    14221 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14222 		 * some chipsets.  The 82574 and 82583 documents say that
    14223 		 * disabling L0s on those specific chipsets is sufficient,
    14224 		 * but we follow what the Intel em driver does.
   14225 		 *
   14226 		 * References:
   14227 		 * Errata 8 of the Specification Update of i82573.
   14228 		 * Errata 20 of the Specification Update of i82574.
   14229 		 * Errata 9 of the Specification Update of i82583.
   14230 		 */
   14231 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14232 		str = "L0s and L1 are";
   14233 		break;
   14234 	default:
   14235 		return;
   14236 	}
   14237 
   14238 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14239 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14240 	reg &= ~mask;
   14241 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14242 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14243 
   14244 	/* Print only in wm_attach() */
   14245 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14246 		aprint_verbose_dev(sc->sc_dev,
    14247 		    "ASPM %s disabled to work around the errata.\n", str);
   14248 }
   14249 
   14250 /* LPLU */
   14251 
   14252 static void
   14253 wm_lplu_d0_disable(struct wm_softc *sc)
   14254 {
   14255 	struct mii_data *mii = &sc->sc_mii;
   14256 	uint32_t reg;
   14257 
   14258 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14259 		device_xname(sc->sc_dev), __func__));
   14260 
   14261 	if (sc->sc_phytype == WMPHY_IFE)
   14262 		return;
   14263 
   14264 	switch (sc->sc_type) {
   14265 	case WM_T_82571:
   14266 	case WM_T_82572:
   14267 	case WM_T_82573:
   14268 	case WM_T_82575:
   14269 	case WM_T_82576:
   14270 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14271 		reg &= ~PMR_D0_LPLU;
   14272 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14273 		break;
   14274 	case WM_T_82580:
   14275 	case WM_T_I350:
   14276 	case WM_T_I210:
   14277 	case WM_T_I211:
   14278 		reg = CSR_READ(sc, WMREG_PHPM);
   14279 		reg &= ~PHPM_D0A_LPLU;
   14280 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14281 		break;
   14282 	case WM_T_82574:
   14283 	case WM_T_82583:
   14284 	case WM_T_ICH8:
   14285 	case WM_T_ICH9:
   14286 	case WM_T_ICH10:
   14287 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14288 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14289 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14290 		CSR_WRITE_FLUSH(sc);
   14291 		break;
   14292 	case WM_T_PCH:
   14293 	case WM_T_PCH2:
   14294 	case WM_T_PCH_LPT:
   14295 	case WM_T_PCH_SPT:
   14296 	case WM_T_PCH_CNP:
   14297 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14298 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14299 		if (wm_phy_resetisblocked(sc) == false)
   14300 			reg |= HV_OEM_BITS_ANEGNOW;
   14301 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14302 		break;
   14303 	default:
   14304 		break;
   14305 	}
   14306 }
   14307 
   14308 /* EEE */
   14309 
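/*
 * Enable or disable Energy Efficient Ethernet advertisement and LPI on
 * the I350 according to the WM_F_EEE flag.
 */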
   14310 static void
   14311 wm_set_eee_i350(struct wm_softc *sc)
   14312 {
   14313 	uint32_t ipcnfg, eeer;
   14314 
   14315 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14316 	eeer = CSR_READ(sc, WMREG_EEER);
   14317 
   14318 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14319 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14320 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14321 		    | EEER_LPI_FC);
   14322 	} else {
   14323 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14324 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14325 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14326 		    | EEER_LPI_FC);
   14327 	}
   14328 
   14329 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14330 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14331 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14332 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14333 }
   14334 
   14335 /*
   14336  * Workarounds (mainly PHY related).
   14337  * Basically, PHY's workarounds are in the PHY drivers.
   14338  */
   14339 
   14340 /* Work-around for 82566 Kumeran PCS lock loss */
   14341 static void
   14342 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14343 {
   14344 	struct mii_data *mii = &sc->sc_mii;
   14345 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14346 	int i;
   14347 	int reg;
   14348 
   14349 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14350 		device_xname(sc->sc_dev), __func__));
   14351 
   14352 	/* If the link is not up, do nothing */
   14353 	if ((status & STATUS_LU) == 0)
   14354 		return;
   14355 
   14356 	/* Nothing to do if the link is other than 1Gbps */
   14357 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14358 		return;
   14359 
   14360 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14361 	for (i = 0; i < 10; i++) {
   14362 		/* read twice */
   14363 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14364 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14365 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14366 			goto out;	/* GOOD! */
   14367 
   14368 		/* Reset the PHY */
   14369 		wm_reset_phy(sc);
   14370 		delay(5*1000);
   14371 	}
   14372 
   14373 	/* Disable GigE link negotiation */
   14374 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14375 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14376 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14377 
   14378 	/*
   14379 	 * Call gig speed drop workaround on Gig disable before accessing
   14380 	 * any PHY registers.
   14381 	 */
   14382 	wm_gig_downshift_workaround_ich8lan(sc);
   14383 
   14384 out:
   14385 	return;
   14386 }
   14387 
   14388 /* WOL from S5 stops working */
   14389 static void
   14390 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14391 {
   14392 	uint16_t kmreg;
   14393 
   14394 	/* Only for igp3 */
   14395 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14396 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14397 			return;
   14398 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14399 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14400 			return;
   14401 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14402 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14403 	}
   14404 }
   14405 
   14406 /*
   14407  * Workaround for pch's PHYs
   14408  * XXX should be moved to new PHY driver?
   14409  */
   14410 static void
   14411 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14412 {
   14413 
   14414 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14415 		device_xname(sc->sc_dev), __func__));
   14416 	KASSERT(sc->sc_type == WM_T_PCH);
   14417 
   14418 	if (sc->sc_phytype == WMPHY_82577)
   14419 		wm_set_mdio_slow_mode_hv(sc);
   14420 
   14421 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14422 
   14423 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14424 
   14425 	/* 82578 */
   14426 	if (sc->sc_phytype == WMPHY_82578) {
   14427 		struct mii_softc *child;
   14428 
   14429 		/*
   14430 		 * Return registers to default by doing a soft reset then
   14431 		 * writing 0x3140 to the control register
   14432 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14433 		 */
   14434 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14435 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14436 			PHY_RESET(child);
   14437 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14438 			    0x3140);
   14439 		}
   14440 	}
   14441 
   14442 	/* Select page 0 */
   14443 	sc->phy.acquire(sc);
   14444 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14445 	sc->phy.release(sc);
   14446 
   14447 	/*
   14448 	 * Configure the K1 Si workaround during phy reset assuming there is
   14449 	 * link so that it disables K1 if link is in 1Gbps.
   14450 	 */
   14451 	wm_k1_gig_workaround_hv(sc, 1);
   14452 }
   14453 
   14454 static void
   14455 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14456 {
   14457 
   14458 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14459 		device_xname(sc->sc_dev), __func__));
   14460 	KASSERT(sc->sc_type == WM_T_PCH2);
   14461 
   14462 	wm_set_mdio_slow_mode_hv(sc);
   14463 }
   14464 
   14465 /**
   14466  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   14467  *  @link: link up bool flag
   14468  *
   14469  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   14470  *  preventing further DMA write requests.  Workaround the issue by disabling
    14471  *  the de-assertion of the clock request when in 1Gbps mode.
   14472  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   14473  *  speeds in order to avoid Tx hangs.
   14474  **/
   14475 static int
   14476 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   14477 {
   14478 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   14479 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14480 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   14481 	uint16_t phyreg;
   14482 
   14483 	if (link && (speed == STATUS_SPEED_1000)) {
   14484 		sc->phy.acquire(sc);
   14485 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14486 		    &phyreg);
   14487 		if (rv != 0)
   14488 			goto release;
   14489 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14490 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   14491 		if (rv != 0)
   14492 			goto release;
   14493 		delay(20);
   14494 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   14495 
   14496 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14497 		    &phyreg);
   14498 release:
   14499 		sc->phy.release(sc);
   14500 		return rv;
   14501 	}
   14502 
   14503 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   14504 
   14505 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14506 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   14507 	    || !link
   14508 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   14509 		goto update_fextnvm6;
   14510 
   14511 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL);
   14512 
   14513 	/* Clear link status transmit timeout */
   14514 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   14515 	if (speed == STATUS_SPEED_100) {
   14516 		/* Set inband Tx timeout to 5x10us for 100Half */
   14517 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14518 
   14519 		/* Do not extend the K1 entry latency for 100Half */
   14520 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14521 	} else {
   14522 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   14523 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14524 
   14525 		/* Extend the K1 entry latency for 10 Mbps */
   14526 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14527 	}
   14528 
   14529 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   14530 
   14531 update_fextnvm6:
   14532 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   14533 	return 0;
   14534 }
   14535 
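/*
 * K1 workaround for HV (82577/82578) PHYs: disable K1 while the link
 * is up and program the IGP3_KMRN_DIAG link stall fix.
 */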
   14536 static int
   14537 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14538 {
   14539 	int k1_enable = sc->sc_nvm_k1_enabled;
   14540 
   14541 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14542 		device_xname(sc->sc_dev), __func__));
   14543 
   14544 	if (sc->phy.acquire(sc) != 0)
   14545 		return -1;
   14546 
   14547 	if (link) {
   14548 		k1_enable = 0;
   14549 
   14550 		/* Link stall fix for link up */
   14551 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14552 		    0x0100);
   14553 	} else {
   14554 		/* Link stall fix for link down */
   14555 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14556 		    0x4100);
   14557 	}
   14558 
   14559 	wm_configure_k1_ich8lan(sc, k1_enable);
   14560 	sc->phy.release(sc);
   14561 
   14562 	return 0;
   14563 }
   14564 
   14565 static void
   14566 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14567 {
   14568 	uint32_t reg;
   14569 
   14570 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14571 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14572 	    reg | HV_KMRN_MDIO_SLOW);
   14573 }
   14574 
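/*
 * Write the K1 enable state to the Kumeran K1 config register, then
 * briefly force the MAC speed so the new setting takes effect; the
 * caller must hold MDIO ownership (asserted below).
 */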
   14575 static void
   14576 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14577 {
   14578 	uint32_t ctrl, ctrl_ext, tmp;
   14579 	uint16_t kmreg;
   14580 	int rv;
   14581 
   14582 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14583 
   14584 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14585 	if (rv != 0)
   14586 		return;
   14587 
   14588 	if (k1_enable)
   14589 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14590 	else
   14591 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14592 
   14593 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14594 	if (rv != 0)
   14595 		return;
   14596 
   14597 	delay(20);
   14598 
   14599 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14600 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14601 
   14602 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14603 	tmp |= CTRL_FRCSPD;
   14604 
   14605 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14606 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14607 	CSR_WRITE_FLUSH(sc);
   14608 	delay(20);
   14609 
   14610 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14611 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14612 	CSR_WRITE_FLUSH(sc);
   14613 	delay(20);
   14614 
   14615 	return;
   14616 }
   14617 
   14618 /* special case - for 82575 - need to do manual init ... */
   14619 static void
   14620 wm_reset_init_script_82575(struct wm_softc *sc)
   14621 {
   14622 	/*
    14623 	 * Remark: this is untested code - we have no board without an EEPROM;
    14624 	 * same setup as mentioned in the FreeBSD driver for the i82575.
   14625 	 */
   14626 
   14627 	/* SerDes configuration via SERDESCTRL */
   14628 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14629 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14630 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14631 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14632 
   14633 	/* CCM configuration via CCMCTL register */
   14634 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14635 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14636 
   14637 	/* PCIe lanes configuration */
   14638 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14639 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14640 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14641 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14642 
   14643 	/* PCIe PLL Configuration */
   14644 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14645 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14646 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14647 }
   14648 
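/*
 * Restore the MDICNFG destination and shared-MDIO bits from the NVM
 * after a reset, on 82580 SGMII ports only.
 */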
   14649 static void
   14650 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14651 {
   14652 	uint32_t reg;
   14653 	uint16_t nvmword;
   14654 	int rv;
   14655 
   14656 	if (sc->sc_type != WM_T_82580)
   14657 		return;
   14658 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14659 		return;
   14660 
   14661 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14662 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14663 	if (rv != 0) {
   14664 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14665 		    __func__);
   14666 		return;
   14667 	}
   14668 
   14669 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14670 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14671 		reg |= MDICNFG_DEST;
   14672 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14673 		reg |= MDICNFG_COM_MDIO;
   14674 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14675 }
   14676 
#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

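/*
 * Check whether the PHY answers on the MDIO bus, retrying in MDIO slow
 * mode where the chip supports it.  Called with the PHY semaphore held,
 * as the KASSERT below checks.
 */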
static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
		sc->phy.acquire(sc);
	}
	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "%s: PHY ID is still invalid\n",
		    __func__);
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			if (rv == 0) {
				phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
				wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
				    CV_SMB_CTRL, phyreg);
			}

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

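/*
 * Drive the LANPHYPC pin low for a millisecond (via the CTRL override
 * bits) to power-cycle the PHY, then wait until the PHY configuration
 * is done (CTRL_EXT_LPCD) or a timeout expires.
 */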
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

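/*
 * Set up PCIe LTR (Latency Tolerance Reporting) and OBFF (Optimized
 * Buffer Flush/Fill) for PCH_LPT and newer devices, based on the link
 * speed and the Rx packet buffer size.  Returns 0 on success, -1 on
 * failure.
 */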
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
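		/*
		 * The tolerable latency is, roughly, the time it takes to
		 * fill what is left of the Rx packet buffer at line rate:
		 * rxa KB less room for two maximum-sized frames, converted
		 * to bits, with ns = bits * 1000 / speed-in-Mb/s.
		 */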
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
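		/*
		 * Worked example (illustrative numbers): lat_ns = 2,000,000
		 * shrinks in three division steps, 2,000,000 -> 62,500 ->
		 * 1,954 -> 62, so lat_enc holds scale 3 (2^15 ns units) and
		 * value 62, i.e. about 2.03 ms.
		 */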

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

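		/*
		 * Convert the (possibly clamped) latency back into the
		 * amount of Rx buffer (in KB) that line-rate traffic would
		 * consume in that time - the inverse of the calculation
		 * above; the OBFF high water mark is whatever remains.
		 */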
		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

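		/*
		 * Bounce the function through D3hot and back to D0; the
		 * workaround apparently relies on this power transition to
		 * restart the PLL with the patched iNVM autoload word
		 * programmed above.
		 */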
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}

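/*
 * Keep the IOSF sideband clock ungated when using legacy (INTx)
 * interrupts on PCH_SPT/PCH_CNP; with the clock gated, interrupt
 * deassertion can apparently be missed.  (Rationale inferred from the
 * FEXTNVM7/FEXTNVM9 bits used below.)
 */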
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}
