/*	$NetBSD: if_wm.c,v 1.599 2018/11/20 03:52:03 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Tx multiqueue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.599 2018/11/20 03:52:03 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
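
/*
 * Example DPRINTF usage (a minimal sketch, not compiled in; the flag and
 * message shown are illustrative):
 */
#if 0
	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
#endif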

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * Maximum number of interrupts this driver uses: one per Tx/Rx queue
 * pair plus one for link events.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to compact it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

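/*
 * Both ring sizes are powers of two, so advancing an index is a mask
 * rather than a modulo.  A worked example (illustrative only):
 */
#if 0
	/* With WM_NTXDESC(txq) == 4096, index 4095 wraps back to 0: */
	int next = WM_NEXTTX(txq, 4095);	/* (4095 + 1) & 4095 == 0 */
#endif
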
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
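
/*
 * Worked example for the figure above: a 9018 byte jumbo frame (9000
 * byte payload plus Ethernet header/CRC; the exact size is illustrative)
 * spans howmany(9018, MCLBYTES) = 5 clusters, so 256 descriptors hold
 * roughly 256 / 5 = 51 such frames, i.e. the "room for 50" above.
 */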

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
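
/*
 * Illustrative expansion (not compiled): WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 * and WM_Q_EVCNT_ATTACH() formats the counter name as e.g. "txq00txdw"
 * before registering it with evcnt_attach_dynamic(9).
 */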

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This pcq mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
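
/*
 * The phy/nvm ops vectors select chip-specific acquire/release and
 * access routines at attach time.  A typical call pattern (a minimal
 * sketch; "dev", "phy", "reg" and "val" are placeholders):
 */
#if 0
	if (sc->phy.acquire(sc) == 0) {
		rv = sc->phy.readreg_locked(dev, phy, reg, &val);
		sc->phy.release(sc);
	}
#endif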

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only
					 * MSI-X uses sc_intrs[0] to
					 *   sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
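
/*
 * Typical use of the core lock (a minimal sketch; the surrounding
 * function is hypothetical):
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify softc state ... */
	WM_CORE_UNLOCK(sc);
#endif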

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
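
/*
 * rxq_tailp always points at the m_next slot of the last mbuf (or at
 * rxq_head when the chain is empty), so appends are O(1).  Illustrative
 * sequence (not compiled; m0/m1 are placeholders):
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL	*/
	WM_RXCHAIN_LINK(rxq, m0);	/* rxq_head = m0	*/
	WM_RXCHAIN_LINK(rxq, m1);	/* m0->m_next = m1	*/
#endif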

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

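/*
 * CSR_WRITE_FLUSH() forces posted PCI writes out by reading a harmless
 * register.  A typical reset sequence (a sketch; the delay value is
 * illustrative):
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);
	delay(5000);
#endif
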
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
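
/*
 * The _LO/_HI pairs split a descriptor ring bus address across the
 * chip's 32-bit base address register pairs; on a 32-bit bus_addr_t the
 * high half constant-folds to 0.  A sketch of the intended use (tdbal
 * and tdbah stand for the Tx descriptor base registers, which are
 * queue-indexed on newer chips; illustrative only):
 */
#if 0
	CSR_WRITE(sc, tdbal, WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, tdbah, WM_CDTXADDR_HI(txq, 0));
#endif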

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions do not access MII registers; they access
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
   1332 	  WM_T_PCH2,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1334 	  "82575EB dual-1000baseT Ethernet",
   1335 	  WM_T_82575,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1337 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1338 	  WM_T_82575,		WMP_F_SERDES },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1340 	  "82575GB quad-1000baseT Ethernet",
   1341 	  WM_T_82575,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1343 	  "82575GB quad-1000baseT Ethernet (PM)",
   1344 	  WM_T_82575,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1346 	  "82576 1000BaseT Ethernet",
   1347 	  WM_T_82576,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1349 	  "82576 1000BaseX Ethernet",
   1350 	  WM_T_82576,		WMP_F_FIBER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1353 	  "82576 gigabit Ethernet (SERDES)",
   1354 	  WM_T_82576,		WMP_F_SERDES },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1357 	  "82576 quad-1000BaseT Ethernet",
   1358 	  WM_T_82576,		WMP_F_COPPER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1361 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1362 	  WM_T_82576,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1365 	  "82576 gigabit Ethernet",
   1366 	  WM_T_82576,		WMP_F_COPPER },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1369 	  "82576 gigabit Ethernet (SERDES)",
   1370 	  WM_T_82576,		WMP_F_SERDES },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1372 	  "82576 quad-gigabit Ethernet (SERDES)",
   1373 	  WM_T_82576,		WMP_F_SERDES },
   1374 
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1376 	  "82580 1000BaseT Ethernet",
   1377 	  WM_T_82580,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1379 	  "82580 1000BaseX Ethernet",
   1380 	  WM_T_82580,		WMP_F_FIBER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1383 	  "82580 1000BaseT Ethernet (SERDES)",
   1384 	  WM_T_82580,		WMP_F_SERDES },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1387 	  "82580 gigabit Ethernet (SGMII)",
   1388 	  WM_T_82580,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1390 	  "82580 dual-1000BaseT Ethernet",
   1391 	  WM_T_82580,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1394 	  "82580 quad-1000BaseX Ethernet",
   1395 	  WM_T_82580,		WMP_F_FIBER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1398 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1399 	  WM_T_82580,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1402 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1403 	  WM_T_82580,		WMP_F_SERDES },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1406 	  "DH89XXCC 1000BASE-KX Ethernet",
   1407 	  WM_T_82580,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1410 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1411 	  WM_T_82580,		WMP_F_SERDES },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1414 	  "I350 Gigabit Network Connection",
   1415 	  WM_T_I350,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1418 	  "I350 Gigabit Fiber Network Connection",
   1419 	  WM_T_I350,		WMP_F_FIBER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1422 	  "I350 Gigabit Backplane Connection",
   1423 	  WM_T_I350,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1426 	  "I350 Quad Port Gigabit Ethernet",
   1427 	  WM_T_I350,		WMP_F_SERDES },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1430 	  "I350 Gigabit Connection",
   1431 	  WM_T_I350,		WMP_F_COPPER },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1434 	  "I354 Gigabit Ethernet (KX)",
   1435 	  WM_T_I354,		WMP_F_SERDES },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1438 	  "I354 Gigabit Ethernet (SGMII)",
   1439 	  WM_T_I354,		WMP_F_COPPER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1442 	  "I354 Gigabit Ethernet (2.5G)",
   1443 	  WM_T_I354,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1446 	  "I210-T1 Ethernet Server Adapter",
   1447 	  WM_T_I210,		WMP_F_COPPER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1450 	  "I210 Ethernet (Copper OEM)",
   1451 	  WM_T_I210,		WMP_F_COPPER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1454 	  "I210 Ethernet (Copper IT)",
   1455 	  WM_T_I210,		WMP_F_COPPER },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1458 	  "I210 Ethernet (FLASH less)",
   1459 	  WM_T_I210,		WMP_F_COPPER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1462 	  "I210 Gigabit Ethernet (Fiber)",
   1463 	  WM_T_I210,		WMP_F_FIBER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1466 	  "I210 Gigabit Ethernet (SERDES)",
   1467 	  WM_T_I210,		WMP_F_SERDES },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1470 	  "I210 Gigabit Ethernet (FLASH less)",
   1471 	  WM_T_I210,		WMP_F_SERDES },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1474 	  "I210 Gigabit Ethernet (SGMII)",
   1475 	  WM_T_I210,		WMP_F_COPPER },
   1476 
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1478 	  "I211 Ethernet (COPPER)",
   1479 	  WM_T_I211,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1481 	  "I217 V Ethernet Connection",
   1482 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1484 	  "I217 LM Ethernet Connection",
   1485 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1487 	  "I218 V Ethernet Connection",
   1488 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1490 	  "I218 V Ethernet Connection",
   1491 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1493 	  "I218 V Ethernet Connection",
   1494 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1496 	  "I218 LM Ethernet Connection",
   1497 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1499 	  "I218 LM Ethernet Connection",
   1500 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1502 	  "I218 LM Ethernet Connection",
   1503 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1505 	  "I219 V Ethernet Connection",
   1506 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1508 	  "I219 V Ethernet Connection",
   1509 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1511 	  "I219 V Ethernet Connection",
   1512 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1514 	  "I219 V Ethernet Connection",
   1515 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1517 	  "I219 LM Ethernet Connection",
   1518 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1520 	  "I219 LM Ethernet Connection",
   1521 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1523 	  "I219 LM Ethernet Connection",
   1524 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1526 	  "I219 LM Ethernet Connection",
   1527 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1529 	  "I219 LM Ethernet Connection",
   1530 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1532 	  "I219 V Ethernet Connection",
   1533 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1535 	  "I219 V Ethernet Connection",
   1536 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1538 	  "I219 LM Ethernet Connection",
   1539 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_CNP,		WMP_F_COPPER },
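         	/* Sentinel: wm_lookup() stops at the first NULL wmp_name. */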
   1543 	{ 0,			0,
   1544 	  NULL,
   1545 	  0,			0 },
   1546 };
   1547 
   1548 /*
   1549  * Register read/write functions.
   1550  * Other than CSR_{READ|WRITE}().
   1551  */
   1552 
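         /*
          * The I/O BAR is a two-register indirect window: the target
          * register offset is written at BAR offset 0 and the data is
          * then read or written at BAR offset 4, as the helpers below
          * show.
          */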
   1553 #if 0 /* Not currently used */
   1554 static inline uint32_t
   1555 wm_io_read(struct wm_softc *sc, int reg)
   1556 {
   1557 
   1558 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1559 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1560 }
   1561 #endif
   1562 
   1563 static inline void
   1564 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1565 {
   1566 
   1567 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1568 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1569 }
   1570 
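         /*
          * Write one byte of an 8-bit-wide indirect 82575 controller
          * register: the data and the byte offset are packed into a
          * single register write, and the READY bit is then polled in
          * 5us steps, up to SCTL_CTL_POLL_TIMEOUT tries.
          */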
   1571 static inline void
   1572 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1573     uint32_t data)
   1574 {
   1575 	uint32_t regval;
   1576 	int i;
   1577 
   1578 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1579 
   1580 	CSR_WRITE(sc, reg, regval);
   1581 
   1582 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1583 		delay(5);
   1584 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1585 			break;
   1586 	}
   1587 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1588 		aprint_error("%s: WARNING:"
   1589 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1590 		    device_xname(sc->sc_dev), reg);
   1591 	}
   1592 }
   1593 
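         /*
          * Store a DMA address into a two-word descriptor field in
          * little-endian form.  For example, with a 64-bit bus_addr_t,
          * v = 0x123456780 stores wa_low = 0x23456780 and wa_high = 0x1;
          * with a 32-bit bus_addr_t the high word is simply zeroed.
          */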
   1594 static inline void
   1595 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1596 {
   1597 	wa->wa_low = htole32(v & 0xffffffffU);
   1598 	if (sizeof(bus_addr_t) == 8)
   1599 		wa->wa_high = htole32((uint64_t) v >> 32);
   1600 	else
   1601 		wa->wa_high = 0;
   1602 }
   1603 
   1604 /*
   1605  * Descriptor sync/init functions.
   1606  */
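         /*
          * wm_cdtxsync() syncs "num" Tx descriptors starting at "start",
          * splitting the bus_dmamap_sync() in two when the range wraps
          * around the end of the ring: e.g. wm_cdtxsync(txq, 250, 10, ops)
          * on a 256-descriptor ring syncs descriptors 250..255, then 0..3.
          */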
   1607 static inline void
   1608 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1609 {
   1610 	struct wm_softc *sc = txq->txq_sc;
   1611 
   1612 	/* If it will wrap around, sync to the end of the ring. */
   1613 	if ((start + num) > WM_NTXDESC(txq)) {
   1614 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1615 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1616 		    (WM_NTXDESC(txq) - start), ops);
   1617 		num -= (WM_NTXDESC(txq) - start);
   1618 		start = 0;
   1619 	}
   1620 
   1621 	/* Now sync whatever is left. */
   1622 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1623 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1624 }
   1625 
   1626 static inline void
   1627 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1628 {
   1629 	struct wm_softc *sc = rxq->rxq_sc;
   1630 
   1631 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1632 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1633 }
   1634 
   1635 static inline void
   1636 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1637 {
   1638 	struct wm_softc *sc = rxq->rxq_sc;
   1639 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1640 	struct mbuf *m = rxs->rxs_mbuf;
   1641 
   1642 	/*
   1643 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1644 	 * so that the payload after the Ethernet header is aligned
   1645 	 * to a 4-byte boundary.
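         	 * (The Ethernet header is 14 bytes long, so the 2-byte shift
         	 * leaves the IP header on a 4-byte boundary.)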
    1646 	 *
   1647 	 * XXX BRAINDAMAGE ALERT!
   1648 	 * The stupid chip uses the same size for every buffer, which
   1649 	 * is set in the Receive Control register.  We are using the 2K
   1650 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1651 	 * reason, we can't "scoot" packets longer than the standard
   1652 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1653 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1654 	 * the upper layer copy the headers.
   1655 	 */
   1656 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1657 
   1658 	if (sc->sc_type == WM_T_82574) {
   1659 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1660 		rxd->erx_data.erxd_addr =
   1661 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1662 		rxd->erx_data.erxd_dd = 0;
   1663 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1664 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1665 
   1666 		rxd->nqrx_data.nrxd_paddr =
   1667 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1668 		/* Currently, split header is not supported. */
   1669 		rxd->nqrx_data.nrxd_haddr = 0;
   1670 	} else {
   1671 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1672 
   1673 		wm_set_dma_addr(&rxd->wrx_addr,
   1674 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1675 		rxd->wrx_len = 0;
   1676 		rxd->wrx_cksum = 0;
   1677 		rxd->wrx_status = 0;
   1678 		rxd->wrx_errors = 0;
   1679 		rxd->wrx_special = 0;
   1680 	}
   1681 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1682 
   1683 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1684 }
   1685 
   1686 /*
   1687  * Device driver interface functions and commonly used functions.
   1688  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1689  */
   1690 
   1691 /* Lookup supported device table */
   1692 static const struct wm_product *
   1693 wm_lookup(const struct pci_attach_args *pa)
   1694 {
   1695 	const struct wm_product *wmp;
   1696 
   1697 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1698 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1699 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1700 			return wmp;
   1701 	}
   1702 	return NULL;
   1703 }
   1704 
   1705 /* The match function (ca_match) */
   1706 static int
   1707 wm_match(device_t parent, cfdata_t cf, void *aux)
   1708 {
   1709 	struct pci_attach_args *pa = aux;
   1710 
   1711 	if (wm_lookup(pa) != NULL)
   1712 		return 1;
   1713 
   1714 	return 0;
   1715 }
   1716 
   1717 /* The attach function (ca_attach) */
   1718 static void
   1719 wm_attach(device_t parent, device_t self, void *aux)
   1720 {
   1721 	struct wm_softc *sc = device_private(self);
   1722 	struct pci_attach_args *pa = aux;
   1723 	prop_dictionary_t dict;
   1724 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1725 	pci_chipset_tag_t pc = pa->pa_pc;
   1726 	int counts[PCI_INTR_TYPE_SIZE];
   1727 	pci_intr_type_t max_type;
   1728 	const char *eetype, *xname;
   1729 	bus_space_tag_t memt;
   1730 	bus_space_handle_t memh;
   1731 	bus_size_t memsize;
   1732 	int memh_valid;
   1733 	int i, error;
   1734 	const struct wm_product *wmp;
   1735 	prop_data_t ea;
   1736 	prop_number_t pn;
   1737 	uint8_t enaddr[ETHER_ADDR_LEN];
   1738 	char buf[256];
   1739 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1740 	pcireg_t preg, memtype;
   1741 	uint16_t eeprom_data, apme_mask;
   1742 	bool force_clear_smbi;
   1743 	uint32_t link_mode;
   1744 	uint32_t reg;
   1745 
   1746 	sc->sc_dev = self;
   1747 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1748 	sc->sc_core_stopping = false;
   1749 
   1750 	wmp = wm_lookup(pa);
   1751 #ifdef DIAGNOSTIC
   1752 	if (wmp == NULL) {
   1753 		printf("\n");
   1754 		panic("wm_attach: impossible");
   1755 	}
   1756 #endif
   1757 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1758 
   1759 	sc->sc_pc = pa->pa_pc;
   1760 	sc->sc_pcitag = pa->pa_tag;
   1761 
   1762 	if (pci_dma64_available(pa))
   1763 		sc->sc_dmat = pa->pa_dmat64;
   1764 	else
   1765 		sc->sc_dmat = pa->pa_dmat;
   1766 
   1767 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1768 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1769 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1770 
   1771 	sc->sc_type = wmp->wmp_type;
   1772 
   1773 	/* Set default function pointers */
   1774 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1775 	sc->phy.release = sc->nvm.release = wm_put_null;
   1776 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1777 
   1778 	if (sc->sc_type < WM_T_82543) {
   1779 		if (sc->sc_rev < 2) {
   1780 			aprint_error_dev(sc->sc_dev,
   1781 			    "i82542 must be at least rev. 2\n");
   1782 			return;
   1783 		}
   1784 		if (sc->sc_rev < 3)
   1785 			sc->sc_type = WM_T_82542_2_0;
   1786 	}
   1787 
   1788 	/*
   1789 	 * Disable MSI for Errata:
   1790 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1791 	 *
   1792 	 *  82544: Errata 25
   1793 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1794 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1795 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1796 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1797 	 *
   1798 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1799 	 *
   1800 	 *  82571 & 82572: Errata 63
   1801 	 */
   1802 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1803 	    || (sc->sc_type == WM_T_82572))
   1804 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1805 
   1806 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1807 	    || (sc->sc_type == WM_T_82580)
   1808 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1809 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1810 		sc->sc_flags |= WM_F_NEWQUEUE;
   1811 
   1812 	/* Set device properties (mactype) */
   1813 	dict = device_properties(sc->sc_dev);
   1814 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1815 
   1816 	/*
    1817 	 * Map the device.  All devices support memory-mapped access,
   1818 	 * and it is really required for normal operation.
   1819 	 */
   1820 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1821 	switch (memtype) {
   1822 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1823 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1824 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1825 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1826 		break;
   1827 	default:
   1828 		memh_valid = 0;
   1829 		break;
   1830 	}
   1831 
   1832 	if (memh_valid) {
   1833 		sc->sc_st = memt;
   1834 		sc->sc_sh = memh;
   1835 		sc->sc_ss = memsize;
   1836 	} else {
   1837 		aprint_error_dev(sc->sc_dev,
   1838 		    "unable to map device registers\n");
   1839 		return;
   1840 	}
   1841 
   1842 	/*
   1843 	 * In addition, i82544 and later support I/O mapped indirect
   1844 	 * register access.  It is not desirable (nor supported in
   1845 	 * this driver) to use it for normal operation, though it is
   1846 	 * required to work around bugs in some chip versions.
   1847 	 */
   1848 	if (sc->sc_type >= WM_T_82544) {
   1849 		/* First we have to find the I/O BAR. */
   1850 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1851 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1852 			if (memtype == PCI_MAPREG_TYPE_IO)
   1853 				break;
   1854 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1855 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1856 				i += 4;	/* skip high bits, too */
   1857 		}
   1858 		if (i < PCI_MAPREG_END) {
   1859 			/*
    1860 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1861 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1862 			 * That's no problem, because newer chips don't have
    1863 			 * this bug.
    1864 			 *
    1865 			 * The i8254x apparently doesn't respond when the
    1866 			 * I/O BAR is 0, which looks as if it has never
    1867 			 * been configured.
   1868 			 */
   1869 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1870 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1871 				aprint_error_dev(sc->sc_dev,
   1872 				    "WARNING: I/O BAR at zero.\n");
   1873 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1874 					0, &sc->sc_iot, &sc->sc_ioh,
   1875 					NULL, &sc->sc_ios) == 0) {
   1876 				sc->sc_flags |= WM_F_IOH_VALID;
   1877 			} else
   1878 				aprint_error_dev(sc->sc_dev,
   1879 				    "WARNING: unable to map I/O space\n");
   1880 		}
   1881 
   1882 	}
   1883 
   1884 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1885 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1886 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1887 	if (sc->sc_type < WM_T_82542_2_1)
   1888 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1889 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1890 
   1891 	/* power up chip */
   1892 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1893 	    && error != EOPNOTSUPP) {
   1894 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1895 		return;
   1896 	}
   1897 
   1898 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1899 	/*
    1900 	 * If we can use only one queue, don't use MSI-X, to save
    1901 	 * interrupt resources.
   1902 	 */
   1903 	if (sc->sc_nqueues > 1) {
   1904 		max_type = PCI_INTR_TYPE_MSIX;
   1905 		/*
    1906 		 * The 82583 has an MSI-X capability in its PCI configuration
    1907 		 * space but doesn't actually support it; at least the
    1908 		 * documentation says nothing about MSI-X.
   1909 		 */
   1910 		counts[PCI_INTR_TYPE_MSIX]
   1911 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1912 	} else {
   1913 		max_type = PCI_INTR_TYPE_MSI;
   1914 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1915 	}
   1916 
   1917 	/* Allocation settings */
   1918 	counts[PCI_INTR_TYPE_MSI] = 1;
   1919 	counts[PCI_INTR_TYPE_INTX] = 1;
   1920 	/* overridden by disable flags */
   1921 	if (wm_disable_msi != 0) {
   1922 		counts[PCI_INTR_TYPE_MSI] = 0;
   1923 		if (wm_disable_msix != 0) {
   1924 			max_type = PCI_INTR_TYPE_INTX;
   1925 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1926 		}
   1927 	} else if (wm_disable_msix != 0) {
   1928 		max_type = PCI_INTR_TYPE_MSI;
   1929 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1930 	}
   1931 
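         	/*
         	 * Interrupt allocation falls back from MSI-X to MSI to INTx:
         	 * when setup of the allocated type fails, the vectors are
         	 * released, max_type is lowered and the allocation is retried.
         	 */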
   1932 alloc_retry:
   1933 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1934 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1935 		return;
   1936 	}
   1937 
   1938 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1939 		error = wm_setup_msix(sc);
   1940 		if (error) {
   1941 			pci_intr_release(pc, sc->sc_intrs,
   1942 			    counts[PCI_INTR_TYPE_MSIX]);
   1943 
   1944 			/* Setup for MSI: Disable MSI-X */
   1945 			max_type = PCI_INTR_TYPE_MSI;
   1946 			counts[PCI_INTR_TYPE_MSI] = 1;
   1947 			counts[PCI_INTR_TYPE_INTX] = 1;
   1948 			goto alloc_retry;
   1949 		}
   1950 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1951 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1952 		error = wm_setup_legacy(sc);
   1953 		if (error) {
   1954 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1955 			    counts[PCI_INTR_TYPE_MSI]);
   1956 
   1957 			/* The next try is for INTx: Disable MSI */
   1958 			max_type = PCI_INTR_TYPE_INTX;
   1959 			counts[PCI_INTR_TYPE_INTX] = 1;
   1960 			goto alloc_retry;
   1961 		}
   1962 	} else {
   1963 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1964 		error = wm_setup_legacy(sc);
   1965 		if (error) {
   1966 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1967 			    counts[PCI_INTR_TYPE_INTX]);
   1968 			return;
   1969 		}
   1970 	}
   1971 
   1972 	/*
   1973 	 * Check the function ID (unit number of the chip).
   1974 	 */
   1975 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1976 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1977 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1978 	    || (sc->sc_type == WM_T_82580)
   1979 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1980 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1981 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1982 	else
   1983 		sc->sc_funcid = 0;
   1984 
   1985 	/*
   1986 	 * Determine a few things about the bus we're connected to.
   1987 	 */
   1988 	if (sc->sc_type < WM_T_82543) {
   1989 		/* We don't really know the bus characteristics here. */
   1990 		sc->sc_bus_speed = 33;
   1991 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1992 		/*
    1993 		 * CSA (Communication Streaming Architecture) is about as fast
    1994 		 * as a 32-bit 66MHz PCI bus.
   1995 		 */
   1996 		sc->sc_flags |= WM_F_CSA;
   1997 		sc->sc_bus_speed = 66;
   1998 		aprint_verbose_dev(sc->sc_dev,
   1999 		    "Communication Streaming Architecture\n");
   2000 		if (sc->sc_type == WM_T_82547) {
   2001 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2002 			callout_setfunc(&sc->sc_txfifo_ch,
   2003 			    wm_82547_txfifo_stall, sc);
   2004 			aprint_verbose_dev(sc->sc_dev,
   2005 			    "using 82547 Tx FIFO stall work-around\n");
   2006 		}
   2007 	} else if (sc->sc_type >= WM_T_82571) {
   2008 		sc->sc_flags |= WM_F_PCIE;
   2009 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2010 		    && (sc->sc_type != WM_T_ICH10)
   2011 		    && (sc->sc_type != WM_T_PCH)
   2012 		    && (sc->sc_type != WM_T_PCH2)
   2013 		    && (sc->sc_type != WM_T_PCH_LPT)
   2014 		    && (sc->sc_type != WM_T_PCH_SPT)
   2015 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2016 			/* ICH* and PCH* have no PCIe capability registers */
   2017 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2018 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2019 				NULL) == 0)
   2020 				aprint_error_dev(sc->sc_dev,
   2021 				    "unable to find PCIe capability\n");
   2022 		}
   2023 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2024 	} else {
   2025 		reg = CSR_READ(sc, WMREG_STATUS);
   2026 		if (reg & STATUS_BUS64)
   2027 			sc->sc_flags |= WM_F_BUS64;
   2028 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2029 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2030 
   2031 			sc->sc_flags |= WM_F_PCIX;
   2032 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2033 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2034 				aprint_error_dev(sc->sc_dev,
   2035 				    "unable to find PCIX capability\n");
   2036 			else if (sc->sc_type != WM_T_82545_3 &&
   2037 				 sc->sc_type != WM_T_82546_3) {
   2038 				/*
   2039 				 * Work around a problem caused by the BIOS
   2040 				 * setting the max memory read byte count
   2041 				 * incorrectly.
   2042 				 */
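         				/*
         				 * Both fields encode the limit as
         				 * 512 << n bytes (512..4096), which
         				 * is what the message below prints.
         				 */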
   2043 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2044 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2045 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2046 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2047 
   2048 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2049 				    PCIX_CMD_BYTECNT_SHIFT;
   2050 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2051 				    PCIX_STATUS_MAXB_SHIFT;
   2052 				if (bytecnt > maxb) {
   2053 					aprint_verbose_dev(sc->sc_dev,
   2054 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2055 					    512 << bytecnt, 512 << maxb);
   2056 					pcix_cmd = (pcix_cmd &
   2057 					    ~PCIX_CMD_BYTECNT_MASK) |
   2058 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2059 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2060 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2061 					    pcix_cmd);
   2062 				}
   2063 			}
   2064 		}
   2065 		/*
   2066 		 * The quad port adapter is special; it has a PCIX-PCIX
   2067 		 * bridge on the board, and can run the secondary bus at
   2068 		 * a higher speed.
   2069 		 */
   2070 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2071 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2072 								      : 66;
   2073 		} else if (sc->sc_flags & WM_F_PCIX) {
   2074 			switch (reg & STATUS_PCIXSPD_MASK) {
   2075 			case STATUS_PCIXSPD_50_66:
   2076 				sc->sc_bus_speed = 66;
   2077 				break;
   2078 			case STATUS_PCIXSPD_66_100:
   2079 				sc->sc_bus_speed = 100;
   2080 				break;
   2081 			case STATUS_PCIXSPD_100_133:
   2082 				sc->sc_bus_speed = 133;
   2083 				break;
   2084 			default:
   2085 				aprint_error_dev(sc->sc_dev,
   2086 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2087 				    reg & STATUS_PCIXSPD_MASK);
   2088 				sc->sc_bus_speed = 66;
   2089 				break;
   2090 			}
   2091 		} else
   2092 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2093 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2094 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2095 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2096 	}
   2097 
    2098 	/* Disable ASPM L0s and/or L1 as a workaround */
   2099 	wm_disable_aspm(sc);
   2100 
   2101 	/* clear interesting stat counters */
   2102 	CSR_READ(sc, WMREG_COLC);
   2103 	CSR_READ(sc, WMREG_RXERRC);
   2104 
   2105 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2106 	    || (sc->sc_type >= WM_T_ICH8))
   2107 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2108 	if (sc->sc_type >= WM_T_ICH8)
   2109 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2110 
   2111 	/* Set PHY, NVM mutex related stuff */
   2112 	switch (sc->sc_type) {
   2113 	case WM_T_82542_2_0:
   2114 	case WM_T_82542_2_1:
   2115 	case WM_T_82543:
   2116 	case WM_T_82544:
   2117 		/* Microwire */
   2118 		sc->nvm.read = wm_nvm_read_uwire;
   2119 		sc->sc_nvm_wordsize = 64;
   2120 		sc->sc_nvm_addrbits = 6;
   2121 		break;
   2122 	case WM_T_82540:
   2123 	case WM_T_82545:
   2124 	case WM_T_82545_3:
   2125 	case WM_T_82546:
   2126 	case WM_T_82546_3:
   2127 		/* Microwire */
   2128 		sc->nvm.read = wm_nvm_read_uwire;
   2129 		reg = CSR_READ(sc, WMREG_EECD);
   2130 		if (reg & EECD_EE_SIZE) {
   2131 			sc->sc_nvm_wordsize = 256;
   2132 			sc->sc_nvm_addrbits = 8;
   2133 		} else {
   2134 			sc->sc_nvm_wordsize = 64;
   2135 			sc->sc_nvm_addrbits = 6;
   2136 		}
   2137 		sc->sc_flags |= WM_F_LOCK_EECD;
   2138 		sc->nvm.acquire = wm_get_eecd;
   2139 		sc->nvm.release = wm_put_eecd;
   2140 		break;
   2141 	case WM_T_82541:
   2142 	case WM_T_82541_2:
   2143 	case WM_T_82547:
   2144 	case WM_T_82547_2:
   2145 		reg = CSR_READ(sc, WMREG_EECD);
   2146 		/*
    2147 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2148 		 * 8254[17], so set the flags and functions before calling it.
   2149 		 */
   2150 		sc->sc_flags |= WM_F_LOCK_EECD;
   2151 		sc->nvm.acquire = wm_get_eecd;
   2152 		sc->nvm.release = wm_put_eecd;
   2153 		if (reg & EECD_EE_TYPE) {
   2154 			/* SPI */
   2155 			sc->nvm.read = wm_nvm_read_spi;
   2156 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2157 			wm_nvm_set_addrbits_size_eecd(sc);
   2158 		} else {
   2159 			/* Microwire */
   2160 			sc->nvm.read = wm_nvm_read_uwire;
   2161 			if ((reg & EECD_EE_ABITS) != 0) {
   2162 				sc->sc_nvm_wordsize = 256;
   2163 				sc->sc_nvm_addrbits = 8;
   2164 			} else {
   2165 				sc->sc_nvm_wordsize = 64;
   2166 				sc->sc_nvm_addrbits = 6;
   2167 			}
   2168 		}
   2169 		break;
   2170 	case WM_T_82571:
   2171 	case WM_T_82572:
   2172 		/* SPI */
   2173 		sc->nvm.read = wm_nvm_read_eerd;
    2174 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2175 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2176 		wm_nvm_set_addrbits_size_eecd(sc);
   2177 		sc->phy.acquire = wm_get_swsm_semaphore;
   2178 		sc->phy.release = wm_put_swsm_semaphore;
   2179 		sc->nvm.acquire = wm_get_nvm_82571;
   2180 		sc->nvm.release = wm_put_nvm_82571;
   2181 		break;
   2182 	case WM_T_82573:
   2183 	case WM_T_82574:
   2184 	case WM_T_82583:
   2185 		sc->nvm.read = wm_nvm_read_eerd;
    2186 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2187 		if (sc->sc_type == WM_T_82573) {
   2188 			sc->phy.acquire = wm_get_swsm_semaphore;
   2189 			sc->phy.release = wm_put_swsm_semaphore;
   2190 			sc->nvm.acquire = wm_get_nvm_82571;
   2191 			sc->nvm.release = wm_put_nvm_82571;
   2192 		} else {
   2193 			/* Both PHY and NVM use the same semaphore. */
   2194 			sc->phy.acquire = sc->nvm.acquire
   2195 			    = wm_get_swfwhw_semaphore;
   2196 			sc->phy.release = sc->nvm.release
   2197 			    = wm_put_swfwhw_semaphore;
   2198 		}
   2199 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2200 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2201 			sc->sc_nvm_wordsize = 2048;
   2202 		} else {
   2203 			/* SPI */
   2204 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2205 			wm_nvm_set_addrbits_size_eecd(sc);
   2206 		}
   2207 		break;
   2208 	case WM_T_82575:
   2209 	case WM_T_82576:
   2210 	case WM_T_82580:
   2211 	case WM_T_I350:
   2212 	case WM_T_I354:
   2213 	case WM_T_80003:
   2214 		/* SPI */
   2215 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2216 		wm_nvm_set_addrbits_size_eecd(sc);
   2217 		if ((sc->sc_type == WM_T_80003)
   2218 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2219 			sc->nvm.read = wm_nvm_read_eerd;
   2220 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2221 		} else {
   2222 			sc->nvm.read = wm_nvm_read_spi;
   2223 			sc->sc_flags |= WM_F_LOCK_EECD;
   2224 		}
   2225 		sc->phy.acquire = wm_get_phy_82575;
   2226 		sc->phy.release = wm_put_phy_82575;
   2227 		sc->nvm.acquire = wm_get_nvm_80003;
   2228 		sc->nvm.release = wm_put_nvm_80003;
   2229 		break;
   2230 	case WM_T_ICH8:
   2231 	case WM_T_ICH9:
   2232 	case WM_T_ICH10:
   2233 	case WM_T_PCH:
   2234 	case WM_T_PCH2:
   2235 	case WM_T_PCH_LPT:
   2236 		sc->nvm.read = wm_nvm_read_ich8;
   2237 		/* FLASH */
   2238 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2239 		sc->sc_nvm_wordsize = 2048;
   2240 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2241 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2242 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2243 			aprint_error_dev(sc->sc_dev,
   2244 			    "can't map FLASH registers\n");
   2245 			goto out;
   2246 		}
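         		/*
         		 * GFPREG gives the first and last flash sectors of the
         		 * NVM region in its low and high halves; convert sectors
         		 * to bytes, then to 16-bit words, and split the region
         		 * across its two banks.
         		 */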
   2247 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2248 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2249 		    ICH_FLASH_SECTOR_SIZE;
   2250 		sc->sc_ich8_flash_bank_size =
   2251 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2252 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2253 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2254 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2255 		sc->sc_flashreg_offset = 0;
   2256 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2257 		sc->phy.release = wm_put_swflag_ich8lan;
   2258 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2259 		sc->nvm.release = wm_put_nvm_ich8lan;
   2260 		break;
   2261 	case WM_T_PCH_SPT:
   2262 	case WM_T_PCH_CNP:
   2263 		sc->nvm.read = wm_nvm_read_spt;
    2264 		/* SPT has no GFPREG; flash registers are mapped through BAR0 */
   2265 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2266 		sc->sc_flasht = sc->sc_st;
   2267 		sc->sc_flashh = sc->sc_sh;
   2268 		sc->sc_ich8_flash_base = 0;
   2269 		sc->sc_nvm_wordsize =
   2270 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2271 		    * NVM_SIZE_MULTIPLIER;
    2272 		/* It is the size in bytes; we want words */
   2273 		sc->sc_nvm_wordsize /= 2;
   2274 		/* assume 2 banks */
   2275 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2276 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2277 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2278 		sc->phy.release = wm_put_swflag_ich8lan;
   2279 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2280 		sc->nvm.release = wm_put_nvm_ich8lan;
   2281 		break;
   2282 	case WM_T_I210:
   2283 	case WM_T_I211:
    2284 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2285 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2286 		if (wm_nvm_flash_presence_i210(sc)) {
   2287 			sc->nvm.read = wm_nvm_read_eerd;
   2288 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2289 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2290 			wm_nvm_set_addrbits_size_eecd(sc);
   2291 		} else {
   2292 			sc->nvm.read = wm_nvm_read_invm;
   2293 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2294 			sc->sc_nvm_wordsize = INVM_SIZE;
   2295 		}
   2296 		sc->phy.acquire = wm_get_phy_82575;
   2297 		sc->phy.release = wm_put_phy_82575;
   2298 		sc->nvm.acquire = wm_get_nvm_80003;
   2299 		sc->nvm.release = wm_put_nvm_80003;
   2300 		break;
   2301 	default:
   2302 		break;
   2303 	}
   2304 
   2305 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2306 	switch (sc->sc_type) {
   2307 	case WM_T_82571:
   2308 	case WM_T_82572:
   2309 		reg = CSR_READ(sc, WMREG_SWSM2);
   2310 		if ((reg & SWSM2_LOCK) == 0) {
   2311 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2312 			force_clear_smbi = true;
   2313 		} else
   2314 			force_clear_smbi = false;
   2315 		break;
   2316 	case WM_T_82573:
   2317 	case WM_T_82574:
   2318 	case WM_T_82583:
   2319 		force_clear_smbi = true;
   2320 		break;
   2321 	default:
   2322 		force_clear_smbi = false;
   2323 		break;
   2324 	}
   2325 	if (force_clear_smbi) {
   2326 		reg = CSR_READ(sc, WMREG_SWSM);
   2327 		if ((reg & SWSM_SMBI) != 0)
   2328 			aprint_error_dev(sc->sc_dev,
   2329 			    "Please update the Bootagent\n");
   2330 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2331 	}
   2332 
   2333 	/*
    2334 	 * Defer printing the EEPROM type until after verifying the checksum.
   2335 	 * This allows the EEPROM type to be printed correctly in the case
   2336 	 * that no EEPROM is attached.
   2337 	 */
   2338 	/*
   2339 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2340 	 * this for later, so we can fail future reads from the EEPROM.
   2341 	 */
   2342 	if (wm_nvm_validate_checksum(sc)) {
   2343 		/*
    2344 		 * Retry the check, because some PCI-e parts fail the
    2345 		 * first one due to the link being in a sleep state.
   2346 		 */
   2347 		if (wm_nvm_validate_checksum(sc))
   2348 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2349 	}
   2350 
   2351 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2352 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2353 	else {
   2354 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2355 		    sc->sc_nvm_wordsize);
   2356 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2357 			aprint_verbose("iNVM");
   2358 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2359 			aprint_verbose("FLASH(HW)");
   2360 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2361 			aprint_verbose("FLASH");
   2362 		else {
   2363 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2364 				eetype = "SPI";
   2365 			else
   2366 				eetype = "MicroWire";
   2367 			aprint_verbose("(%d address bits) %s EEPROM",
   2368 			    sc->sc_nvm_addrbits, eetype);
   2369 		}
   2370 	}
   2371 	wm_nvm_version(sc);
   2372 	aprint_verbose("\n");
   2373 
   2374 	/*
    2375 	 * XXX This is the first call of wm_gmii_setup_phytype. Its result
    2376 	 * might be incorrect.
   2377 	 */
   2378 	wm_gmii_setup_phytype(sc, 0, 0);
   2379 
   2380 	/* Reset the chip to a known state. */
   2381 	wm_reset(sc);
   2382 
   2383 	/*
   2384 	 * Check for I21[01] PLL workaround.
   2385 	 *
   2386 	 * Three cases:
   2387 	 * a) Chip is I211.
   2388 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2389 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2390 	 */
   2391 	if (sc->sc_type == WM_T_I211)
   2392 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2393 	if (sc->sc_type == WM_T_I210) {
   2394 		if (!wm_nvm_flash_presence_i210(sc))
   2395 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2396 		else if ((sc->sc_nvm_ver_major < 3)
   2397 		    || ((sc->sc_nvm_ver_major == 3)
   2398 			&& (sc->sc_nvm_ver_minor < 25))) {
   2399 			aprint_verbose_dev(sc->sc_dev,
   2400 			    "ROM image version %d.%d is older than 3.25\n",
   2401 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2402 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2403 		}
   2404 	}
   2405 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2406 		wm_pll_workaround_i210(sc);
   2407 
   2408 	wm_get_wakeup(sc);
   2409 
   2410 	/* Non-AMT based hardware can now take control from firmware */
   2411 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2412 		wm_get_hw_control(sc);
   2413 
   2414 	/*
    2415 	 * Read the Ethernet address from the EEPROM, unless it was
    2416 	 * already found in the device properties.
   2417 	 */
   2418 	ea = prop_dictionary_get(dict, "mac-address");
   2419 	if (ea != NULL) {
   2420 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2421 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2422 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2423 	} else {
   2424 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2425 			aprint_error_dev(sc->sc_dev,
   2426 			    "unable to read Ethernet address\n");
   2427 			goto out;
   2428 		}
   2429 	}
   2430 
   2431 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2432 	    ether_sprintf(enaddr));
   2433 
   2434 	/*
   2435 	 * Read the config info from the EEPROM, and set up various
   2436 	 * bits in the control registers based on their contents.
   2437 	 */
   2438 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2439 	if (pn != NULL) {
   2440 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2441 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2442 	} else {
   2443 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2444 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2445 			goto out;
   2446 		}
   2447 	}
   2448 
   2449 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2450 	if (pn != NULL) {
   2451 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2452 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2453 	} else {
   2454 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2455 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2456 			goto out;
   2457 		}
   2458 	}
   2459 
   2460 	/* check for WM_F_WOL */
   2461 	switch (sc->sc_type) {
   2462 	case WM_T_82542_2_0:
   2463 	case WM_T_82542_2_1:
   2464 	case WM_T_82543:
   2465 		/* dummy? */
   2466 		eeprom_data = 0;
   2467 		apme_mask = NVM_CFG3_APME;
   2468 		break;
   2469 	case WM_T_82544:
   2470 		apme_mask = NVM_CFG2_82544_APM_EN;
   2471 		eeprom_data = cfg2;
   2472 		break;
   2473 	case WM_T_82546:
   2474 	case WM_T_82546_3:
   2475 	case WM_T_82571:
   2476 	case WM_T_82572:
   2477 	case WM_T_82573:
   2478 	case WM_T_82574:
   2479 	case WM_T_82583:
   2480 	case WM_T_80003:
   2481 	default:
   2482 		apme_mask = NVM_CFG3_APME;
   2483 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2484 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2485 		break;
   2486 	case WM_T_82575:
   2487 	case WM_T_82576:
   2488 	case WM_T_82580:
   2489 	case WM_T_I350:
   2490 	case WM_T_I354: /* XXX ok? */
   2491 	case WM_T_ICH8:
   2492 	case WM_T_ICH9:
   2493 	case WM_T_ICH10:
   2494 	case WM_T_PCH:
   2495 	case WM_T_PCH2:
   2496 	case WM_T_PCH_LPT:
   2497 	case WM_T_PCH_SPT:
   2498 	case WM_T_PCH_CNP:
   2499 		/* XXX The funcid should be checked on some devices */
   2500 		apme_mask = WUC_APME;
   2501 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2502 		break;
   2503 	}
   2504 
    2505 	/* Check the WM_F_WOL flag after reading the EEPROM settings */
   2506 	if ((eeprom_data & apme_mask) != 0)
   2507 		sc->sc_flags |= WM_F_WOL;
   2508 
   2509 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2510 		/* Check NVM for autonegotiation */
   2511 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2512 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2513 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2514 		}
   2515 	}
   2516 
   2517 	/*
    2518 	 * XXX Some multi-port cards need special handling to disable
    2519 	 * a particular port.
   2520 	 */
   2521 
   2522 	if (sc->sc_type >= WM_T_82544) {
   2523 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2524 		if (pn != NULL) {
   2525 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2526 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2527 		} else {
   2528 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2529 				aprint_error_dev(sc->sc_dev,
   2530 				    "unable to read SWDPIN\n");
   2531 				goto out;
   2532 			}
   2533 		}
   2534 	}
   2535 
   2536 	if (cfg1 & NVM_CFG1_ILOS)
   2537 		sc->sc_ctrl |= CTRL_ILOS;
   2538 
   2539 	/*
   2540 	 * XXX
    2541 	 * This code isn't correct, because pins 2 and 3 are located
    2542 	 * at different positions on newer chips. Check all the datasheets.
    2543 	 *
    2544 	 * Until this is resolved, apply it only to chips up to the 82580.
   2545 	 */
   2546 	if (sc->sc_type <= WM_T_82580) {
   2547 		if (sc->sc_type >= WM_T_82544) {
   2548 			sc->sc_ctrl |=
   2549 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2550 			    CTRL_SWDPIO_SHIFT;
   2551 			sc->sc_ctrl |=
   2552 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2553 			    CTRL_SWDPINS_SHIFT;
   2554 		} else {
   2555 			sc->sc_ctrl |=
   2556 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2557 			    CTRL_SWDPIO_SHIFT;
   2558 		}
   2559 	}
   2560 
    2561 	/* XXX Should this also be done for chips other than the 82580? */
   2562 	if (sc->sc_type == WM_T_82580) {
   2563 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2564 		if (nvmword & __BIT(13))
   2565 			sc->sc_ctrl |= CTRL_ILOS;
   2566 	}
   2567 
   2568 #if 0
   2569 	if (sc->sc_type >= WM_T_82544) {
   2570 		if (cfg1 & NVM_CFG1_IPS0)
   2571 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2572 		if (cfg1 & NVM_CFG1_IPS1)
   2573 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2574 		sc->sc_ctrl_ext |=
   2575 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2576 		    CTRL_EXT_SWDPIO_SHIFT;
   2577 		sc->sc_ctrl_ext |=
   2578 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2579 		    CTRL_EXT_SWDPINS_SHIFT;
   2580 	} else {
   2581 		sc->sc_ctrl_ext |=
   2582 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2583 		    CTRL_EXT_SWDPIO_SHIFT;
   2584 	}
   2585 #endif
   2586 
   2587 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2588 #if 0
   2589 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2590 #endif
   2591 
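         	/*
         	 * K1 is a power-saving link state of the PCH PHY; remember
         	 * whether the NVM enables it for later PHY configuration.
         	 */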
   2592 	if (sc->sc_type == WM_T_PCH) {
   2593 		uint16_t val;
   2594 
   2595 		/* Save the NVM K1 bit setting */
   2596 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2597 
   2598 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2599 			sc->sc_nvm_k1_enabled = 1;
   2600 		else
   2601 			sc->sc_nvm_k1_enabled = 0;
   2602 	}
   2603 
    2604 	/* Determine if we're in GMII, TBI, SERDES or SGMII mode */
   2605 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2606 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2607 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2608 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2609 	    || sc->sc_type == WM_T_82573
   2610 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2611 		/* Copper only */
   2612 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2613 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2614 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2615 	    || (sc->sc_type == WM_T_I211)) {
   2616 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2617 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2618 		switch (link_mode) {
   2619 		case CTRL_EXT_LINK_MODE_1000KX:
   2620 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2621 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2622 			break;
   2623 		case CTRL_EXT_LINK_MODE_SGMII:
   2624 			if (wm_sgmii_uses_mdio(sc)) {
   2625 				aprint_verbose_dev(sc->sc_dev,
   2626 				    "SGMII(MDIO)\n");
   2627 				sc->sc_flags |= WM_F_SGMII;
   2628 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2629 				break;
   2630 			}
   2631 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2632 			/*FALLTHROUGH*/
   2633 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2634 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2635 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2636 				if (link_mode
   2637 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2638 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2639 					sc->sc_flags |= WM_F_SGMII;
   2640 				} else {
   2641 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2642 					aprint_verbose_dev(sc->sc_dev,
   2643 					    "SERDES\n");
   2644 				}
   2645 				break;
   2646 			}
   2647 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2648 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2649 
   2650 			/* Change current link mode setting */
   2651 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2652 			switch (sc->sc_mediatype) {
   2653 			case WM_MEDIATYPE_COPPER:
   2654 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2655 				break;
   2656 			case WM_MEDIATYPE_SERDES:
   2657 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2658 				break;
   2659 			default:
   2660 				break;
   2661 			}
   2662 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2663 			break;
   2664 		case CTRL_EXT_LINK_MODE_GMII:
   2665 		default:
   2666 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2667 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2668 			break;
   2669 		}
   2670 
    2671 		/* The if/else below fully determines the I2C enable bit. */
    2672 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2673 			reg |= CTRL_EXT_I2C_ENA;
    2674 		else
    2675 			reg &= ~CTRL_EXT_I2C_ENA;
   2676 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2677 	} else if (sc->sc_type < WM_T_82543 ||
   2678 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2679 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2680 			aprint_error_dev(sc->sc_dev,
   2681 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2682 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2683 		}
   2684 	} else {
   2685 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2686 			aprint_error_dev(sc->sc_dev,
   2687 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2688 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2689 		}
   2690 	}
   2691 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2692 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2693 
   2694 	/* Set device properties (macflags) */
   2695 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2696 
   2697 	/* Initialize the media structures accordingly. */
   2698 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2699 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2700 	else
   2701 		wm_tbi_mediainit(sc); /* All others */
   2702 
   2703 	ifp = &sc->sc_ethercom.ec_if;
   2704 	xname = device_xname(sc->sc_dev);
   2705 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2706 	ifp->if_softc = sc;
   2707 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2708 #ifdef WM_MPSAFE
   2709 	ifp->if_extflags = IFEF_MPSAFE;
   2710 #endif
   2711 	ifp->if_ioctl = wm_ioctl;
   2712 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2713 		ifp->if_start = wm_nq_start;
   2714 		/*
   2715 		 * When the number of CPUs is one and the controller can use
    2716 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2717 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2718 		 * the other for link status changes.
   2719 		 * In this situation, wm_nq_transmit() is disadvantageous
   2720 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2721 		 */
   2722 		if (wm_is_using_multiqueue(sc))
   2723 			ifp->if_transmit = wm_nq_transmit;
   2724 	} else {
   2725 		ifp->if_start = wm_start;
   2726 		/*
    2727 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2728 		 */
   2729 		if (wm_is_using_multiqueue(sc))
   2730 			ifp->if_transmit = wm_transmit;
   2731 	}
    2732 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as a watchdog. */
   2733 	ifp->if_init = wm_init;
   2734 	ifp->if_stop = wm_stop;
   2735 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2736 	IFQ_SET_READY(&ifp->if_snd);
   2737 
   2738 	/* Check for jumbo frame */
   2739 	switch (sc->sc_type) {
   2740 	case WM_T_82573:
   2741 		/* XXX limited to 9234 if ASPM is disabled */
   2742 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2743 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2744 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2745 		break;
   2746 	case WM_T_82571:
   2747 	case WM_T_82572:
   2748 	case WM_T_82574:
   2749 	case WM_T_82583:
   2750 	case WM_T_82575:
   2751 	case WM_T_82576:
   2752 	case WM_T_82580:
   2753 	case WM_T_I350:
   2754 	case WM_T_I354:
   2755 	case WM_T_I210:
   2756 	case WM_T_I211:
   2757 	case WM_T_80003:
   2758 	case WM_T_ICH9:
   2759 	case WM_T_ICH10:
   2760 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2761 	case WM_T_PCH_LPT:
   2762 	case WM_T_PCH_SPT:
   2763 	case WM_T_PCH_CNP:
   2764 		/* XXX limited to 9234 */
   2765 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2766 		break;
   2767 	case WM_T_PCH:
   2768 		/* XXX limited to 4096 */
   2769 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2770 		break;
   2771 	case WM_T_82542_2_0:
   2772 	case WM_T_82542_2_1:
   2773 	case WM_T_ICH8:
   2774 		/* No support for jumbo frame */
   2775 		break;
   2776 	default:
   2777 		/* ETHER_MAX_LEN_JUMBO */
   2778 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2779 		break;
   2780 	}
   2781 
    2782 	/* If we're an i82543 or greater, we can support VLANs. */
   2783 	if (sc->sc_type >= WM_T_82543)
   2784 		sc->sc_ethercom.ec_capabilities |=
   2785 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2786 
   2787 	/*
    2788 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2789 	 * on i82543 and later.
   2790 	 */
   2791 	if (sc->sc_type >= WM_T_82543) {
   2792 		ifp->if_capabilities |=
   2793 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2794 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2795 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2796 		    IFCAP_CSUM_TCPv6_Tx |
   2797 		    IFCAP_CSUM_UDPv6_Tx;
   2798 	}
   2799 
   2800 	/*
   2801 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2802 	 *
   2803 	 *	82541GI (8086:1076) ... no
   2804 	 *	82572EI (8086:10b9) ... yes
   2805 	 */
   2806 	if (sc->sc_type >= WM_T_82571) {
   2807 		ifp->if_capabilities |=
   2808 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2809 	}
   2810 
   2811 	/*
    2812 	 * If we're an i82544 or greater (except i82547), we can do
   2813 	 * TCP segmentation offload.
   2814 	 */
   2815 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2816 		ifp->if_capabilities |= IFCAP_TSOv4;
   2817 	}
   2818 
   2819 	if (sc->sc_type >= WM_T_82571) {
   2820 		ifp->if_capabilities |= IFCAP_TSOv6;
   2821 	}
   2822 
   2823 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2824 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2825 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2826 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2827 
   2828 #ifdef WM_MPSAFE
   2829 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2830 #else
   2831 	sc->sc_core_lock = NULL;
   2832 #endif
   2833 
   2834 	/* Attach the interface. */
   2835 	error = if_initialize(ifp);
   2836 	if (error != 0) {
   2837 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2838 		    error);
   2839 		return; /* Error */
   2840 	}
   2841 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2842 	ether_ifattach(ifp, enaddr);
   2843 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2844 	if_register(ifp);
   2845 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2846 	    RND_FLAG_DEFAULT);
   2847 
   2848 #ifdef WM_EVENT_COUNTERS
   2849 	/* Attach event counters. */
   2850 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2851 	    NULL, xname, "linkintr");
   2852 
   2853 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2854 	    NULL, xname, "tx_xoff");
   2855 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2856 	    NULL, xname, "tx_xon");
   2857 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2858 	    NULL, xname, "rx_xoff");
   2859 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2860 	    NULL, xname, "rx_xon");
   2861 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2862 	    NULL, xname, "rx_macctl");
   2863 #endif /* WM_EVENT_COUNTERS */
   2864 
   2865 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2866 		pmf_class_network_register(self, ifp);
   2867 	else
   2868 		aprint_error_dev(self, "couldn't establish power handler\n");
   2869 
   2870 	sc->sc_flags |= WM_F_ATTACHED;
   2871  out:
   2872 	return;
   2873 }
   2874 
   2875 /* The detach function (ca_detach) */
   2876 static int
   2877 wm_detach(device_t self, int flags __unused)
   2878 {
   2879 	struct wm_softc *sc = device_private(self);
   2880 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2881 	int i;
   2882 
   2883 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2884 		return 0;
   2885 
    2886 	/* Stop the interface; callouts are stopped inside wm_stop(). */
   2887 	wm_stop(ifp, 1);
   2888 
   2889 	pmf_device_deregister(self);
   2890 
   2891 #ifdef WM_EVENT_COUNTERS
   2892 	evcnt_detach(&sc->sc_ev_linkintr);
   2893 
   2894 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2895 	evcnt_detach(&sc->sc_ev_tx_xon);
   2896 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2897 	evcnt_detach(&sc->sc_ev_rx_xon);
   2898 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2899 #endif /* WM_EVENT_COUNTERS */
   2900 
   2901 	/* Tell the firmware about the release */
   2902 	WM_CORE_LOCK(sc);
   2903 	wm_release_manageability(sc);
   2904 	wm_release_hw_control(sc);
   2905 	wm_enable_wakeup(sc);
   2906 	WM_CORE_UNLOCK(sc);
   2907 
   2908 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2909 
   2910 	/* Delete all remaining media. */
   2911 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2912 
   2913 	ether_ifdetach(ifp);
   2914 	if_detach(ifp);
   2915 	if_percpuq_destroy(sc->sc_ipq);
   2916 
   2917 	/* Unload RX dmamaps and free mbufs */
   2918 	for (i = 0; i < sc->sc_nqueues; i++) {
   2919 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2920 		mutex_enter(rxq->rxq_lock);
   2921 		wm_rxdrain(rxq);
   2922 		mutex_exit(rxq->rxq_lock);
   2923 	}
   2924 	/* Must unlock here */
   2925 
   2926 	/* Disestablish the interrupt handler */
   2927 	for (i = 0; i < sc->sc_nintrs; i++) {
   2928 		if (sc->sc_ihs[i] != NULL) {
   2929 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2930 			sc->sc_ihs[i] = NULL;
   2931 		}
   2932 	}
   2933 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2934 
   2935 	wm_free_txrx_queues(sc);
   2936 
   2937 	/* Unmap the registers */
   2938 	if (sc->sc_ss) {
   2939 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2940 		sc->sc_ss = 0;
   2941 	}
   2942 	if (sc->sc_ios) {
   2943 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2944 		sc->sc_ios = 0;
   2945 	}
   2946 	if (sc->sc_flashs) {
   2947 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2948 		sc->sc_flashs = 0;
   2949 	}
   2950 
   2951 	if (sc->sc_core_lock)
   2952 		mutex_obj_free(sc->sc_core_lock);
   2953 	if (sc->sc_ich_phymtx)
   2954 		mutex_obj_free(sc->sc_ich_phymtx);
   2955 	if (sc->sc_ich_nvmmtx)
   2956 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2957 
   2958 	return 0;
   2959 }
   2960 
   2961 static bool
   2962 wm_suspend(device_t self, const pmf_qual_t *qual)
   2963 {
   2964 	struct wm_softc *sc = device_private(self);
   2965 
   2966 	wm_release_manageability(sc);
   2967 	wm_release_hw_control(sc);
   2968 	wm_enable_wakeup(sc);
   2969 
   2970 	return true;
   2971 }
   2972 
   2973 static bool
   2974 wm_resume(device_t self, const pmf_qual_t *qual)
   2975 {
   2976 	struct wm_softc *sc = device_private(self);
   2977 
   2978 	/* Disable ASPM L0s and/or L1 for workaround */
   2979 	wm_disable_aspm(sc);
   2980 	wm_init_manageability(sc);
   2981 
   2982 	return true;
   2983 }
   2984 
   2985 /*
   2986  * wm_watchdog:		[ifnet interface function]
   2987  *
   2988  *	Watchdog timer handler.
   2989  */
   2990 static void
   2991 wm_watchdog(struct ifnet *ifp)
   2992 {
   2993 	int qid;
   2994 	struct wm_softc *sc = ifp->if_softc;
    2995 	uint16_t hang_queue = 0; /* wm(4) has at most 16 queues (82576). */
   2996 
   2997 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2998 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2999 
   3000 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3001 	}
   3002 
    3003 	/*
    3004 	 * If any of the queues hung up, reset the interface.
    3005 	 */
   3006 	if (hang_queue != 0) {
   3007 		(void) wm_init(ifp);
   3008 
    3009 		/*
    3010 		 * Some upper layer processing (e.g. ALTQ, or a single-CPU
    3011 		 * system) still calls ifp->if_start() directly.
    3012 		 */
   3013 		/* Try to get more packets going. */
   3014 		ifp->if_start(ifp);
   3015 	}
   3016 }
   3017 
   3018 
   3019 static void
   3020 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3021 {
   3022 
   3023 	mutex_enter(txq->txq_lock);
   3024 	if (txq->txq_sending &&
   3025 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3026 		wm_watchdog_txq_locked(ifp, txq, hang);
   3027 	}
   3028 	mutex_exit(txq->txq_lock);
   3029 }
   3030 
   3031 static void
   3032 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3033     uint16_t *hang)
   3034 {
   3035 	struct wm_softc *sc = ifp->if_softc;
   3036 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3037 
   3038 	KASSERT(mutex_owned(txq->txq_lock));
   3039 
   3040 	/*
   3041 	 * Since we're using delayed interrupts, sweep up
   3042 	 * before we report an error.
   3043 	 */
   3044 	wm_txeof(txq, UINT_MAX);
   3045 
   3046 	if (txq->txq_sending)
   3047 		*hang |= __BIT(wmq->wmq_id);
   3048 
   3049 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3050 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3051 		    device_xname(sc->sc_dev));
   3052 	} else {
   3053 #ifdef WM_DEBUG
   3054 		int i, j;
   3055 		struct wm_txsoft *txs;
   3056 #endif
   3057 		log(LOG_ERR,
   3058 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3059 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3060 		    txq->txq_next);
   3061 		ifp->if_oerrors++;
   3062 #ifdef WM_DEBUG
    3063 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    3064 		    i = WM_NEXTTXS(txq, i)) {
    3065 			txs = &txq->txq_soft[i];
    3066 			printf("txs %d tx %d -> %d\n",
    3067 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    3068 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
    3069 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    3070 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    3071 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    3072 					printf("\t %#08x%08x\n",
    3073 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    3074 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    3075 				} else {
    3076 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    3077 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
    3078 					    txq->txq_descs[j].wtx_addr.wa_low);
    3079 					printf("\t %#04x%02x%02x%08x\n",
    3080 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
    3081 					    txq->txq_descs[j].wtx_fields.wtxu_options,
    3082 					    txq->txq_descs[j].wtx_fields.wtxu_status,
    3083 					    txq->txq_descs[j].wtx_cmdlen);
    3084 				}
    3085 				if (j == txs->txs_lastdesc)
    3086 					break;
    3087 			}
    3088 		}
   3089 #endif
   3090 	}
   3091 }
   3092 
   3093 /*
   3094  * wm_tick:
   3095  *
   3096  *	One second timer, used to check link status, sweep up
   3097  *	completed transmit jobs, etc.
   3098  */
   3099 static void
   3100 wm_tick(void *arg)
   3101 {
   3102 	struct wm_softc *sc = arg;
   3103 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3104 #ifndef WM_MPSAFE
   3105 	int s = splnet();
   3106 #endif
   3107 
   3108 	WM_CORE_LOCK(sc);
   3109 
   3110 	if (sc->sc_core_stopping) {
   3111 		WM_CORE_UNLOCK(sc);
   3112 #ifndef WM_MPSAFE
   3113 		splx(s);
   3114 #endif
   3115 		return;
   3116 	}
   3117 
   3118 	if (sc->sc_type >= WM_T_82542_2_1) {
   3119 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3120 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3121 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3122 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3123 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3124 	}
   3125 
   3126 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3127 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3128 	    + CSR_READ(sc, WMREG_CRCERRS)
   3129 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3130 	    + CSR_READ(sc, WMREG_SYMERRC)
   3131 	    + CSR_READ(sc, WMREG_RXERRC)
   3132 	    + CSR_READ(sc, WMREG_SEC)
   3133 	    + CSR_READ(sc, WMREG_CEXTERR)
   3134 	    + CSR_READ(sc, WMREG_RLEC);
    3135 	/*
    3136 	 * WMREG_RNBC is incremented when no receive buffers are available
    3137 	 * in host memory. It does not count dropped packets, because the
    3138 	 * Ethernet controller can still receive packets in that case as
    3139 	 * long as there is space in the PHY's FIFO.
    3140 	 *
    3141 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
    3142 	 * instead of if_iqdrops.
    3143 	 */
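         	/*
         	 * A minimal sketch of such a private counter (sc_ev_rnbc is a
         	 * hypothetical softc member, following the evcnt(9) pattern
         	 * used elsewhere in this driver):
         	 *
         	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
         	 *	    NULL, xname, "rnbc");
         	 *	...
         	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
         	 */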
   3144 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3145 
   3146 	if (sc->sc_flags & WM_F_HAS_MII)
   3147 		mii_tick(&sc->sc_mii);
   3148 	else if ((sc->sc_type >= WM_T_82575)
   3149 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3150 		wm_serdes_tick(sc);
   3151 	else
   3152 		wm_tbi_tick(sc);
   3153 
   3154 	WM_CORE_UNLOCK(sc);
   3155 
   3156 	wm_watchdog(ifp);
   3157 
   3158 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3159 }
   3160 
   3161 static int
   3162 wm_ifflags_cb(struct ethercom *ec)
   3163 {
   3164 	struct ifnet *ifp = &ec->ec_if;
   3165 	struct wm_softc *sc = ifp->if_softc;
   3166 	int rc = 0;
   3167 
   3168 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3169 		device_xname(sc->sc_dev), __func__));
   3170 
   3171 	WM_CORE_LOCK(sc);
   3172 
   3173 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3174 	sc->sc_if_flags = ifp->if_flags;
   3175 
   3176 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3177 		rc = ENETRESET;
   3178 		goto out;
   3179 	}
   3180 
   3181 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3182 		wm_set_filter(sc);
   3183 
   3184 	wm_set_vlan(sc);
   3185 
   3186 out:
   3187 	WM_CORE_UNLOCK(sc);
   3188 
   3189 	return rc;
   3190 }
   3191 
   3192 /*
   3193  * wm_ioctl:		[ifnet interface function]
   3194  *
   3195  *	Handle control requests from the operator.
   3196  */
   3197 static int
   3198 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3199 {
   3200 	struct wm_softc *sc = ifp->if_softc;
   3201 	struct ifreq *ifr = (struct ifreq *) data;
   3202 	struct ifaddr *ifa = (struct ifaddr *)data;
   3203 	struct sockaddr_dl *sdl;
   3204 	int s, error;
   3205 
   3206 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3207 		device_xname(sc->sc_dev), __func__));
   3208 
   3209 #ifndef WM_MPSAFE
   3210 	s = splnet();
   3211 #endif
   3212 	switch (cmd) {
   3213 	case SIOCSIFMEDIA:
   3214 	case SIOCGIFMEDIA:
   3215 		WM_CORE_LOCK(sc);
   3216 		/* Flow control requires full-duplex mode. */
   3217 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3218 		    (ifr->ifr_media & IFM_FDX) == 0)
   3219 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3220 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3221 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3222 				/* We can do both TXPAUSE and RXPAUSE. */
   3223 				ifr->ifr_media |=
   3224 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3225 			}
   3226 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3227 		}
   3228 		WM_CORE_UNLOCK(sc);
   3229 #ifdef WM_MPSAFE
   3230 		s = splnet();
   3231 #endif
   3232 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3233 #ifdef WM_MPSAFE
   3234 		splx(s);
   3235 #endif
   3236 		break;
   3237 	case SIOCINITIFADDR:
   3238 		WM_CORE_LOCK(sc);
   3239 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3240 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3241 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3242 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3243 			/* unicast address is first multicast entry */
   3244 			wm_set_filter(sc);
   3245 			error = 0;
   3246 			WM_CORE_UNLOCK(sc);
   3247 			break;
   3248 		}
   3249 		WM_CORE_UNLOCK(sc);
   3250 		/*FALLTHROUGH*/
   3251 	default:
   3252 #ifdef WM_MPSAFE
   3253 		s = splnet();
   3254 #endif
   3255 		/* It may call wm_start, so unlock here */
   3256 		error = ether_ioctl(ifp, cmd, data);
   3257 #ifdef WM_MPSAFE
   3258 		splx(s);
   3259 #endif
   3260 		if (error != ENETRESET)
   3261 			break;
   3262 
   3263 		error = 0;
   3264 
   3265 		if (cmd == SIOCSIFCAP)
   3266 			error = (*ifp->if_init)(ifp);
   3267 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3268 			;
   3269 		else if (ifp->if_flags & IFF_RUNNING) {
   3270 			/*
   3271 			 * Multicast list has changed; set the hardware filter
   3272 			 * accordingly.
   3273 			 */
   3274 			WM_CORE_LOCK(sc);
   3275 			wm_set_filter(sc);
   3276 			WM_CORE_UNLOCK(sc);
   3277 		}
   3278 		break;
   3279 	}
   3280 
   3281 #ifndef WM_MPSAFE
   3282 	splx(s);
   3283 #endif
   3284 	return error;
   3285 }
   3286 
   3287 /* MAC address related */
   3288 
   3289 /*
    3290  * Get the offset of the MAC address and return it.
    3291  * If an error occurs, offset 0 is used.
   3292  */
   3293 static uint16_t
   3294 wm_check_alt_mac_addr(struct wm_softc *sc)
   3295 {
   3296 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3297 	uint16_t offset = NVM_OFF_MACADDR;
   3298 
   3299 	/* Try to read alternative MAC address pointer */
   3300 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3301 		return 0;
   3302 
    3303 	/* Check whether the pointer is valid. */
   3304 	if ((offset == 0x0000) || (offset == 0xffff))
   3305 		return 0;
   3306 
   3307 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3308 	/*
    3309 	 * Check whether the alternative MAC address is valid. Some cards
    3310 	 * have a non-0xffff pointer but don't actually use an alternative
    3311 	 * MAC address.
    3312 	 *
    3313 	 * Check whether the broadcast bit is set.
   3314 	 */
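         	/*
         	 * Note: myea[0] holds the first two octets of the address, low
         	 * byte first, so (myea[0] & 0xff) & 0x01 tests the I/G
         	 * (multicast) bit of the first octet; a valid station address
         	 * must have that bit clear.
         	 */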
   3315 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3316 		if (((myea[0] & 0xff) & 0x01) == 0)
   3317 			return offset; /* Found */
   3318 
   3319 	/* Not found */
   3320 	return 0;
   3321 }
   3322 
   3323 static int
   3324 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3325 {
   3326 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3327 	uint16_t offset = NVM_OFF_MACADDR;
   3328 	int do_invert = 0;
   3329 
   3330 	switch (sc->sc_type) {
   3331 	case WM_T_82580:
   3332 	case WM_T_I350:
   3333 	case WM_T_I354:
   3334 		/* EEPROM Top Level Partitioning */
   3335 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3336 		break;
   3337 	case WM_T_82571:
   3338 	case WM_T_82575:
   3339 	case WM_T_82576:
   3340 	case WM_T_80003:
   3341 	case WM_T_I210:
   3342 	case WM_T_I211:
   3343 		offset = wm_check_alt_mac_addr(sc);
   3344 		if (offset == 0)
   3345 			if ((sc->sc_funcid & 0x01) == 1)
   3346 				do_invert = 1;
   3347 		break;
   3348 	default:
   3349 		if ((sc->sc_funcid & 0x01) == 1)
   3350 			do_invert = 1;
   3351 		break;
   3352 	}
   3353 
   3354 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3355 		goto bad;
   3356 
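         	/* Each 16-bit NVM word stores two MAC octets, low byte first. */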
   3357 	enaddr[0] = myea[0] & 0xff;
   3358 	enaddr[1] = myea[0] >> 8;
   3359 	enaddr[2] = myea[1] & 0xff;
   3360 	enaddr[3] = myea[1] >> 8;
   3361 	enaddr[4] = myea[2] & 0xff;
   3362 	enaddr[5] = myea[2] >> 8;
   3363 
   3364 	/*
   3365 	 * Toggle the LSB of the MAC address on the second port
   3366 	 * of some dual port cards.
   3367 	 */
   3368 	if (do_invert != 0)
   3369 		enaddr[5] ^= 1;
   3370 
   3371 	return 0;
   3372 
   3373  bad:
   3374 	return -1;
   3375 }
   3376 
   3377 /*
   3378  * wm_set_ral:
   3379  *
    3380  *	Set an entry in the receive address list.
   3381  */
   3382 static void
   3383 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3384 {
   3385 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3386 	uint32_t wlock_mac;
   3387 	int rv;
   3388 
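         	/*
         	 * RAL<idx> takes the first four octets of the address (first
         	 * octet in the least significant byte); RAH<idx> takes the
         	 * last two octets plus the Address Valid bit. A NULL enaddr
         	 * clears the slot.
         	 */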
   3389 	if (enaddr != NULL) {
   3390 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3391 		    (enaddr[3] << 24);
   3392 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3393 		ral_hi |= RAL_AV;
   3394 	} else {
   3395 		ral_lo = 0;
   3396 		ral_hi = 0;
   3397 	}
   3398 
   3399 	switch (sc->sc_type) {
   3400 	case WM_T_82542_2_0:
   3401 	case WM_T_82542_2_1:
   3402 	case WM_T_82543:
   3403 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3404 		CSR_WRITE_FLUSH(sc);
   3405 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3406 		CSR_WRITE_FLUSH(sc);
   3407 		break;
   3408 	case WM_T_PCH2:
   3409 	case WM_T_PCH_LPT:
   3410 	case WM_T_PCH_SPT:
   3411 	case WM_T_PCH_CNP:
   3412 		if (idx == 0) {
   3413 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3414 			CSR_WRITE_FLUSH(sc);
   3415 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3416 			CSR_WRITE_FLUSH(sc);
   3417 			return;
   3418 		}
   3419 		if (sc->sc_type != WM_T_PCH2) {
   3420 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3421 			    FWSM_WLOCK_MAC);
   3422 			addrl = WMREG_SHRAL(idx - 1);
   3423 			addrh = WMREG_SHRAH(idx - 1);
   3424 		} else {
   3425 			wlock_mac = 0;
   3426 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3427 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3428 		}
   3429 
   3430 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3431 			rv = wm_get_swflag_ich8lan(sc);
   3432 			if (rv != 0)
   3433 				return;
   3434 			CSR_WRITE(sc, addrl, ral_lo);
   3435 			CSR_WRITE_FLUSH(sc);
   3436 			CSR_WRITE(sc, addrh, ral_hi);
   3437 			CSR_WRITE_FLUSH(sc);
   3438 			wm_put_swflag_ich8lan(sc);
   3439 		}
   3440 
   3441 		break;
   3442 	default:
   3443 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3444 		CSR_WRITE_FLUSH(sc);
   3445 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3446 		CSR_WRITE_FLUSH(sc);
   3447 		break;
   3448 	}
   3449 }
   3450 
   3451 /*
   3452  * wm_mchash:
   3453  *
   3454  *	Compute the hash of the multicast address for the 4096-bit
   3455  *	multicast filter.
   3456  */
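         /*
          * Illustrative example: with filter type 0, the multicast address
          * 01:00:5e:00:00:01 hashes to (0x00 >> 4) | (0x01 << 4) = 0x010,
          * which wm_set_filter() below maps to bit 16 of MTA word 0.
          */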
   3457 static uint32_t
   3458 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3459 {
   3460 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3461 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3462 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3463 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3464 	uint32_t hash;
   3465 
   3466 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3467 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3468 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3469 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3470 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3471 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3472 		return (hash & 0x3ff);
   3473 	}
   3474 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3475 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3476 
   3477 	return (hash & 0xfff);
   3478 }
   3479 
   3480 /*
   3481  * wm_set_filter:
   3482  *
   3483  *	Set up the receive filter.
   3484  */
   3485 static void
   3486 wm_set_filter(struct wm_softc *sc)
   3487 {
   3488 	struct ethercom *ec = &sc->sc_ethercom;
   3489 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3490 	struct ether_multi *enm;
   3491 	struct ether_multistep step;
   3492 	bus_addr_t mta_reg;
   3493 	uint32_t hash, reg, bit;
   3494 	int i, size, ralmax;
   3495 
   3496 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3497 		device_xname(sc->sc_dev), __func__));
   3498 
   3499 	if (sc->sc_type >= WM_T_82544)
   3500 		mta_reg = WMREG_CORDOVA_MTA;
   3501 	else
   3502 		mta_reg = WMREG_MTA;
   3503 
   3504 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3505 
   3506 	if (ifp->if_flags & IFF_BROADCAST)
   3507 		sc->sc_rctl |= RCTL_BAM;
   3508 	if (ifp->if_flags & IFF_PROMISC) {
   3509 		sc->sc_rctl |= RCTL_UPE;
   3510 		goto allmulti;
   3511 	}
   3512 
   3513 	/*
   3514 	 * Set the station address in the first RAL slot, and
   3515 	 * clear the remaining slots.
   3516 	 */
   3517 	if (sc->sc_type == WM_T_ICH8)
    3518 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3519 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3520 	    || (sc->sc_type == WM_T_PCH))
   3521 		size = WM_RAL_TABSIZE_ICH8;
   3522 	else if (sc->sc_type == WM_T_PCH2)
   3523 		size = WM_RAL_TABSIZE_PCH2;
   3524 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3525 	    || (sc->sc_type == WM_T_PCH_CNP))
   3526 		size = WM_RAL_TABSIZE_PCH_LPT;
   3527 	else if (sc->sc_type == WM_T_82575)
   3528 		size = WM_RAL_TABSIZE_82575;
   3529 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3530 		size = WM_RAL_TABSIZE_82576;
   3531 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3532 		size = WM_RAL_TABSIZE_I350;
   3533 	else
   3534 		size = WM_RAL_TABSIZE;
   3535 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3536 
   3537 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3538 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3539 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3540 		switch (i) {
   3541 		case 0:
   3542 			/* We can use all entries */
   3543 			ralmax = size;
   3544 			break;
   3545 		case 1:
   3546 			/* Only RAR[0] */
   3547 			ralmax = 1;
   3548 			break;
   3549 		default:
   3550 			/* available SHRA + RAR[0] */
   3551 			ralmax = i + 1;
   3552 		}
   3553 	} else
   3554 		ralmax = size;
   3555 	for (i = 1; i < size; i++) {
   3556 		if (i < ralmax)
   3557 			wm_set_ral(sc, NULL, i);
   3558 	}
   3559 
   3560 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3561 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3562 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3563 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3564 		size = WM_ICH8_MC_TABSIZE;
   3565 	else
   3566 		size = WM_MC_TABSIZE;
   3567 	/* Clear out the multicast table. */
   3568 	for (i = 0; i < size; i++) {
   3569 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3570 		CSR_WRITE_FLUSH(sc);
   3571 	}
   3572 
   3573 	ETHER_LOCK(ec);
   3574 	ETHER_FIRST_MULTI(step, ec, enm);
   3575 	while (enm != NULL) {
   3576 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3577 			ETHER_UNLOCK(ec);
   3578 			/*
   3579 			 * We must listen to a range of multicast addresses.
   3580 			 * For now, just accept all multicasts, rather than
   3581 			 * trying to set only those filter bits needed to match
   3582 			 * the range.  (At this time, the only use of address
   3583 			 * ranges is for IP multicast routing, for which the
   3584 			 * range is big enough to require all bits set.)
   3585 			 */
   3586 			goto allmulti;
   3587 		}
   3588 
   3589 		hash = wm_mchash(sc, enm->enm_addrlo);
   3590 
   3591 		reg = (hash >> 5);
   3592 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3593 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3594 		    || (sc->sc_type == WM_T_PCH2)
   3595 		    || (sc->sc_type == WM_T_PCH_LPT)
   3596 		    || (sc->sc_type == WM_T_PCH_SPT)
   3597 		    || (sc->sc_type == WM_T_PCH_CNP))
   3598 			reg &= 0x1f;
   3599 		else
   3600 			reg &= 0x7f;
   3601 		bit = hash & 0x1f;
   3602 
   3603 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3604 		hash |= 1U << bit;
   3605 
   3606 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3607 			/*
    3608 			 * 82544 Errata 9: Certain registers cannot be written
   3609 			 * with particular alignments in PCI-X bus operation
   3610 			 * (FCAH, MTA and VFTA).
   3611 			 */
   3612 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3613 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3614 			CSR_WRITE_FLUSH(sc);
   3615 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3616 			CSR_WRITE_FLUSH(sc);
   3617 		} else {
   3618 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3619 			CSR_WRITE_FLUSH(sc);
   3620 		}
   3621 
   3622 		ETHER_NEXT_MULTI(step, enm);
   3623 	}
   3624 	ETHER_UNLOCK(ec);
   3625 
   3626 	ifp->if_flags &= ~IFF_ALLMULTI;
   3627 	goto setit;
   3628 
   3629  allmulti:
   3630 	ifp->if_flags |= IFF_ALLMULTI;
   3631 	sc->sc_rctl |= RCTL_MPE;
   3632 
   3633  setit:
   3634 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3635 }
   3636 
   3637 /* Reset and init related */
   3638 
   3639 static void
   3640 wm_set_vlan(struct wm_softc *sc)
   3641 {
   3642 
   3643 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3644 		device_xname(sc->sc_dev), __func__));
   3645 
   3646 	/* Deal with VLAN enables. */
   3647 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3648 		sc->sc_ctrl |= CTRL_VME;
   3649 	else
   3650 		sc->sc_ctrl &= ~CTRL_VME;
   3651 
   3652 	/* Write the control registers. */
   3653 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3654 }
   3655 
   3656 static void
   3657 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3658 {
   3659 	uint32_t gcr;
   3660 	pcireg_t ctrl2;
   3661 
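         	/*
         	 * Two mechanisms are used below: on parts without the v2
         	 * capability the timeout is set to 10ms via GCR, otherwise to
         	 * 16ms via the PCIe Device Control 2 register in config space.
         	 */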
   3662 	gcr = CSR_READ(sc, WMREG_GCR);
   3663 
    3664 	/* Only take action if the timeout value is still the default of 0 */
   3665 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3666 		goto out;
   3667 
   3668 	if ((gcr & GCR_CAP_VER2) == 0) {
   3669 		gcr |= GCR_CMPL_TMOUT_10MS;
   3670 		goto out;
   3671 	}
   3672 
   3673 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3674 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3675 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3676 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3677 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3678 
   3679 out:
   3680 	/* Disable completion timeout resend */
   3681 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3682 
   3683 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3684 }
   3685 
   3686 void
   3687 wm_get_auto_rd_done(struct wm_softc *sc)
   3688 {
   3689 	int i;
   3690 
   3691 	/* wait for eeprom to reload */
   3692 	switch (sc->sc_type) {
   3693 	case WM_T_82571:
   3694 	case WM_T_82572:
   3695 	case WM_T_82573:
   3696 	case WM_T_82574:
   3697 	case WM_T_82583:
   3698 	case WM_T_82575:
   3699 	case WM_T_82576:
   3700 	case WM_T_82580:
   3701 	case WM_T_I350:
   3702 	case WM_T_I354:
   3703 	case WM_T_I210:
   3704 	case WM_T_I211:
   3705 	case WM_T_80003:
   3706 	case WM_T_ICH8:
   3707 	case WM_T_ICH9:
   3708 		for (i = 0; i < 10; i++) {
   3709 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3710 				break;
   3711 			delay(1000);
   3712 		}
   3713 		if (i == 10) {
   3714 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3715 			    "complete\n", device_xname(sc->sc_dev));
   3716 		}
   3717 		break;
   3718 	default:
   3719 		break;
   3720 	}
   3721 }
   3722 
   3723 void
   3724 wm_lan_init_done(struct wm_softc *sc)
   3725 {
   3726 	uint32_t reg = 0;
   3727 	int i;
   3728 
   3729 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3730 		device_xname(sc->sc_dev), __func__));
   3731 
   3732 	/* Wait for eeprom to reload */
   3733 	switch (sc->sc_type) {
   3734 	case WM_T_ICH10:
   3735 	case WM_T_PCH:
   3736 	case WM_T_PCH2:
   3737 	case WM_T_PCH_LPT:
   3738 	case WM_T_PCH_SPT:
   3739 	case WM_T_PCH_CNP:
   3740 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3741 			reg = CSR_READ(sc, WMREG_STATUS);
   3742 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3743 				break;
   3744 			delay(100);
   3745 		}
   3746 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3747 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3748 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3749 		}
   3750 		break;
   3751 	default:
   3752 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3753 		    __func__);
   3754 		break;
   3755 	}
   3756 
   3757 	reg &= ~STATUS_LAN_INIT_DONE;
   3758 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3759 }
   3760 
   3761 void
   3762 wm_get_cfg_done(struct wm_softc *sc)
   3763 {
   3764 	int mask;
   3765 	uint32_t reg;
   3766 	int i;
   3767 
   3768 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3769 		device_xname(sc->sc_dev), __func__));
   3770 
   3771 	/* Wait for eeprom to reload */
   3772 	switch (sc->sc_type) {
   3773 	case WM_T_82542_2_0:
   3774 	case WM_T_82542_2_1:
   3775 		/* null */
   3776 		break;
   3777 	case WM_T_82543:
   3778 	case WM_T_82544:
   3779 	case WM_T_82540:
   3780 	case WM_T_82545:
   3781 	case WM_T_82545_3:
   3782 	case WM_T_82546:
   3783 	case WM_T_82546_3:
   3784 	case WM_T_82541:
   3785 	case WM_T_82541_2:
   3786 	case WM_T_82547:
   3787 	case WM_T_82547_2:
   3788 	case WM_T_82573:
   3789 	case WM_T_82574:
   3790 	case WM_T_82583:
   3791 		/* generic */
   3792 		delay(10*1000);
   3793 		break;
   3794 	case WM_T_80003:
   3795 	case WM_T_82571:
   3796 	case WM_T_82572:
   3797 	case WM_T_82575:
   3798 	case WM_T_82576:
   3799 	case WM_T_82580:
   3800 	case WM_T_I350:
   3801 	case WM_T_I354:
   3802 	case WM_T_I210:
   3803 	case WM_T_I211:
   3804 		if (sc->sc_type == WM_T_82571) {
   3805 			/* Only 82571 shares port 0 */
   3806 			mask = EEMNGCTL_CFGDONE_0;
   3807 		} else
   3808 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3809 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3810 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3811 				break;
   3812 			delay(1000);
   3813 		}
   3814 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3815 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3816 				device_xname(sc->sc_dev), __func__));
   3817 		}
   3818 		break;
   3819 	case WM_T_ICH8:
   3820 	case WM_T_ICH9:
   3821 	case WM_T_ICH10:
   3822 	case WM_T_PCH:
   3823 	case WM_T_PCH2:
   3824 	case WM_T_PCH_LPT:
   3825 	case WM_T_PCH_SPT:
   3826 	case WM_T_PCH_CNP:
   3827 		delay(10*1000);
   3828 		if (sc->sc_type >= WM_T_ICH10)
   3829 			wm_lan_init_done(sc);
   3830 		else
   3831 			wm_get_auto_rd_done(sc);
   3832 
   3833 		/* Clear PHY Reset Asserted bit */
   3834 		reg = CSR_READ(sc, WMREG_STATUS);
   3835 		if ((reg & STATUS_PHYRA) != 0)
   3836 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3837 		break;
   3838 	default:
   3839 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3840 		    __func__);
   3841 		break;
   3842 	}
   3843 }
   3844 
   3845 void
   3846 wm_phy_post_reset(struct wm_softc *sc)
   3847 {
   3848 	uint32_t reg;
   3849 
   3850 	/* This function is only for ICH8 and newer. */
   3851 	if (sc->sc_type < WM_T_ICH8)
   3852 		return;
   3853 
   3854 	if (wm_phy_resetisblocked(sc)) {
   3855 		/* XXX */
   3856 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3857 		return;
   3858 	}
   3859 
   3860 	/* Allow time for h/w to get to quiescent state after reset */
   3861 	delay(10*1000);
   3862 
   3863 	/* Perform any necessary post-reset workarounds */
   3864 	if (sc->sc_type == WM_T_PCH)
   3865 		wm_hv_phy_workaround_ich8lan(sc);
   3866 	else if (sc->sc_type == WM_T_PCH2)
   3867 		wm_lv_phy_workaround_ich8lan(sc);
   3868 
   3869 	/* Clear the host wakeup bit after lcd reset */
   3870 	if (sc->sc_type >= WM_T_PCH) {
   3871 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3872 		    BM_PORT_GEN_CFG);
   3873 		reg &= ~BM_WUC_HOST_WU_BIT;
   3874 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3875 		    BM_PORT_GEN_CFG, reg);
   3876 	}
   3877 
   3878 	/* Configure the LCD with the extended configuration region in NVM */
   3879 	wm_init_lcd_from_nvm(sc);
   3880 
   3881 	/* XXX Configure the LCD with the OEM bits in NVM */
   3882 
   3883 	if (sc->sc_type == WM_T_PCH2) {
   3884 		/* Ungate automatic PHY configuration on non-managed 82579 */
   3885 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   3886 			delay(10 * 1000);
   3887 			wm_gate_hw_phy_config_ich8lan(sc, false);
   3888 		}
   3889 		/* XXX Set EEE LPI Update Timer to 200usec */
   3890 	}
   3891 }
   3892 
   3893 /* Only for PCH and newer */
   3894 static int
   3895 wm_write_smbus_addr(struct wm_softc *sc)
   3896 {
   3897 	uint32_t strap, freq;
   3898 	uint16_t phy_data;
   3899 	int rv;
   3900 
   3901 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3902 		device_xname(sc->sc_dev), __func__));
   3903 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   3904 
   3905 	strap = CSR_READ(sc, WMREG_STRAP);
   3906 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3907 
   3908 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   3909 	if (rv != 0)
   3910 		return -1;
   3911 
   3912 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3913 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3914 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3915 
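         	/*
         	 * The strap appears to encode the SMBus frequency offset by
         	 * one: 0 means "not recorded", so freq-- both tests for that
         	 * case and recovers the two-bit frequency value (an assumption
         	 * based on matching logic in other e1000-family drivers).
         	 */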
   3916 	if (sc->sc_phytype == WMPHY_I217) {
   3917 		/* Restore SMBus frequency */
    3918 		if (freq--) {
   3919 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3920 			    | HV_SMB_ADDR_FREQ_HIGH);
   3921 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3922 			    HV_SMB_ADDR_FREQ_LOW);
   3923 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3924 			    HV_SMB_ADDR_FREQ_HIGH);
   3925 		} else {
   3926 			DPRINTF(WM_DEBUG_INIT,
   3927 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3928 				device_xname(sc->sc_dev), __func__));
   3929 		}
   3930 	}
   3931 
   3932 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   3933 	    phy_data);
   3934 }
   3935 
   3936 void
   3937 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3938 {
   3939 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3940 	uint16_t phy_page = 0;
   3941 
   3942 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3943 		device_xname(sc->sc_dev), __func__));
   3944 
   3945 	switch (sc->sc_type) {
   3946 	case WM_T_ICH8:
   3947 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3948 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3949 			return;
   3950 
   3951 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3952 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3953 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3954 			break;
   3955 		}
   3956 		/* FALLTHROUGH */
   3957 	case WM_T_PCH:
   3958 	case WM_T_PCH2:
   3959 	case WM_T_PCH_LPT:
   3960 	case WM_T_PCH_SPT:
   3961 	case WM_T_PCH_CNP:
   3962 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3963 		break;
   3964 	default:
   3965 		return;
   3966 	}
   3967 
   3968 	sc->phy.acquire(sc);
   3969 
   3970 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3971 	if ((reg & sw_cfg_mask) == 0)
   3972 		goto release;
   3973 
   3974 	/*
   3975 	 * Make sure HW does not configure LCD from PHY extended configuration
   3976 	 * before SW configuration
   3977 	 */
   3978 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3979 	if ((sc->sc_type < WM_T_PCH2)
   3980 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3981 		goto release;
   3982 
   3983 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3984 		device_xname(sc->sc_dev), __func__));
   3985 	/* word_addr is in DWORD */
   3986 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3987 
   3988 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3989 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3990 	if (cnf_size == 0)
   3991 		goto release;
   3992 
   3993 	if (((sc->sc_type == WM_T_PCH)
   3994 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3995 	    || (sc->sc_type > WM_T_PCH)) {
   3996 		/*
   3997 		 * HW configures the SMBus address and LEDs when the OEM and
   3998 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3999 		 * are cleared, SW will configure them instead.
   4000 		 */
   4001 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4002 			device_xname(sc->sc_dev), __func__));
   4003 		wm_write_smbus_addr(sc);
   4004 
   4005 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4006 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   4007 	}
   4008 
   4009 	/* Configure LCD from extended configuration region. */
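         	/*
         	 * Each entry is a pair of NVM words: register data followed by
         	 * the register address; page selects are tracked via phy_page.
         	 */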
   4010 	for (i = 0; i < cnf_size; i++) {
   4011 		uint16_t reg_data, reg_addr;
   4012 
   4013 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4014 			goto release;
   4015 
    4016 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4017 			goto release;
   4018 
   4019 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4020 			phy_page = reg_data;
   4021 
   4022 		reg_addr &= IGPHY_MAXREGADDR;
   4023 		reg_addr |= phy_page;
   4024 
   4025 		KASSERT(sc->phy.writereg_locked != NULL);
   4026 		sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr, reg_data);
   4027 	}
   4028 
   4029 release:
   4030 	sc->phy.release(sc);
   4031 	return;
   4032 }
   4033 
   4034 
   4035 /* Init hardware bits */
   4036 void
   4037 wm_initialize_hardware_bits(struct wm_softc *sc)
   4038 {
   4039 	uint32_t tarc0, tarc1, reg;
   4040 
   4041 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4042 		device_xname(sc->sc_dev), __func__));
   4043 
   4044 	/* For 82571 variant, 80003 and ICHs */
   4045 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4046 	    || (sc->sc_type >= WM_T_80003)) {
   4047 
   4048 		/* Transmit Descriptor Control 0 */
   4049 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4050 		reg |= TXDCTL_COUNT_DESC;
   4051 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4052 
   4053 		/* Transmit Descriptor Control 1 */
   4054 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4055 		reg |= TXDCTL_COUNT_DESC;
   4056 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4057 
   4058 		/* TARC0 */
   4059 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4060 		switch (sc->sc_type) {
   4061 		case WM_T_82571:
   4062 		case WM_T_82572:
   4063 		case WM_T_82573:
   4064 		case WM_T_82574:
   4065 		case WM_T_82583:
   4066 		case WM_T_80003:
   4067 			/* Clear bits 30..27 */
   4068 			tarc0 &= ~__BITS(30, 27);
   4069 			break;
   4070 		default:
   4071 			break;
   4072 		}
   4073 
   4074 		switch (sc->sc_type) {
   4075 		case WM_T_82571:
   4076 		case WM_T_82572:
   4077 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4078 
   4079 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4080 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4081 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4082 			/* 8257[12] Errata No.7 */
    4083 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4084 
   4085 			/* TARC1 bit 28 */
   4086 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4087 				tarc1 &= ~__BIT(28);
   4088 			else
   4089 				tarc1 |= __BIT(28);
   4090 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4091 
   4092 			/*
   4093 			 * 8257[12] Errata No.13
    4094 			 * Disable Dynamic Clock Gating.
   4095 			 */
   4096 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4097 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4098 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4099 			break;
   4100 		case WM_T_82573:
   4101 		case WM_T_82574:
   4102 		case WM_T_82583:
   4103 			if ((sc->sc_type == WM_T_82574)
   4104 			    || (sc->sc_type == WM_T_82583))
   4105 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4106 
   4107 			/* Extended Device Control */
   4108 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4109 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4110 			reg |= __BIT(22);	/* Set bit 22 */
   4111 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4112 
   4113 			/* Device Control */
   4114 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4115 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4116 
   4117 			/* PCIe Control Register */
   4118 			/*
   4119 			 * 82573 Errata (unknown).
   4120 			 *
   4121 			 * 82574 Errata 25 and 82583 Errata 12
   4122 			 * "Dropped Rx Packets":
    4123 			 *   NVM Image Version 2.1.4 and newer doesn't have this bug.
   4124 			 */
   4125 			reg = CSR_READ(sc, WMREG_GCR);
   4126 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4127 			CSR_WRITE(sc, WMREG_GCR, reg);
   4128 
   4129 			if ((sc->sc_type == WM_T_82574)
   4130 			    || (sc->sc_type == WM_T_82583)) {
   4131 				/*
   4132 				 * Document says this bit must be set for
   4133 				 * proper operation.
   4134 				 */
   4135 				reg = CSR_READ(sc, WMREG_GCR);
   4136 				reg |= __BIT(22);
   4137 				CSR_WRITE(sc, WMREG_GCR, reg);
   4138 
   4139 				/*
    4140 				 * Apply a workaround for the hardware erratum
    4141 				 * documented in the errata docs. It fixes an
    4142 				 * issue where error-prone or unreliable PCIe
    4143 				 * completions occur, particularly with ASPM
    4144 				 * enabled. Without the fix, the issue can
    4145 				 * cause Tx timeouts.
   4146 				 */
   4147 				reg = CSR_READ(sc, WMREG_GCR2);
   4148 				reg |= __BIT(0);
   4149 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4150 			}
   4151 			break;
   4152 		case WM_T_80003:
   4153 			/* TARC0 */
   4154 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4155 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4156 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   4157 
   4158 			/* TARC1 bit 28 */
   4159 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4160 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4161 				tarc1 &= ~__BIT(28);
   4162 			else
   4163 				tarc1 |= __BIT(28);
   4164 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4165 			break;
   4166 		case WM_T_ICH8:
   4167 		case WM_T_ICH9:
   4168 		case WM_T_ICH10:
   4169 		case WM_T_PCH:
   4170 		case WM_T_PCH2:
   4171 		case WM_T_PCH_LPT:
   4172 		case WM_T_PCH_SPT:
   4173 		case WM_T_PCH_CNP:
   4174 			/* TARC0 */
   4175 			if (sc->sc_type == WM_T_ICH8) {
   4176 				/* Set TARC0 bits 29 and 28 */
   4177 				tarc0 |= __BITS(29, 28);
   4178 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4179 				tarc0 |= __BIT(29);
   4180 				/*
    4181 				 * Clear bit 28 (from Linux); see the
    4182 				 * I218/I219 spec update,
   4183 				 * "5. Buffer Overrun While the I219 is
   4184 				 * Processing DMA Transactions"
   4185 				 */
   4186 				tarc0 &= ~__BIT(28);
   4187 			}
   4188 			/* Set TARC0 bits 23,24,26,27 */
   4189 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4190 
   4191 			/* CTRL_EXT */
   4192 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4193 			reg |= __BIT(22);	/* Set bit 22 */
   4194 			/*
   4195 			 * Enable PHY low-power state when MAC is at D3
   4196 			 * w/o WoL
   4197 			 */
   4198 			if (sc->sc_type >= WM_T_PCH)
   4199 				reg |= CTRL_EXT_PHYPDEN;
   4200 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4201 
   4202 			/* TARC1 */
   4203 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4204 			/* bit 28 */
   4205 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4206 				tarc1 &= ~__BIT(28);
   4207 			else
   4208 				tarc1 |= __BIT(28);
   4209 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4210 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4211 
   4212 			/* Device Status */
   4213 			if (sc->sc_type == WM_T_ICH8) {
   4214 				reg = CSR_READ(sc, WMREG_STATUS);
   4215 				reg &= ~__BIT(31);
   4216 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4217 
   4218 			}
   4219 
   4220 			/* IOSFPC */
   4221 			if (sc->sc_type == WM_T_PCH_SPT) {
   4222 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4223 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4224 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4225 			}
   4226 			/*
   4227 			 * Work-around descriptor data corruption issue during
   4228 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4229 			 * capability.
   4230 			 */
   4231 			reg = CSR_READ(sc, WMREG_RFCTL);
   4232 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4233 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4234 			break;
   4235 		default:
   4236 			break;
   4237 		}
   4238 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4239 
   4240 		switch (sc->sc_type) {
   4241 		/*
   4242 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4243 		 * Avoid RSS Hash Value bug.
   4244 		 */
   4245 		case WM_T_82571:
   4246 		case WM_T_82572:
   4247 		case WM_T_82573:
   4248 		case WM_T_80003:
   4249 		case WM_T_ICH8:
   4250 			reg = CSR_READ(sc, WMREG_RFCTL);
   4251 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4252 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4253 			break;
   4254 		case WM_T_82574:
    4255 			/* Use extended Rx descriptors. */
   4256 			reg = CSR_READ(sc, WMREG_RFCTL);
   4257 			reg |= WMREG_RFCTL_EXSTEN;
   4258 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4259 			break;
   4260 		default:
   4261 			break;
   4262 		}
   4263 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4264 		/*
   4265 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4266 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4267 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4268 		 * Correctly by the Device"
   4269 		 *
   4270 		 * I354(C2000) Errata AVR53:
   4271 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4272 		 * Hang"
   4273 		 */
   4274 		reg = CSR_READ(sc, WMREG_RFCTL);
   4275 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4276 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4277 	}
   4278 }
   4279 
   4280 static uint32_t
   4281 wm_rxpbs_adjust_82580(uint32_t val)
   4282 {
   4283 	uint32_t rv = 0;
   4284 
   4285 	if (val < __arraycount(wm_82580_rxpbs_table))
   4286 		rv = wm_82580_rxpbs_table[val];
   4287 
   4288 	return rv;
   4289 }
   4290 
   4291 /*
   4292  * wm_reset_phy:
   4293  *
   4294  *	generic PHY reset function.
   4295  *	Same as e1000_phy_hw_reset_generic()
   4296  */
   4297 static void
   4298 wm_reset_phy(struct wm_softc *sc)
   4299 {
   4300 	uint32_t reg;
   4301 
   4302 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4303 		device_xname(sc->sc_dev), __func__));
   4304 	if (wm_phy_resetisblocked(sc))
   4305 		return;
   4306 
   4307 	sc->phy.acquire(sc);
   4308 
   4309 	reg = CSR_READ(sc, WMREG_CTRL);
   4310 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4311 	CSR_WRITE_FLUSH(sc);
   4312 
   4313 	delay(sc->phy.reset_delay_us);
   4314 
   4315 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4316 	CSR_WRITE_FLUSH(sc);
   4317 
   4318 	delay(150);
   4319 
   4320 	sc->phy.release(sc);
   4321 
   4322 	wm_get_cfg_done(sc);
   4323 	wm_phy_post_reset(sc);
   4324 }
   4325 
   4326 /*
   4327  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4328  * so it is enough to check sc->sc_queue[0] only.
   4329  */
   4330 static void
   4331 wm_flush_desc_rings(struct wm_softc *sc)
   4332 {
   4333 	pcireg_t preg;
   4334 	uint32_t reg;
   4335 	struct wm_txqueue *txq;
   4336 	wiseman_txdesc_t *txd;
   4337 	int nexttx;
   4338 	uint32_t rctl;
   4339 
   4340 	/* First, disable MULR fix in FEXTNVM11 */
   4341 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4342 	reg |= FEXTNVM11_DIS_MULRFIX;
   4343 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4344 
   4345 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4346 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4347 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4348 		return;
   4349 
   4350 	/* TX */
   4351 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4352 	    device_xname(sc->sc_dev), preg, reg);
   4353 	reg = CSR_READ(sc, WMREG_TCTL);
   4354 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4355 
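         	/*
         	 * Post a single dummy 512-byte descriptor and advance TDT so
         	 * the hardware can drain the stuck Tx ring (this flush
         	 * sequence appears to mirror the equivalent one in other
         	 * e1000-family drivers).
         	 */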
   4356 	txq = &sc->sc_queue[0].wmq_txq;
   4357 	nexttx = txq->txq_next;
   4358 	txd = &txq->txq_descs[nexttx];
   4359 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4360 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4361 	txd->wtx_fields.wtxu_status = 0;
   4362 	txd->wtx_fields.wtxu_options = 0;
   4363 	txd->wtx_fields.wtxu_vlan = 0;
   4364 
   4365 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4366 	    BUS_SPACE_BARRIER_WRITE);
   4367 
   4368 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4369 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4370 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4371 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4372 	delay(250);
   4373 
   4374 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4375 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4376 		return;
   4377 
   4378 	/* RX */
   4379 	printf("%s: Need RX flush (reg = %08x)\n",
   4380 	    device_xname(sc->sc_dev), preg);
   4381 	rctl = CSR_READ(sc, WMREG_RCTL);
   4382 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4383 	CSR_WRITE_FLUSH(sc);
   4384 	delay(150);
   4385 
   4386 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4387 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4388 	reg &= 0xffffc000;
   4389 	/*
   4390 	 * update thresholds: prefetch threshold to 31, host threshold
   4391 	 * to 1 and make sure the granularity is "descriptors" and not
   4392 	 * "cache lines"
   4393 	 */
   4394 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4395 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4396 
   4397 	/*
   4398 	 * momentarily enable the RX ring for the changes to take
   4399 	 * effect
   4400 	 */
   4401 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4402 	CSR_WRITE_FLUSH(sc);
   4403 	delay(150);
   4404 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4405 }
   4406 
   4407 /*
   4408  * wm_reset:
   4409  *
   4410  *	Reset the i82542 chip.
   4411  */
   4412 static void
   4413 wm_reset(struct wm_softc *sc)
   4414 {
   4415 	int phy_reset = 0;
   4416 	int i, error = 0;
   4417 	uint32_t reg;
   4418 	uint16_t kmreg;
   4419 	int rv;
   4420 
   4421 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4422 		device_xname(sc->sc_dev), __func__));
   4423 	KASSERT(sc->sc_type != 0);
   4424 
   4425 	/*
   4426 	 * Allocate on-chip memory according to the MTU size.
   4427 	 * The Packet Buffer Allocation register must be written
   4428 	 * before the chip is reset.
   4429 	 */
   4430 	switch (sc->sc_type) {
   4431 	case WM_T_82547:
   4432 	case WM_T_82547_2:
   4433 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4434 		    PBA_22K : PBA_30K;
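         		/*
         		 * On 82547 the Tx FIFO occupies the part of the 40KB
         		 * packet buffer above the Rx allocation, hence the
         		 * (PBA_40K - sc_pba) size computation below.
         		 */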
   4435 		for (i = 0; i < sc->sc_nqueues; i++) {
   4436 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4437 			txq->txq_fifo_head = 0;
   4438 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4439 			txq->txq_fifo_size =
   4440 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4441 			txq->txq_fifo_stall = 0;
   4442 		}
   4443 		break;
   4444 	case WM_T_82571:
   4445 	case WM_T_82572:
   4446 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4447 	case WM_T_80003:
   4448 		sc->sc_pba = PBA_32K;
   4449 		break;
   4450 	case WM_T_82573:
   4451 		sc->sc_pba = PBA_12K;
   4452 		break;
   4453 	case WM_T_82574:
   4454 	case WM_T_82583:
   4455 		sc->sc_pba = PBA_20K;
   4456 		break;
   4457 	case WM_T_82576:
   4458 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4459 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4460 		break;
   4461 	case WM_T_82580:
   4462 	case WM_T_I350:
   4463 	case WM_T_I354:
   4464 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4465 		break;
   4466 	case WM_T_I210:
   4467 	case WM_T_I211:
   4468 		sc->sc_pba = PBA_34K;
   4469 		break;
   4470 	case WM_T_ICH8:
   4471 		/* Workaround for a bit corruption issue in FIFO memory */
   4472 		sc->sc_pba = PBA_8K;
   4473 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4474 		break;
   4475 	case WM_T_ICH9:
   4476 	case WM_T_ICH10:
   4477 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4478 		    PBA_14K : PBA_10K;
   4479 		break;
   4480 	case WM_T_PCH:
   4481 	case WM_T_PCH2:	/* XXX 14K? */
   4482 	case WM_T_PCH_LPT:
   4483 	case WM_T_PCH_SPT:
   4484 	case WM_T_PCH_CNP:
   4485 		sc->sc_pba = PBA_26K;
   4486 		break;
   4487 	default:
   4488 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4489 		    PBA_40K : PBA_48K;
   4490 		break;
   4491 	}
   4492 	/*
   4493 	 * Only old or non-multiqueue devices have the PBA register
   4494 	 * XXX Need special handling for 82575.
   4495 	 */
   4496 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4497 	    || (sc->sc_type == WM_T_82575))
   4498 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4499 
    4500 	/* Prevent the PCIe bus from sticking: drain in-flight transactions */
   4501 	if (sc->sc_flags & WM_F_PCIE) {
   4502 		int timeout = 800;
   4503 
   4504 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4505 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4506 
   4507 		while (timeout--) {
   4508 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4509 			    == 0)
   4510 				break;
   4511 			delay(100);
   4512 		}
   4513 		if (timeout == 0)
   4514 			device_printf(sc->sc_dev,
   4515 			    "failed to disable busmastering\n");
   4516 	}
   4517 
   4518 	/* Set the completion timeout for interface */
   4519 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4520 	    || (sc->sc_type == WM_T_82580)
   4521 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4522 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4523 		wm_set_pcie_completion_timeout(sc);
   4524 
   4525 	/* Clear interrupt */
   4526 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4527 	if (wm_is_using_msix(sc)) {
   4528 		if (sc->sc_type != WM_T_82574) {
   4529 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4530 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4531 		} else
   4532 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4533 	}
   4534 
   4535 	/* Stop the transmit and receive processes. */
   4536 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4537 	sc->sc_rctl &= ~RCTL_EN;
   4538 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4539 	CSR_WRITE_FLUSH(sc);
   4540 
   4541 	/* XXX set_tbi_sbp_82543() */
   4542 
   4543 	delay(10*1000);
   4544 
   4545 	/* Must acquire the MDIO ownership before MAC reset */
   4546 	switch (sc->sc_type) {
   4547 	case WM_T_82573:
   4548 	case WM_T_82574:
   4549 	case WM_T_82583:
   4550 		error = wm_get_hw_semaphore_82573(sc);
   4551 		break;
   4552 	default:
   4553 		break;
   4554 	}
   4555 
   4556 	/*
   4557 	 * 82541 Errata 29? & 82547 Errata 28?
   4558 	 * See also the description about PHY_RST bit in CTRL register
   4559 	 * in 8254x_GBe_SDM.pdf.
   4560 	 */
   4561 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4562 		CSR_WRITE(sc, WMREG_CTRL,
   4563 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4564 		CSR_WRITE_FLUSH(sc);
   4565 		delay(5000);
   4566 	}
   4567 
   4568 	switch (sc->sc_type) {
   4569 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4570 	case WM_T_82541:
   4571 	case WM_T_82541_2:
   4572 	case WM_T_82547:
   4573 	case WM_T_82547_2:
   4574 		/*
   4575 		 * On some chipsets, a reset through a memory-mapped write
   4576 		 * cycle can cause the chip to reset before completing the
    4577 		 * write cycle. This causes a major headache that can be avoided
   4578 		 * by issuing the reset via indirect register writes through
   4579 		 * I/O space.
   4580 		 *
   4581 		 * So, if we successfully mapped the I/O BAR at attach time,
   4582 		 * use that. Otherwise, try our luck with a memory-mapped
   4583 		 * reset.
   4584 		 */
   4585 		if (sc->sc_flags & WM_F_IOH_VALID)
   4586 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4587 		else
   4588 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4589 		break;
   4590 	case WM_T_82545_3:
   4591 	case WM_T_82546_3:
   4592 		/* Use the shadow control register on these chips. */
   4593 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4594 		break;
   4595 	case WM_T_80003:
   4596 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4597 		sc->phy.acquire(sc);
   4598 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4599 		sc->phy.release(sc);
   4600 		break;
   4601 	case WM_T_ICH8:
   4602 	case WM_T_ICH9:
   4603 	case WM_T_ICH10:
   4604 	case WM_T_PCH:
   4605 	case WM_T_PCH2:
   4606 	case WM_T_PCH_LPT:
   4607 	case WM_T_PCH_SPT:
   4608 	case WM_T_PCH_CNP:
   4609 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4610 		if (wm_phy_resetisblocked(sc) == false) {
   4611 			/*
   4612 			 * Gate automatic PHY configuration by hardware on
   4613 			 * non-managed 82579
   4614 			 */
   4615 			if ((sc->sc_type == WM_T_PCH2)
   4616 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4617 				== 0))
   4618 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4619 
   4620 			reg |= CTRL_PHY_RESET;
   4621 			phy_reset = 1;
   4622 		} else
   4623 			printf("XXX reset is blocked!!!\n");
   4624 		sc->phy.acquire(sc);
   4625 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4626 		/* Don't insert a completion barrier during reset */
   4627 		delay(20*1000);
   4628 		mutex_exit(sc->sc_ich_phymtx);
   4629 		break;
   4630 	case WM_T_82580:
   4631 	case WM_T_I350:
   4632 	case WM_T_I354:
   4633 	case WM_T_I210:
   4634 	case WM_T_I211:
   4635 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4636 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4637 			CSR_WRITE_FLUSH(sc);
   4638 		delay(5000);
   4639 		break;
   4640 	case WM_T_82542_2_0:
   4641 	case WM_T_82542_2_1:
   4642 	case WM_T_82543:
   4643 	case WM_T_82540:
   4644 	case WM_T_82545:
   4645 	case WM_T_82546:
   4646 	case WM_T_82571:
   4647 	case WM_T_82572:
   4648 	case WM_T_82573:
   4649 	case WM_T_82574:
   4650 	case WM_T_82575:
   4651 	case WM_T_82576:
   4652 	case WM_T_82583:
   4653 	default:
   4654 		/* Everything else can safely use the documented method. */
   4655 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4656 		break;
   4657 	}
   4658 
   4659 	/* Must release the MDIO ownership after MAC reset */
   4660 	switch (sc->sc_type) {
   4661 	case WM_T_82573:
   4662 	case WM_T_82574:
   4663 	case WM_T_82583:
   4664 		if (error == 0)
   4665 			wm_put_hw_semaphore_82573(sc);
   4666 		break;
   4667 	default:
   4668 		break;
   4669 	}
   4670 
   4671 	/* Set Phy Config Counter to 50msec */
   4672 	if (sc->sc_type == WM_T_PCH2) {
   4673 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4674 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4675 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4676 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4677 	}
   4678 
   4679 	if (phy_reset != 0)
   4680 		wm_get_cfg_done(sc);
   4681 
   4682 	/* reload EEPROM */
   4683 	switch (sc->sc_type) {
   4684 	case WM_T_82542_2_0:
   4685 	case WM_T_82542_2_1:
   4686 	case WM_T_82543:
   4687 	case WM_T_82544:
   4688 		delay(10);
   4689 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4690 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4691 		CSR_WRITE_FLUSH(sc);
   4692 		delay(2000);
   4693 		break;
   4694 	case WM_T_82540:
   4695 	case WM_T_82545:
   4696 	case WM_T_82545_3:
   4697 	case WM_T_82546:
   4698 	case WM_T_82546_3:
   4699 		delay(5*1000);
   4700 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4701 		break;
   4702 	case WM_T_82541:
   4703 	case WM_T_82541_2:
   4704 	case WM_T_82547:
   4705 	case WM_T_82547_2:
   4706 		delay(20000);
   4707 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4708 		break;
   4709 	case WM_T_82571:
   4710 	case WM_T_82572:
   4711 	case WM_T_82573:
   4712 	case WM_T_82574:
   4713 	case WM_T_82583:
   4714 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4715 			delay(10);
   4716 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4717 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4718 			CSR_WRITE_FLUSH(sc);
   4719 		}
   4720 		/* check EECD_EE_AUTORD */
   4721 		wm_get_auto_rd_done(sc);
   4722 		/*
   4723 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4724 		 * is set.
   4725 		 */
   4726 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4727 		    || (sc->sc_type == WM_T_82583))
   4728 			delay(25*1000);
   4729 		break;
   4730 	case WM_T_82575:
   4731 	case WM_T_82576:
   4732 	case WM_T_82580:
   4733 	case WM_T_I350:
   4734 	case WM_T_I354:
   4735 	case WM_T_I210:
   4736 	case WM_T_I211:
   4737 	case WM_T_80003:
   4738 		/* check EECD_EE_AUTORD */
   4739 		wm_get_auto_rd_done(sc);
   4740 		break;
   4741 	case WM_T_ICH8:
   4742 	case WM_T_ICH9:
   4743 	case WM_T_ICH10:
   4744 	case WM_T_PCH:
   4745 	case WM_T_PCH2:
   4746 	case WM_T_PCH_LPT:
   4747 	case WM_T_PCH_SPT:
   4748 	case WM_T_PCH_CNP:
   4749 		break;
   4750 	default:
   4751 		panic("%s: unknown type\n", __func__);
   4752 	}
   4753 
   4754 	/* Check whether EEPROM is present or not */
   4755 	switch (sc->sc_type) {
   4756 	case WM_T_82575:
   4757 	case WM_T_82576:
   4758 	case WM_T_82580:
   4759 	case WM_T_I350:
   4760 	case WM_T_I354:
   4761 	case WM_T_ICH8:
   4762 	case WM_T_ICH9:
   4763 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4764 			/* Not found */
   4765 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4766 			if (sc->sc_type == WM_T_82575)
   4767 				wm_reset_init_script_82575(sc);
   4768 		}
   4769 		break;
   4770 	default:
   4771 		break;
   4772 	}
   4773 
   4774 	if (phy_reset != 0)
   4775 		wm_phy_post_reset(sc);
   4776 
   4777 	if ((sc->sc_type == WM_T_82580)
   4778 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4779 		/* clear global device reset status bit */
   4780 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4781 	}
   4782 
   4783 	/* Clear any pending interrupt events. */
   4784 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4785 	reg = CSR_READ(sc, WMREG_ICR);
   4786 	if (wm_is_using_msix(sc)) {
   4787 		if (sc->sc_type != WM_T_82574) {
   4788 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4789 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4790 		} else
   4791 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4792 	}
   4793 
   4794 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4795 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4796 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4797 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4798 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4799 		reg |= KABGTXD_BGSQLBIAS;
   4800 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4801 	}
   4802 
   4803 	/* reload sc_ctrl */
   4804 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4805 
   4806 	if (sc->sc_type == WM_T_I354) {
   4807 #if 0
   4808 		/* I354 uses an external PHY */
   4809 		wm_set_eee_i354(sc);
   4810 #endif
   4811 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4812 		wm_set_eee_i350(sc);
   4813 
   4814 	/*
   4815 	 * For PCH, this write will make sure that any noise will be detected
   4816 	 * as a CRC error and be dropped rather than show up as a bad packet
    4817 	 * to the DMA engine.
   4818 	 */
   4819 	if (sc->sc_type == WM_T_PCH)
   4820 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4821 
   4822 	if (sc->sc_type >= WM_T_82544)
   4823 		CSR_WRITE(sc, WMREG_WUC, 0);
   4824 
   4825 	wm_reset_mdicnfg_82580(sc);
   4826 
   4827 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4828 		wm_pll_workaround_i210(sc);
   4829 
   4830 	if (sc->sc_type == WM_T_80003) {
   4831 		/* default to TRUE to enable the MDIC W/A */
   4832 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4833 
   4834 		rv = wm_kmrn_readreg(sc,
   4835 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4836 		if (rv == 0) {
   4837 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4838 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4839 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4840 			else
   4841 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4842 		}
   4843 	}
   4844 }
   4845 
   4846 /*
   4847  * wm_add_rxbuf:
   4848  *
    4849  *	Add a receive buffer to the indicated descriptor.
   4850  */
   4851 static int
   4852 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4853 {
   4854 	struct wm_softc *sc = rxq->rxq_sc;
   4855 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4856 	struct mbuf *m;
   4857 	int error;
   4858 
   4859 	KASSERT(mutex_owned(rxq->rxq_lock));
   4860 
   4861 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4862 	if (m == NULL)
   4863 		return ENOBUFS;
   4864 
   4865 	MCLGET(m, M_DONTWAIT);
   4866 	if ((m->m_flags & M_EXT) == 0) {
   4867 		m_freem(m);
   4868 		return ENOBUFS;
   4869 	}
   4870 
   4871 	if (rxs->rxs_mbuf != NULL)
   4872 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4873 
   4874 	rxs->rxs_mbuf = m;
   4875 
   4876 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4877 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4878 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4879 	if (error) {
   4880 		/* XXX XXX XXX */
   4881 		aprint_error_dev(sc->sc_dev,
   4882 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4883 		panic("wm_add_rxbuf");
   4884 	}
   4885 
   4886 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4887 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4888 
   4889 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4890 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4891 			wm_init_rxdesc(rxq, idx);
   4892 	} else
   4893 		wm_init_rxdesc(rxq, idx);
   4894 
   4895 	return 0;
   4896 }
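
/*
 * A note on the tail of wm_add_rxbuf() (an observation, not new logic):
 * on WM_F_NEWQUEUE devices the Rx descriptor is only (re)initialized
 * while RCTL_EN is set, because on the 82575/82576 the descriptors must
 * be written after the receiver is enabled; see the comment above the
 * wm_set_filter() call in wm_init_locked().
 */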
   4897 
   4898 /*
   4899  * wm_rxdrain:
   4900  *
   4901  *	Drain the receive queue.
   4902  */
   4903 static void
   4904 wm_rxdrain(struct wm_rxqueue *rxq)
   4905 {
   4906 	struct wm_softc *sc = rxq->rxq_sc;
   4907 	struct wm_rxsoft *rxs;
   4908 	int i;
   4909 
   4910 	KASSERT(mutex_owned(rxq->rxq_lock));
   4911 
   4912 	for (i = 0; i < WM_NRXDESC; i++) {
   4913 		rxs = &rxq->rxq_soft[i];
   4914 		if (rxs->rxs_mbuf != NULL) {
   4915 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4916 			m_freem(rxs->rxs_mbuf);
   4917 			rxs->rxs_mbuf = NULL;
   4918 		}
   4919 	}
   4920 }
   4921 
   4922 /*
   4923  * Setup registers for RSS.
   4924  *
    4925  * XXX VMDq is not yet supported.
   4926  */
   4927 static void
   4928 wm_init_rss(struct wm_softc *sc)
   4929 {
   4930 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4931 	int i;
   4932 
   4933 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   4934 
   4935 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4936 		int qid, reta_ent;
   4937 
   4938 		qid  = i % sc->sc_nqueues;
   4939 		switch (sc->sc_type) {
   4940 		case WM_T_82574:
   4941 			reta_ent = __SHIFTIN(qid,
   4942 			    RETA_ENT_QINDEX_MASK_82574);
   4943 			break;
   4944 		case WM_T_82575:
   4945 			reta_ent = __SHIFTIN(qid,
   4946 			    RETA_ENT_QINDEX1_MASK_82575);
   4947 			break;
   4948 		default:
   4949 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4950 			break;
   4951 		}
   4952 
   4953 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4954 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4955 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4956 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4957 	}
   4958 
   4959 	rss_getkey((uint8_t *)rss_key);
   4960 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4961 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4962 
   4963 	if (sc->sc_type == WM_T_82574)
   4964 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4965 	else
   4966 		mrqc = MRQC_ENABLE_RSS_MQ;
   4967 
   4968 	/*
    4969 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   4970 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4971 	 */
   4972 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4973 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4974 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4975 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4976 
   4977 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4978 }
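
/*
 * A sketch of the indirection table wm_init_rss() produces (with the
 * illustrative assumption of a 128-entry RETA, the usual size on these
 * controllers): with sc_nqueues == 4, the loop programs the repeating
 * queue pattern 0, 1, 2, 3, 0, 1, ... so RSS hash values are spread
 * evenly across the receive queues.
 */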
   4979 
   4980 /*
    4981  * Adjust the numbers of TX and RX queues that the system actually uses.
    4982  *
    4983  * The numbers are affected by the parameters below:
    4984  *     - The number of hardware queues
   4985  *     - The number of MSI-X vectors (= "nvectors" argument)
   4986  *     - ncpu
   4987  */
   4988 static void
   4989 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4990 {
   4991 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4992 
   4993 	if (nvectors < 2) {
   4994 		sc->sc_nqueues = 1;
   4995 		return;
   4996 	}
   4997 
   4998 	switch (sc->sc_type) {
   4999 	case WM_T_82572:
   5000 		hw_ntxqueues = 2;
   5001 		hw_nrxqueues = 2;
   5002 		break;
   5003 	case WM_T_82574:
   5004 		hw_ntxqueues = 2;
   5005 		hw_nrxqueues = 2;
   5006 		break;
   5007 	case WM_T_82575:
   5008 		hw_ntxqueues = 4;
   5009 		hw_nrxqueues = 4;
   5010 		break;
   5011 	case WM_T_82576:
   5012 		hw_ntxqueues = 16;
   5013 		hw_nrxqueues = 16;
   5014 		break;
   5015 	case WM_T_82580:
   5016 	case WM_T_I350:
   5017 	case WM_T_I354:
   5018 		hw_ntxqueues = 8;
   5019 		hw_nrxqueues = 8;
   5020 		break;
   5021 	case WM_T_I210:
   5022 		hw_ntxqueues = 4;
   5023 		hw_nrxqueues = 4;
   5024 		break;
   5025 	case WM_T_I211:
   5026 		hw_ntxqueues = 2;
   5027 		hw_nrxqueues = 2;
   5028 		break;
   5029 		/*
    5030 		 * As the Ethernet controllers below do not support MSI-X,
    5031 		 * this driver does not use multiqueue on them:
   5032 		 *     - WM_T_80003
   5033 		 *     - WM_T_ICH8
   5034 		 *     - WM_T_ICH9
   5035 		 *     - WM_T_ICH10
   5036 		 *     - WM_T_PCH
   5037 		 *     - WM_T_PCH2
   5038 		 *     - WM_T_PCH_LPT
   5039 		 */
   5040 	default:
   5041 		hw_ntxqueues = 1;
   5042 		hw_nrxqueues = 1;
   5043 		break;
   5044 	}
   5045 
   5046 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5047 
   5048 	/*
    5049 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    5050 	 * the number of queues actually used.
   5051 	 */
   5052 	if (nvectors < hw_nqueues + 1)
   5053 		sc->sc_nqueues = nvectors - 1;
   5054 	else
   5055 		sc->sc_nqueues = hw_nqueues;
   5056 
   5057 	/*
    5058 	 * As more queues than CPUs cannot improve scaling, we limit
    5059 	 * the number of queues actually used.
   5060 	 */
   5061 	if (ncpu < sc->sc_nqueues)
   5062 		sc->sc_nqueues = ncpu;
   5063 }
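
/*
 * A minimal sketch of the clamping wm_adjust_qnum() performs
 * (hypothetical numbers, not from any particular machine): an 82576
 * advertises 16 hardware queues, so with 5 MSI-X vectors (4 for Tx/Rx
 * plus 1 for link) and 8 CPUs the result is effectively
 *
 *	sc->sc_nqueues = uimin(hw_nqueues, uimin(nvectors - 1, ncpu))
 *	               = uimin(16, uimin(4, 8)) = 4;
 */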
   5064 
   5065 static inline bool
   5066 wm_is_using_msix(struct wm_softc *sc)
   5067 {
   5068 
   5069 	return (sc->sc_nintrs > 1);
   5070 }
   5071 
   5072 static inline bool
   5073 wm_is_using_multiqueue(struct wm_softc *sc)
   5074 {
   5075 
   5076 	return (sc->sc_nqueues > 1);
   5077 }
   5078 
   5079 static int
   5080 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5081 {
   5082 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5083 	wmq->wmq_id = qidx;
   5084 	wmq->wmq_intr_idx = intr_idx;
   5085 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5086 #ifdef WM_MPSAFE
   5087 	    | SOFTINT_MPSAFE
   5088 #endif
   5089 	    , wm_handle_queue, wmq);
   5090 	if (wmq->wmq_si != NULL)
   5091 		return 0;
   5092 
   5093 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5094 	    wmq->wmq_id);
   5095 
   5096 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5097 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5098 	return ENOMEM;
   5099 }
   5100 
   5101 /*
    5102  * Both single-interrupt MSI and INTx can use this function.
   5103  */
   5104 static int
   5105 wm_setup_legacy(struct wm_softc *sc)
   5106 {
   5107 	pci_chipset_tag_t pc = sc->sc_pc;
   5108 	const char *intrstr = NULL;
   5109 	char intrbuf[PCI_INTRSTR_LEN];
   5110 	int error;
   5111 
   5112 	error = wm_alloc_txrx_queues(sc);
   5113 	if (error) {
   5114 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5115 		    error);
   5116 		return ENOMEM;
   5117 	}
   5118 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5119 	    sizeof(intrbuf));
   5120 #ifdef WM_MPSAFE
   5121 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5122 #endif
   5123 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5124 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5125 	if (sc->sc_ihs[0] == NULL) {
   5126 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5127 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5128 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5129 		return ENOMEM;
   5130 	}
   5131 
   5132 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5133 	sc->sc_nintrs = 1;
   5134 
   5135 	return wm_softint_establish(sc, 0, 0);
   5136 }
   5137 
   5138 static int
   5139 wm_setup_msix(struct wm_softc *sc)
   5140 {
   5141 	void *vih;
   5142 	kcpuset_t *affinity;
   5143 	int qidx, error, intr_idx, txrx_established;
   5144 	pci_chipset_tag_t pc = sc->sc_pc;
   5145 	const char *intrstr = NULL;
   5146 	char intrbuf[PCI_INTRSTR_LEN];
   5147 	char intr_xname[INTRDEVNAMEBUF];
   5148 
   5149 	if (sc->sc_nqueues < ncpu) {
   5150 		/*
   5151 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5152 		 * interrupts starts from CPU#1.
   5153 		 */
   5154 		sc->sc_affinity_offset = 1;
   5155 	} else {
   5156 		/*
    5157 		 * In this case, this device uses all CPUs, so we unify the
    5158 		 * affinity cpu_index with the MSI-X vector number for readability.
   5159 		 */
   5160 		sc->sc_affinity_offset = 0;
   5161 	}
   5162 
   5163 	error = wm_alloc_txrx_queues(sc);
   5164 	if (error) {
   5165 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5166 		    error);
   5167 		return ENOMEM;
   5168 	}
   5169 
   5170 	kcpuset_create(&affinity, false);
   5171 	intr_idx = 0;
   5172 
   5173 	/*
   5174 	 * TX and RX
   5175 	 */
   5176 	txrx_established = 0;
   5177 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5178 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5179 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5180 
   5181 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5182 		    sizeof(intrbuf));
   5183 #ifdef WM_MPSAFE
   5184 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5185 		    PCI_INTR_MPSAFE, true);
   5186 #endif
   5187 		memset(intr_xname, 0, sizeof(intr_xname));
   5188 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5189 		    device_xname(sc->sc_dev), qidx);
   5190 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5191 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5192 		if (vih == NULL) {
   5193 			aprint_error_dev(sc->sc_dev,
   5194 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5195 			    intrstr ? " at " : "",
   5196 			    intrstr ? intrstr : "");
   5197 
   5198 			goto fail;
   5199 		}
   5200 		kcpuset_zero(affinity);
   5201 		/* Round-robin affinity */
   5202 		kcpuset_set(affinity, affinity_to);
   5203 		error = interrupt_distribute(vih, affinity, NULL);
   5204 		if (error == 0) {
   5205 			aprint_normal_dev(sc->sc_dev,
   5206 			    "for TX and RX interrupting at %s affinity to %u\n",
   5207 			    intrstr, affinity_to);
   5208 		} else {
   5209 			aprint_normal_dev(sc->sc_dev,
   5210 			    "for TX and RX interrupting at %s\n", intrstr);
   5211 		}
   5212 		sc->sc_ihs[intr_idx] = vih;
   5213 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5214 			goto fail;
   5215 		txrx_established++;
   5216 		intr_idx++;
   5217 	}
   5218 
   5219 	/*
   5220 	 * LINK
   5221 	 */
   5222 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5223 	    sizeof(intrbuf));
   5224 #ifdef WM_MPSAFE
   5225 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5226 #endif
   5227 	memset(intr_xname, 0, sizeof(intr_xname));
   5228 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5229 	    device_xname(sc->sc_dev));
   5230 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5231 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5232 	if (vih == NULL) {
   5233 		aprint_error_dev(sc->sc_dev,
   5234 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5235 		    intrstr ? " at " : "",
   5236 		    intrstr ? intrstr : "");
   5237 
   5238 		goto fail;
   5239 	}
    5240 	/* Keep the default affinity for the LINK interrupt */
   5241 	aprint_normal_dev(sc->sc_dev,
   5242 	    "for LINK interrupting at %s\n", intrstr);
   5243 	sc->sc_ihs[intr_idx] = vih;
   5244 	sc->sc_link_intr_idx = intr_idx;
   5245 
   5246 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5247 	kcpuset_destroy(affinity);
   5248 	return 0;
   5249 
   5250  fail:
   5251 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5252 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5253 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5254 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5255 	}
   5256 
   5257 	kcpuset_destroy(affinity);
   5258 	return ENOMEM;
   5259 }
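
/*
 * A sketch of the interrupt layout wm_setup_msix() produces
 * (hypothetical example with sc_nqueues == 4 and ncpu == 8):
 * sc_affinity_offset is 1, so the four Tx/Rx vectors are pinned
 * round-robin to CPU#1..CPU#4, the link vector keeps the system
 * default affinity, and sc_nintrs ends up as 5 (4 queues + 1 link).
 */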
   5260 
   5261 static void
   5262 wm_unset_stopping_flags(struct wm_softc *sc)
   5263 {
   5264 	int i;
   5265 
   5266 	KASSERT(WM_CORE_LOCKED(sc));
   5267 
   5268 	/*
    5269 	 * Must unset the stopping flags in ascending order.
   5270 	 */
   5271 	for (i = 0; i < sc->sc_nqueues; i++) {
   5272 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5273 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5274 
   5275 		mutex_enter(txq->txq_lock);
   5276 		txq->txq_stopping = false;
   5277 		mutex_exit(txq->txq_lock);
   5278 
   5279 		mutex_enter(rxq->rxq_lock);
   5280 		rxq->rxq_stopping = false;
   5281 		mutex_exit(rxq->rxq_lock);
   5282 	}
   5283 
   5284 	sc->sc_core_stopping = false;
   5285 }
   5286 
   5287 static void
   5288 wm_set_stopping_flags(struct wm_softc *sc)
   5289 {
   5290 	int i;
   5291 
   5292 	KASSERT(WM_CORE_LOCKED(sc));
   5293 
   5294 	sc->sc_core_stopping = true;
   5295 
   5296 	/*
    5297 	 * Must set the stopping flags in ascending order.
   5298 	 */
   5299 	for (i = 0; i < sc->sc_nqueues; i++) {
   5300 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5301 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5302 
   5303 		mutex_enter(rxq->rxq_lock);
   5304 		rxq->rxq_stopping = true;
   5305 		mutex_exit(rxq->rxq_lock);
   5306 
   5307 		mutex_enter(txq->txq_lock);
   5308 		txq->txq_stopping = true;
   5309 		mutex_exit(txq->txq_lock);
   5310 	}
   5311 }
   5312 
   5313 /*
    5314  * Write the interrupt interval value to ITR or EITR.
   5315  */
   5316 static void
   5317 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5318 {
   5319 
   5320 	if (!wmq->wmq_set_itr)
   5321 		return;
   5322 
   5323 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5324 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5325 
   5326 		/*
    5327 		 * The 82575 doesn't have the CNT_INGR field,
    5328 		 * so overwrite the counter field in software.
   5329 		 */
   5330 		if (sc->sc_type == WM_T_82575)
   5331 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5332 		else
   5333 			eitr |= EITR_CNT_INGR;
   5334 
   5335 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5336 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5337 		/*
    5338 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5339 		 * the multiqueue function with MSI-X.
   5340 		 */
   5341 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5342 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5343 	} else {
   5344 		KASSERT(wmq->wmq_id == 0);
   5345 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5346 	}
   5347 
   5348 	wmq->wmq_set_itr = false;
   5349 }
   5350 
   5351 /*
   5352  * TODO
    5353  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5354  * however, it does not fit wm(4), so AIM stays disabled until we find
    5355  * an appropriate ITR calculation.
   5356  */
   5357 /*
    5358  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5359  * write. This function itself does not write the ITR/EITR register.
   5360  */
   5361 static void
   5362 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5363 {
   5364 #ifdef NOTYET
   5365 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5366 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5367 	uint32_t avg_size = 0;
   5368 	uint32_t new_itr;
   5369 
   5370 	if (rxq->rxq_packets)
   5371 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5372 	if (txq->txq_packets)
   5373 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5374 
   5375 	if (avg_size == 0) {
   5376 		new_itr = 450; /* restore default value */
   5377 		goto out;
   5378 	}
   5379 
   5380 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5381 	avg_size += 24;
   5382 
   5383 	/* Don't starve jumbo frames */
   5384 	avg_size = uimin(avg_size, 3000);
   5385 
   5386 	/* Give a little boost to mid-size frames */
   5387 	if ((avg_size > 300) && (avg_size < 1200))
   5388 		new_itr = avg_size / 3;
   5389 	else
   5390 		new_itr = avg_size / 2;
   5391 
   5392 out:
   5393 	/*
    5394 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5395 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5396 	 */
   5397 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5398 		new_itr *= 4;
   5399 
   5400 	if (new_itr != wmq->wmq_itr) {
   5401 		wmq->wmq_itr = new_itr;
   5402 		wmq->wmq_set_itr = true;
   5403 	} else
   5404 		wmq->wmq_set_itr = false;
   5405 
   5406 	rxq->rxq_packets = 0;
   5407 	rxq->rxq_bytes = 0;
   5408 	txq->txq_packets = 0;
   5409 	txq->txq_bytes = 0;
   5410 #endif
   5411 }
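
/*
 * A worked example of the disabled AIM calculation above (illustrative
 * numbers only): for an average frame of 800 bytes, avg_size becomes
 * 824 after the 24-byte CRC/preamble/gap adjustment; 824 falls in the
 * mid-size boost range (300..1200), so new_itr = 824 / 3 = 274, which
 * the final scaling then multiplies by 4 on everything but the 82575.
 */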
   5412 
   5413 /*
   5414  * wm_init:		[ifnet interface function]
   5415  *
   5416  *	Initialize the interface.
   5417  */
   5418 static int
   5419 wm_init(struct ifnet *ifp)
   5420 {
   5421 	struct wm_softc *sc = ifp->if_softc;
   5422 	int ret;
   5423 
   5424 	WM_CORE_LOCK(sc);
   5425 	ret = wm_init_locked(ifp);
   5426 	WM_CORE_UNLOCK(sc);
   5427 
   5428 	return ret;
   5429 }
   5430 
   5431 static int
   5432 wm_init_locked(struct ifnet *ifp)
   5433 {
   5434 	struct wm_softc *sc = ifp->if_softc;
   5435 	int i, j, trynum, error = 0;
   5436 	uint32_t reg;
   5437 
   5438 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5439 		device_xname(sc->sc_dev), __func__));
   5440 	KASSERT(WM_CORE_LOCKED(sc));
   5441 
   5442 	/*
    5443 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5444 	 * There is a small but measurable benefit to avoiding the adjustment
   5445 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5446 	 * on such platforms.  One possibility is that the DMA itself is
   5447 	 * slightly more efficient if the front of the entire packet (instead
   5448 	 * of the front of the headers) is aligned.
   5449 	 *
   5450 	 * Note we must always set align_tweak to 0 if we are using
   5451 	 * jumbo frames.
   5452 	 */
   5453 #ifdef __NO_STRICT_ALIGNMENT
   5454 	sc->sc_align_tweak = 0;
   5455 #else
   5456 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5457 		sc->sc_align_tweak = 0;
   5458 	else
   5459 		sc->sc_align_tweak = 2;
   5460 #endif /* __NO_STRICT_ALIGNMENT */
   5461 
   5462 	/* Cancel any pending I/O. */
   5463 	wm_stop_locked(ifp, 0);
   5464 
   5465 	/* update statistics before reset */
   5466 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5467 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5468 
   5469 	/* PCH_SPT hardware workaround */
   5470 	if (sc->sc_type == WM_T_PCH_SPT)
   5471 		wm_flush_desc_rings(sc);
   5472 
   5473 	/* Reset the chip to a known state. */
   5474 	wm_reset(sc);
   5475 
   5476 	/*
   5477 	 * AMT based hardware can now take control from firmware
   5478 	 * Do this after reset.
   5479 	 */
   5480 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5481 		wm_get_hw_control(sc);
   5482 
   5483 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5484 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5485 		wm_legacy_irq_quirk_spt(sc);
   5486 
   5487 	/* Init hardware bits */
   5488 	wm_initialize_hardware_bits(sc);
   5489 
   5490 	/* Reset the PHY. */
   5491 	if (sc->sc_flags & WM_F_HAS_MII)
   5492 		wm_gmii_reset(sc);
   5493 
   5494 	if (sc->sc_type >= WM_T_ICH8) {
   5495 		reg = CSR_READ(sc, WMREG_GCR);
   5496 		/*
   5497 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5498 		 * default after reset.
   5499 		 */
   5500 		if (sc->sc_type == WM_T_ICH8)
   5501 			reg |= GCR_NO_SNOOP_ALL;
   5502 		else
   5503 			reg &= ~GCR_NO_SNOOP_ALL;
   5504 		CSR_WRITE(sc, WMREG_GCR, reg);
   5505 	}
   5506 	if ((sc->sc_type >= WM_T_ICH8)
   5507 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5508 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5509 
   5510 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5511 		reg |= CTRL_EXT_RO_DIS;
   5512 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5513 	}
   5514 
   5515 	/* Calculate (E)ITR value */
   5516 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5517 		/*
   5518 		 * For NEWQUEUE's EITR (except for 82575).
    5519 		 * For NEWQUEUE's EITR (except for the 82575).
    5520 		 * The 82575's EITR should be set to the same throttling value
    5521 		 * as the other old controllers' ITR because the interrupts/sec
    5522 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5523 		 *
    5524 		 * The 82574's EITR should be set to the same throttling value as ITR.
    5525 		 *
    5526 		 * For N interrupts/sec, set this value to:
    5527 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5528 		sc->sc_itr_init = 450;
   5529 	} else if (sc->sc_type >= WM_T_82543) {
   5530 		/*
   5531 		 * Set up the interrupt throttling register (units of 256ns)
   5532 		 * Note that a footnote in Intel's documentation says this
   5533 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5534 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5535 		 * that that is also true for the 1024ns units of the other
   5536 		 * interrupt-related timer registers -- so, really, we ought
   5537 		 * to divide this value by 4 when the link speed is low.
   5538 		 *
   5539 		 * XXX implement this division at link speed change!
   5540 		 */
   5541 
   5542 		/*
   5543 		 * For N interrupts/sec, set this value to:
   5544 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5545 		 * absolute and packet timer values to this value
   5546 		 * divided by 4 to get "simple timer" behavior.
   5547 		 */
   5548 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5549 	}
   5550 
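
	/*
	 * Sanity check of the figures above: interpreting 450 with the
	 * EITR rule (1,000,000 / N) gives roughly 2222 interrupts/sec;
	 * interpreting 1500 with the ITR rule (1,000,000,000 / (N * 256))
	 * gives roughly 2604 interrupts/sec, matching the comment.
	 */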
   5551 	error = wm_init_txrx_queues(sc);
   5552 	if (error)
   5553 		goto out;
   5554 
   5555 	/*
   5556 	 * Clear out the VLAN table -- we don't use it (yet).
   5557 	 */
   5558 	CSR_WRITE(sc, WMREG_VET, 0);
   5559 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5560 		trynum = 10; /* Due to hw errata */
   5561 	else
   5562 		trynum = 1;
   5563 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5564 		for (j = 0; j < trynum; j++)
   5565 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5566 
   5567 	/*
   5568 	 * Set up flow-control parameters.
   5569 	 *
   5570 	 * XXX Values could probably stand some tuning.
   5571 	 */
   5572 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5573 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5574 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5575 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5576 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5577 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5578 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5579 	}
   5580 
   5581 	sc->sc_fcrtl = FCRTL_DFLT;
   5582 	if (sc->sc_type < WM_T_82543) {
   5583 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5584 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5585 	} else {
   5586 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5587 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5588 	}
   5589 
   5590 	if (sc->sc_type == WM_T_80003)
   5591 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5592 	else
   5593 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5594 
   5595 	/* Writes the control register. */
   5596 	wm_set_vlan(sc);
   5597 
   5598 	if (sc->sc_flags & WM_F_HAS_MII) {
   5599 		uint16_t kmreg;
   5600 
   5601 		switch (sc->sc_type) {
   5602 		case WM_T_80003:
   5603 		case WM_T_ICH8:
   5604 		case WM_T_ICH9:
   5605 		case WM_T_ICH10:
   5606 		case WM_T_PCH:
   5607 		case WM_T_PCH2:
   5608 		case WM_T_PCH_LPT:
   5609 		case WM_T_PCH_SPT:
   5610 		case WM_T_PCH_CNP:
   5611 			/*
   5612 			 * Set the mac to wait the maximum time between each
   5613 			 * iteration and increase the max iterations when
   5614 			 * polling the phy; this fixes erroneous timeouts at
   5615 			 * 10Mbps.
   5616 			 */
   5617 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5618 			    0xFFFF);
   5619 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5620 			    &kmreg);
   5621 			kmreg |= 0x3F;
   5622 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5623 			    kmreg);
   5624 			break;
   5625 		default:
   5626 			break;
   5627 		}
   5628 
   5629 		if (sc->sc_type == WM_T_80003) {
   5630 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5631 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5632 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5633 
    5634 			/* Bypass the RX and TX FIFOs */
   5635 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5636 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5637 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5638 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5639 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5640 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5641 		}
   5642 	}
   5643 #if 0
   5644 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5645 #endif
   5646 
   5647 	/* Set up checksum offload parameters. */
   5648 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5649 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5650 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5651 		reg |= RXCSUM_IPOFL;
   5652 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5653 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5654 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5655 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5656 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5657 
   5658 	/* Set registers about MSI-X */
   5659 	if (wm_is_using_msix(sc)) {
   5660 		uint32_t ivar;
   5661 		struct wm_queue *wmq;
   5662 		int qid, qintr_idx;
   5663 
   5664 		if (sc->sc_type == WM_T_82575) {
   5665 			/* Interrupt control */
   5666 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5667 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5668 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5669 
   5670 			/* TX and RX */
   5671 			for (i = 0; i < sc->sc_nqueues; i++) {
   5672 				wmq = &sc->sc_queue[i];
   5673 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5674 				    EITR_TX_QUEUE(wmq->wmq_id)
   5675 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5676 			}
   5677 			/* Link status */
   5678 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5679 			    EITR_OTHER);
   5680 		} else if (sc->sc_type == WM_T_82574) {
   5681 			/* Interrupt control */
   5682 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5683 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5684 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5685 
   5686 			/*
    5687 			 * Work around an issue with spurious interrupts
    5688 			 * in MSI-X mode.
    5689 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5690 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5691 			 */
   5692 			reg = CSR_READ(sc, WMREG_RFCTL);
   5693 			reg |= WMREG_RFCTL_ACKDIS;
   5694 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5695 
   5696 			ivar = 0;
   5697 			/* TX and RX */
   5698 			for (i = 0; i < sc->sc_nqueues; i++) {
   5699 				wmq = &sc->sc_queue[i];
   5700 				qid = wmq->wmq_id;
   5701 				qintr_idx = wmq->wmq_intr_idx;
   5702 
   5703 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5704 				    IVAR_TX_MASK_Q_82574(qid));
   5705 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5706 				    IVAR_RX_MASK_Q_82574(qid));
   5707 			}
   5708 			/* Link status */
   5709 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5710 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5711 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5712 		} else {
   5713 			/* Interrupt control */
   5714 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5715 			    | GPIE_EIAME | GPIE_PBA);
   5716 
   5717 			switch (sc->sc_type) {
   5718 			case WM_T_82580:
   5719 			case WM_T_I350:
   5720 			case WM_T_I354:
   5721 			case WM_T_I210:
   5722 			case WM_T_I211:
   5723 				/* TX and RX */
   5724 				for (i = 0; i < sc->sc_nqueues; i++) {
   5725 					wmq = &sc->sc_queue[i];
   5726 					qid = wmq->wmq_id;
   5727 					qintr_idx = wmq->wmq_intr_idx;
   5728 
   5729 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5730 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5731 					ivar |= __SHIFTIN((qintr_idx
   5732 						| IVAR_VALID),
   5733 					    IVAR_TX_MASK_Q(qid));
   5734 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5735 					ivar |= __SHIFTIN((qintr_idx
   5736 						| IVAR_VALID),
   5737 					    IVAR_RX_MASK_Q(qid));
   5738 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5739 				}
   5740 				break;
   5741 			case WM_T_82576:
   5742 				/* TX and RX */
   5743 				for (i = 0; i < sc->sc_nqueues; i++) {
   5744 					wmq = &sc->sc_queue[i];
   5745 					qid = wmq->wmq_id;
   5746 					qintr_idx = wmq->wmq_intr_idx;
   5747 
   5748 					ivar = CSR_READ(sc,
   5749 					    WMREG_IVAR_Q_82576(qid));
   5750 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5751 					ivar |= __SHIFTIN((qintr_idx
   5752 						| IVAR_VALID),
   5753 					    IVAR_TX_MASK_Q_82576(qid));
   5754 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5755 					ivar |= __SHIFTIN((qintr_idx
   5756 						| IVAR_VALID),
   5757 					    IVAR_RX_MASK_Q_82576(qid));
   5758 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5759 					    ivar);
   5760 				}
   5761 				break;
   5762 			default:
   5763 				break;
   5764 			}
   5765 
   5766 			/* Link status */
   5767 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5768 			    IVAR_MISC_OTHER);
   5769 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5770 		}
   5771 
   5772 		if (wm_is_using_multiqueue(sc)) {
   5773 			wm_init_rss(sc);
   5774 
   5775 			/*
    5776 			 * NOTE: Receive full-packet checksum offload
    5777 			 * is mutually exclusive with multiqueue; however,
    5778 			 * this is not the same as TCP/IP checksums, which
    5779 			 * still work.
    5780 			 */
   5781 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5782 			reg |= RXCSUM_PCSD;
   5783 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5784 		}
   5785 	}
   5786 
   5787 	/* Set up the interrupt registers. */
   5788 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5789 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5790 	    ICR_RXO | ICR_RXT0;
   5791 	if (wm_is_using_msix(sc)) {
   5792 		uint32_t mask;
   5793 		struct wm_queue *wmq;
   5794 
   5795 		switch (sc->sc_type) {
   5796 		case WM_T_82574:
   5797 			mask = 0;
   5798 			for (i = 0; i < sc->sc_nqueues; i++) {
   5799 				wmq = &sc->sc_queue[i];
   5800 				mask |= ICR_TXQ(wmq->wmq_id);
   5801 				mask |= ICR_RXQ(wmq->wmq_id);
   5802 			}
   5803 			mask |= ICR_OTHER;
   5804 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5805 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5806 			break;
   5807 		default:
   5808 			if (sc->sc_type == WM_T_82575) {
   5809 				mask = 0;
   5810 				for (i = 0; i < sc->sc_nqueues; i++) {
   5811 					wmq = &sc->sc_queue[i];
   5812 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5813 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5814 				}
   5815 				mask |= EITR_OTHER;
   5816 			} else {
   5817 				mask = 0;
   5818 				for (i = 0; i < sc->sc_nqueues; i++) {
   5819 					wmq = &sc->sc_queue[i];
   5820 					mask |= 1 << wmq->wmq_intr_idx;
   5821 				}
   5822 				mask |= 1 << sc->sc_link_intr_idx;
   5823 			}
   5824 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5825 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5826 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5827 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5828 			break;
   5829 		}
   5830 	} else
   5831 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5832 
   5833 	/* Set up the inter-packet gap. */
   5834 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5835 
   5836 	if (sc->sc_type >= WM_T_82543) {
   5837 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5838 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5839 			wm_itrs_writereg(sc, wmq);
   5840 		}
   5841 		/*
    5842 		 * Link interrupts occur much less frequently than TX
    5843 		 * and RX interrupts, so we don't tune the
    5844 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5845 		 * FreeBSD's if_igb does.
   5846 		 */
   5847 	}
   5848 
   5849 	/* Set the VLAN ethernetype. */
   5850 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5851 
   5852 	/*
   5853 	 * Set up the transmit control register; we start out with
    5854 	 * a collision distance suitable for FDX, but update it when
   5855 	 * we resolve the media type.
   5856 	 */
   5857 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5858 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5859 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5860 	if (sc->sc_type >= WM_T_82571)
   5861 		sc->sc_tctl |= TCTL_MULR;
   5862 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5863 
   5864 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5865 		/* Write TDT after TCTL.EN is set. See the document. */
   5866 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5867 	}
   5868 
   5869 	if (sc->sc_type == WM_T_80003) {
   5870 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5871 		reg &= ~TCTL_EXT_GCEX_MASK;
   5872 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5873 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5874 	}
   5875 
   5876 	/* Set the media. */
   5877 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5878 		goto out;
   5879 
   5880 	/* Configure for OS presence */
   5881 	wm_init_manageability(sc);
   5882 
   5883 	/*
   5884 	 * Set up the receive control register; we actually program the
   5885 	 * register when we set the receive filter. Use multicast address
   5886 	 * offset type 0.
   5887 	 *
   5888 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   5889 	 * don't enable that feature.
   5890 	 */
   5891 	sc->sc_mchash_type = 0;
   5892 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5893 	    | RCTL_MO(sc->sc_mchash_type);
   5894 
   5895 	/*
    5896 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5897 	 */
   5898 	if (sc->sc_type == WM_T_82574)
   5899 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5900 
   5901 	/*
   5902 	 * The I350 has a bug where it always strips the CRC whether
    5903 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5904 	 */
   5905 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5906 	    || (sc->sc_type == WM_T_I210))
   5907 		sc->sc_rctl |= RCTL_SECRC;
   5908 
   5909 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5910 	    && (ifp->if_mtu > ETHERMTU)) {
   5911 		sc->sc_rctl |= RCTL_LPE;
   5912 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5913 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5914 	}
   5915 
   5916 	if (MCLBYTES == 2048)
   5917 		sc->sc_rctl |= RCTL_2k;
   5918 	else {
   5919 		if (sc->sc_type >= WM_T_82543) {
   5920 			switch (MCLBYTES) {
   5921 			case 4096:
   5922 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5923 				break;
   5924 			case 8192:
   5925 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5926 				break;
   5927 			case 16384:
   5928 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5929 				break;
   5930 			default:
   5931 				panic("wm_init: MCLBYTES %d unsupported",
   5932 				    MCLBYTES);
   5933 				break;
   5934 			}
   5935 		} else
   5936 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5937 	}
   5938 
   5939 	/* Enable ECC */
   5940 	switch (sc->sc_type) {
   5941 	case WM_T_82571:
   5942 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5943 		reg |= PBA_ECC_CORR_EN;
   5944 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5945 		break;
   5946 	case WM_T_PCH_LPT:
   5947 	case WM_T_PCH_SPT:
   5948 	case WM_T_PCH_CNP:
   5949 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5950 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5951 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5952 
   5953 		sc->sc_ctrl |= CTRL_MEHE;
   5954 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5955 		break;
   5956 	default:
   5957 		break;
   5958 	}
   5959 
   5960 	/*
   5961 	 * Set the receive filter.
   5962 	 *
   5963 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5964 	 * the setting of RCTL.EN in wm_set_filter()
   5965 	 */
   5966 	wm_set_filter(sc);
   5967 
    5968 	/* On 82575 and later, set RDT only if RX is enabled */
   5969 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5970 		int qidx;
   5971 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5972 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5973 			for (i = 0; i < WM_NRXDESC; i++) {
   5974 				mutex_enter(rxq->rxq_lock);
   5975 				wm_init_rxdesc(rxq, i);
   5976 				mutex_exit(rxq->rxq_lock);
   5977 
   5978 			}
   5979 		}
   5980 	}
   5981 
   5982 	wm_unset_stopping_flags(sc);
   5983 
   5984 	/* Start the one second link check clock. */
   5985 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5986 
   5987 	/* ...all done! */
   5988 	ifp->if_flags |= IFF_RUNNING;
   5989 	ifp->if_flags &= ~IFF_OACTIVE;
   5990 
   5991  out:
   5992 	sc->sc_if_flags = ifp->if_flags;
   5993 	if (error)
   5994 		log(LOG_ERR, "%s: interface not running\n",
   5995 		    device_xname(sc->sc_dev));
   5996 	return error;
   5997 }
   5998 
   5999 /*
   6000  * wm_stop:		[ifnet interface function]
   6001  *
   6002  *	Stop transmission on the interface.
   6003  */
   6004 static void
   6005 wm_stop(struct ifnet *ifp, int disable)
   6006 {
   6007 	struct wm_softc *sc = ifp->if_softc;
   6008 
   6009 	WM_CORE_LOCK(sc);
   6010 	wm_stop_locked(ifp, disable);
   6011 	WM_CORE_UNLOCK(sc);
   6012 }
   6013 
   6014 static void
   6015 wm_stop_locked(struct ifnet *ifp, int disable)
   6016 {
   6017 	struct wm_softc *sc = ifp->if_softc;
   6018 	struct wm_txsoft *txs;
   6019 	int i, qidx;
   6020 
   6021 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6022 		device_xname(sc->sc_dev), __func__));
   6023 	KASSERT(WM_CORE_LOCKED(sc));
   6024 
   6025 	wm_set_stopping_flags(sc);
   6026 
   6027 	/* Stop the one second clock. */
   6028 	callout_stop(&sc->sc_tick_ch);
   6029 
   6030 	/* Stop the 82547 Tx FIFO stall check timer. */
   6031 	if (sc->sc_type == WM_T_82547)
   6032 		callout_stop(&sc->sc_txfifo_ch);
   6033 
   6034 	if (sc->sc_flags & WM_F_HAS_MII) {
   6035 		/* Down the MII. */
   6036 		mii_down(&sc->sc_mii);
   6037 	} else {
   6038 #if 0
   6039 		/* Should we clear PHY's status properly? */
   6040 		wm_reset(sc);
   6041 #endif
   6042 	}
   6043 
   6044 	/* Stop the transmit and receive processes. */
   6045 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6046 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6047 	sc->sc_rctl &= ~RCTL_EN;
   6048 
   6049 	/*
   6050 	 * Clear the interrupt mask to ensure the device cannot assert its
   6051 	 * interrupt line.
   6052 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6053 	 * service any currently pending or shared interrupt.
   6054 	 */
   6055 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6056 	sc->sc_icr = 0;
   6057 	if (wm_is_using_msix(sc)) {
   6058 		if (sc->sc_type != WM_T_82574) {
   6059 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6060 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6061 		} else
   6062 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6063 	}
   6064 
   6065 	/* Release any queued transmit buffers. */
   6066 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6067 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6068 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6069 		mutex_enter(txq->txq_lock);
   6070 		txq->txq_sending = false; /* ensure watchdog disabled */
   6071 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6072 			txs = &txq->txq_soft[i];
   6073 			if (txs->txs_mbuf != NULL) {
   6074 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6075 				m_freem(txs->txs_mbuf);
   6076 				txs->txs_mbuf = NULL;
   6077 			}
   6078 		}
   6079 		mutex_exit(txq->txq_lock);
   6080 	}
   6081 
   6082 	/* Mark the interface as down and cancel the watchdog timer. */
   6083 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6084 
   6085 	if (disable) {
   6086 		for (i = 0; i < sc->sc_nqueues; i++) {
   6087 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6088 			mutex_enter(rxq->rxq_lock);
   6089 			wm_rxdrain(rxq);
   6090 			mutex_exit(rxq->rxq_lock);
   6091 		}
   6092 	}
   6093 
   6094 #if 0 /* notyet */
   6095 	if (sc->sc_type >= WM_T_82544)
   6096 		CSR_WRITE(sc, WMREG_WUC, 0);
   6097 #endif
   6098 }
   6099 
   6100 static void
   6101 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6102 {
   6103 	struct mbuf *m;
   6104 	int i;
   6105 
   6106 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6107 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6108 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6109 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6110 		    m->m_data, m->m_len, m->m_flags);
   6111 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6112 	    i, i == 1 ? "" : "s");
   6113 }
   6114 
   6115 /*
   6116  * wm_82547_txfifo_stall:
   6117  *
   6118  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6119  *	reset the FIFO pointers, and restart packet transmission.
   6120  */
   6121 static void
   6122 wm_82547_txfifo_stall(void *arg)
   6123 {
   6124 	struct wm_softc *sc = arg;
   6125 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6126 
   6127 	mutex_enter(txq->txq_lock);
   6128 
   6129 	if (txq->txq_stopping)
   6130 		goto out;
   6131 
   6132 	if (txq->txq_fifo_stall) {
   6133 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6134 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6135 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6136 			/*
   6137 			 * Packets have drained.  Stop transmitter, reset
   6138 			 * FIFO pointers, restart transmitter, and kick
   6139 			 * the packet queue.
   6140 			 */
   6141 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6142 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6143 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6144 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6145 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6146 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6147 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6148 			CSR_WRITE_FLUSH(sc);
   6149 
   6150 			txq->txq_fifo_head = 0;
   6151 			txq->txq_fifo_stall = 0;
   6152 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6153 		} else {
   6154 			/*
   6155 			 * Still waiting for packets to drain; try again in
   6156 			 * another tick.
   6157 			 */
   6158 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6159 		}
   6160 	}
   6161 
   6162 out:
   6163 	mutex_exit(txq->txq_lock);
   6164 }
   6165 
   6166 /*
   6167  * wm_82547_txfifo_bugchk:
   6168  *
   6169  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6170  *	prevent enqueueing a packet that would wrap around the end
    6171  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6172  *
   6173  *	We do this by checking the amount of space before the end
   6174  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6175  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6176  *	the internal FIFO pointers to the beginning, and restart
   6177  *	transmission on the interface.
   6178  */
   6179 #define	WM_FIFO_HDR		0x10
   6180 #define	WM_82547_PAD_LEN	0x3e0
   6181 static int
   6182 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6183 {
   6184 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6185 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6186 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6187 
   6188 	/* Just return if already stalled. */
   6189 	if (txq->txq_fifo_stall)
   6190 		return 1;
   6191 
   6192 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6193 		/* Stall only occurs in half-duplex mode. */
   6194 		goto send_packet;
   6195 	}
   6196 
   6197 	if (len >= WM_82547_PAD_LEN + space) {
   6198 		txq->txq_fifo_stall = 1;
   6199 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6200 		return 1;
   6201 	}
   6202 
   6203  send_packet:
   6204 	txq->txq_fifo_head += len;
   6205 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6206 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6207 
   6208 	return 0;
   6209 }
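
/*
 * A worked example of the check above (illustrative numbers only): for
 * a 1500-byte packet, len = roundup(1500 + WM_FIFO_HDR, WM_FIFO_HDR) =
 * roundup(1516, 16) = 1520 bytes.  The stall path is taken when
 * len >= WM_82547_PAD_LEN + space, i.e. when at most
 * 1520 - 0x3e0 = 528 bytes remain before the end of the FIFO.
 */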
   6210 
   6211 static int
   6212 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6213 {
   6214 	int error;
   6215 
   6216 	/*
   6217 	 * Allocate the control data structures, and create and load the
   6218 	 * DMA map for it.
   6219 	 *
   6220 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6221 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6222 	 * both sets within the same 4G segment.
   6223 	 */
   6224 	if (sc->sc_type < WM_T_82544)
   6225 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6226 	else
   6227 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6228 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6229 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6230 	else
   6231 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6232 
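         	/*
         	 * The 4GB boundary argument to bus_dmamem_alloc() enforces
         	 * the same-4G-segment requirement noted above.
         	 */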
   6233 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6234 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6235 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6236 		aprint_error_dev(sc->sc_dev,
   6237 		    "unable to allocate TX control data, error = %d\n",
   6238 		    error);
   6239 		goto fail_0;
   6240 	}
   6241 
   6242 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6243 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6244 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6245 		aprint_error_dev(sc->sc_dev,
   6246 		    "unable to map TX control data, error = %d\n", error);
   6247 		goto fail_1;
   6248 	}
   6249 
   6250 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6251 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6252 		aprint_error_dev(sc->sc_dev,
   6253 		    "unable to create TX control data DMA map, error = %d\n",
   6254 		    error);
   6255 		goto fail_2;
   6256 	}
   6257 
   6258 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6259 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6260 		aprint_error_dev(sc->sc_dev,
   6261 		    "unable to load TX control data DMA map, error = %d\n",
   6262 		    error);
   6263 		goto fail_3;
   6264 	}
   6265 
   6266 	return 0;
   6267 
   6268  fail_3:
   6269 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6270  fail_2:
   6271 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6272 	    WM_TXDESCS_SIZE(txq));
   6273  fail_1:
   6274 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6275  fail_0:
   6276 	return error;
   6277 }
   6278 
   6279 static void
   6280 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6281 {
   6282 
   6283 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6284 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6285 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6286 	    WM_TXDESCS_SIZE(txq));
   6287 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6288 }
   6289 
   6290 static int
   6291 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6292 {
   6293 	int error;
   6294 	size_t rxq_descs_size;
   6295 
   6296 	/*
   6297 	 * Allocate the control data structures, and create and load the
   6298 	 * DMA map for it.
   6299 	 *
   6300 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6301 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6302 	 * both sets within the same 4G segment.
   6303 	 */
   6304 	rxq->rxq_ndesc = WM_NRXDESC;
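         	/*
         	 * The 82574 uses the extended Rx descriptor format; NEWQUEUE
         	 * (82575 and later) chips use the advanced format, and older
         	 * chips use the legacy (wiseman) format.
         	 */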
   6305 	if (sc->sc_type == WM_T_82574)
   6306 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6307 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6308 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6309 	else
   6310 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6311 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6312 
   6313 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6314 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6315 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6316 		aprint_error_dev(sc->sc_dev,
   6317 		    "unable to allocate RX control data, error = %d\n",
   6318 		    error);
   6319 		goto fail_0;
   6320 	}
   6321 
   6322 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6323 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6324 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6325 		aprint_error_dev(sc->sc_dev,
   6326 		    "unable to map RX control data, error = %d\n", error);
   6327 		goto fail_1;
   6328 	}
   6329 
   6330 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6331 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6332 		aprint_error_dev(sc->sc_dev,
   6333 		    "unable to create RX control data DMA map, error = %d\n",
   6334 		    error);
   6335 		goto fail_2;
   6336 	}
   6337 
   6338 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6339 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6340 		aprint_error_dev(sc->sc_dev,
   6341 		    "unable to load RX control data DMA map, error = %d\n",
   6342 		    error);
   6343 		goto fail_3;
   6344 	}
   6345 
   6346 	return 0;
   6347 
   6348  fail_3:
   6349 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6350  fail_2:
   6351 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6352 	    rxq_descs_size);
   6353  fail_1:
   6354 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6355  fail_0:
   6356 	return error;
   6357 }
   6358 
   6359 static void
   6360 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6361 {
   6362 
   6363 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6364 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6365 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6366 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6367 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6368 }
   6369 
   6370 
   6371 static int
   6372 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6373 {
   6374 	int i, error;
   6375 
   6376 	/* Create the transmit buffer DMA maps. */
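         	/*
         	 * The 82547 variants get a shorter transmit job queue,
         	 * presumably to keep the Tx FIFO wraparound workaround
         	 * (wm_82547_txfifo_bugchk()) manageable.
         	 */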
   6377 	WM_TXQUEUELEN(txq) =
   6378 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6379 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6380 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6381 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6382 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6383 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6384 			aprint_error_dev(sc->sc_dev,
   6385 			    "unable to create Tx DMA map %d, error = %d\n",
   6386 			    i, error);
   6387 			goto fail;
   6388 		}
   6389 	}
   6390 
   6391 	return 0;
   6392 
   6393  fail:
   6394 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6395 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6396 			bus_dmamap_destroy(sc->sc_dmat,
   6397 			    txq->txq_soft[i].txs_dmamap);
   6398 	}
   6399 	return error;
   6400 }
   6401 
   6402 static void
   6403 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6404 {
   6405 	int i;
   6406 
   6407 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6408 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6409 			bus_dmamap_destroy(sc->sc_dmat,
   6410 			    txq->txq_soft[i].txs_dmamap);
   6411 	}
   6412 }
   6413 
   6414 static int
   6415 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6416 {
   6417 	int i, error;
   6418 
   6419 	/* Create the receive buffer DMA maps. */
   6420 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6421 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6422 			    MCLBYTES, 0, 0,
   6423 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6424 			aprint_error_dev(sc->sc_dev,
    6425 			    "unable to create Rx DMA map %d, error = %d\n",
   6426 			    i, error);
   6427 			goto fail;
   6428 		}
   6429 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6430 	}
   6431 
   6432 	return 0;
   6433 
   6434  fail:
   6435 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6436 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6437 			bus_dmamap_destroy(sc->sc_dmat,
   6438 			    rxq->rxq_soft[i].rxs_dmamap);
   6439 	}
   6440 	return error;
   6441 }
   6442 
   6443 static void
   6444 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6445 {
   6446 	int i;
   6447 
   6448 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6449 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6450 			bus_dmamap_destroy(sc->sc_dmat,
   6451 			    rxq->rxq_soft[i].rxs_dmamap);
   6452 	}
   6453 }
   6454 
   6455 /*
    6456  * wm_alloc_txrx_queues:
   6457  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6458  */
   6459 static int
   6460 wm_alloc_txrx_queues(struct wm_softc *sc)
   6461 {
   6462 	int i, error, tx_done, rx_done;
   6463 
   6464 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6465 	    KM_SLEEP);
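         	/* KM_SLEEP allocations should not fail, but check defensively. */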
   6466 	if (sc->sc_queue == NULL) {
    6467 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6468 		error = ENOMEM;
   6469 		goto fail_0;
   6470 	}
   6471 
   6472 	/*
   6473 	 * For transmission
   6474 	 */
   6475 	error = 0;
   6476 	tx_done = 0;
   6477 	for (i = 0; i < sc->sc_nqueues; i++) {
   6478 #ifdef WM_EVENT_COUNTERS
   6479 		int j;
   6480 		const char *xname;
   6481 #endif
   6482 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6483 		txq->txq_sc = sc;
   6484 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6485 
   6486 		error = wm_alloc_tx_descs(sc, txq);
   6487 		if (error)
   6488 			break;
   6489 		error = wm_alloc_tx_buffer(sc, txq);
   6490 		if (error) {
   6491 			wm_free_tx_descs(sc, txq);
   6492 			break;
   6493 		}
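         		/*
         		 * txq_interq feeds the if_transmit() path; packets placed
         		 * there are drained under txq_lock by the transmit and
         		 * deferred start paths.
         		 */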
   6494 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6495 		if (txq->txq_interq == NULL) {
   6496 			wm_free_tx_descs(sc, txq);
   6497 			wm_free_tx_buffer(sc, txq);
   6498 			error = ENOMEM;
   6499 			break;
   6500 		}
   6501 
   6502 #ifdef WM_EVENT_COUNTERS
   6503 		xname = device_xname(sc->sc_dev);
   6504 
   6505 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6506 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6507 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6508 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6509 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6510 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6511 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6512 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6513 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6514 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6515 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6516 
   6517 		for (j = 0; j < WM_NTXSEGS; j++) {
   6518 			snprintf(txq->txq_txseg_evcnt_names[j],
   6519 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6520 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6521 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6522 		}
   6523 
   6524 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6525 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6526 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6527 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6528 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6529 #endif /* WM_EVENT_COUNTERS */
   6530 
   6531 		tx_done++;
   6532 	}
   6533 	if (error)
   6534 		goto fail_1;
   6535 
   6536 	/*
    6537 	 * For receive
   6538 	 */
   6539 	error = 0;
   6540 	rx_done = 0;
   6541 	for (i = 0; i < sc->sc_nqueues; i++) {
   6542 #ifdef WM_EVENT_COUNTERS
   6543 		const char *xname;
   6544 #endif
   6545 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6546 		rxq->rxq_sc = sc;
   6547 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6548 
   6549 		error = wm_alloc_rx_descs(sc, rxq);
   6550 		if (error)
   6551 			break;
   6552 
   6553 		error = wm_alloc_rx_buffer(sc, rxq);
   6554 		if (error) {
   6555 			wm_free_rx_descs(sc, rxq);
   6556 			break;
   6557 		}
   6558 
   6559 #ifdef WM_EVENT_COUNTERS
   6560 		xname = device_xname(sc->sc_dev);
   6561 
   6562 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6563 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6564 
   6565 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6566 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6567 #endif /* WM_EVENT_COUNTERS */
   6568 
   6569 		rx_done++;
   6570 	}
   6571 	if (error)
   6572 		goto fail_2;
   6573 
   6574 	return 0;
   6575 
   6576  fail_2:
   6577 	for (i = 0; i < rx_done; i++) {
   6578 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6579 		wm_free_rx_buffer(sc, rxq);
   6580 		wm_free_rx_descs(sc, rxq);
   6581 		if (rxq->rxq_lock)
   6582 			mutex_obj_free(rxq->rxq_lock);
   6583 	}
   6584  fail_1:
   6585 	for (i = 0; i < tx_done; i++) {
   6586 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6587 		pcq_destroy(txq->txq_interq);
   6588 		wm_free_tx_buffer(sc, txq);
   6589 		wm_free_tx_descs(sc, txq);
   6590 		if (txq->txq_lock)
   6591 			mutex_obj_free(txq->txq_lock);
   6592 	}
   6593 
   6594 	kmem_free(sc->sc_queue,
   6595 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6596  fail_0:
   6597 	return error;
   6598 }
   6599 
   6600 /*
    6601  * wm_free_txrx_queues:
   6602  *	Free {tx,rx}descs and {tx,rx} buffers
   6603  */
   6604 static void
   6605 wm_free_txrx_queues(struct wm_softc *sc)
   6606 {
   6607 	int i;
   6608 
   6609 	for (i = 0; i < sc->sc_nqueues; i++) {
   6610 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6611 
   6612 #ifdef WM_EVENT_COUNTERS
   6613 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6614 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6615 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6616 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6617 #endif /* WM_EVENT_COUNTERS */
   6618 
   6619 		wm_free_rx_buffer(sc, rxq);
   6620 		wm_free_rx_descs(sc, rxq);
   6621 		if (rxq->rxq_lock)
   6622 			mutex_obj_free(rxq->rxq_lock);
   6623 	}
   6624 
   6625 	for (i = 0; i < sc->sc_nqueues; i++) {
   6626 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6627 		struct mbuf *m;
   6628 #ifdef WM_EVENT_COUNTERS
   6629 		int j;
   6630 
   6631 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6632 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6633 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6634 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6635 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6636 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6637 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6638 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6639 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6640 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6641 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6642 
   6643 		for (j = 0; j < WM_NTXSEGS; j++)
   6644 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6645 
   6646 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6647 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6648 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6649 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6650 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6651 #endif /* WM_EVENT_COUNTERS */
   6652 
   6653 		/* drain txq_interq */
   6654 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6655 			m_freem(m);
   6656 		pcq_destroy(txq->txq_interq);
   6657 
   6658 		wm_free_tx_buffer(sc, txq);
   6659 		wm_free_tx_descs(sc, txq);
   6660 		if (txq->txq_lock)
   6661 			mutex_obj_free(txq->txq_lock);
   6662 	}
   6663 
   6664 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6665 }
   6666 
   6667 static void
   6668 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6669 {
   6670 
   6671 	KASSERT(mutex_owned(txq->txq_lock));
   6672 
   6673 	/* Initialize the transmit descriptor ring. */
   6674 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6675 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6676 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6677 	txq->txq_free = WM_NTXDESC(txq);
   6678 	txq->txq_next = 0;
   6679 }
   6680 
   6681 static void
   6682 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6683     struct wm_txqueue *txq)
   6684 {
   6685 
   6686 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6687 		device_xname(sc->sc_dev), __func__));
   6688 	KASSERT(mutex_owned(txq->txq_lock));
   6689 
   6690 	if (sc->sc_type < WM_T_82543) {
   6691 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6692 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6693 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6694 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6695 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6696 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6697 	} else {
   6698 		int qid = wmq->wmq_id;
   6699 
   6700 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6701 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6702 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6703 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6704 
   6705 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6706 			/*
   6707 			 * Don't write TDT before TCTL.EN is set.
    6708 			 * See the datasheet.
   6709 			 */
   6710 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6711 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6712 			    | TXDCTL_WTHRESH(0));
   6713 		else {
   6714 			/* XXX should update with AIM? */
   6715 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6716 			if (sc->sc_type >= WM_T_82540) {
    6717 				/* TADV should hold the same value as TIDV */
   6718 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6719 			}
   6720 
   6721 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6722 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6723 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6724 		}
   6725 	}
   6726 }
   6727 
   6728 static void
   6729 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6730 {
   6731 	int i;
   6732 
   6733 	KASSERT(mutex_owned(txq->txq_lock));
   6734 
   6735 	/* Initialize the transmit job descriptors. */
   6736 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6737 		txq->txq_soft[i].txs_mbuf = NULL;
   6738 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6739 	txq->txq_snext = 0;
   6740 	txq->txq_sdirty = 0;
   6741 }
   6742 
   6743 static void
   6744 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6745     struct wm_txqueue *txq)
   6746 {
   6747 
   6748 	KASSERT(mutex_owned(txq->txq_lock));
   6749 
   6750 	/*
   6751 	 * Set up some register offsets that are different between
   6752 	 * the i82542 and the i82543 and later chips.
   6753 	 */
   6754 	if (sc->sc_type < WM_T_82543)
   6755 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6756 	else
   6757 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6758 
   6759 	wm_init_tx_descs(sc, txq);
   6760 	wm_init_tx_regs(sc, wmq, txq);
   6761 	wm_init_tx_buffer(sc, txq);
   6762 
   6763 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6764 	txq->txq_sending = false;
   6765 }
   6766 
   6767 static void
   6768 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6769     struct wm_rxqueue *rxq)
   6770 {
   6771 
   6772 	KASSERT(mutex_owned(rxq->rxq_lock));
   6773 
   6774 	/*
   6775 	 * Initialize the receive descriptor and receive job
   6776 	 * descriptor rings.
   6777 	 */
   6778 	if (sc->sc_type < WM_T_82543) {
   6779 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6780 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6781 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6782 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6783 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6784 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
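         		/*
         		 * 28 is the Rx interrupt delay timer value; FPD flushes
         		 * partially filled packet descriptor blocks.
         		 */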
   6785 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6786 
   6787 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6788 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6789 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6790 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6791 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6792 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6793 	} else {
   6794 		int qid = wmq->wmq_id;
   6795 
   6796 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6797 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6798 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6799 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6800 
   6801 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
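         			/*
         			 * SRRCTL expresses the receive buffer size in 1KB
         			 * units (SRRCTL_BSIZEPKT_SHIFT), so MCLBYTES must
         			 * be a multiple of 1KB here.
         			 */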
   6802 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6803 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6804 
    6805 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6806 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6807 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6808 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6809 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6810 			    | RXDCTL_WTHRESH(1));
   6811 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6812 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6813 		} else {
   6814 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6815 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6816 			/* XXX should update with AIM? */
   6817 			CSR_WRITE(sc, WMREG_RDTR,
   6818 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    6819 			/* RADV MUST hold the same value as RDTR */
   6820 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6821 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6822 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6823 		}
   6824 	}
   6825 }
   6826 
   6827 static int
   6828 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6829 {
   6830 	struct wm_rxsoft *rxs;
   6831 	int error, i;
   6832 
   6833 	KASSERT(mutex_owned(rxq->rxq_lock));
   6834 
   6835 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6836 		rxs = &rxq->rxq_soft[i];
   6837 		if (rxs->rxs_mbuf == NULL) {
   6838 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6839 				log(LOG_ERR, "%s: unable to allocate or map "
   6840 				    "rx buffer %d, error = %d\n",
   6841 				    device_xname(sc->sc_dev), i, error);
   6842 				/*
   6843 				 * XXX Should attempt to run with fewer receive
   6844 				 * XXX buffers instead of just failing.
   6845 				 */
   6846 				wm_rxdrain(rxq);
   6847 				return ENOMEM;
   6848 			}
   6849 		} else {
   6850 			/*
    6851 			 * On 82575 and 82576, the Rx descriptors must be
    6852 			 * initialized after RCTL.EN is set in
    6853 			 * wm_set_filter().
   6854 			 */
   6855 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6856 				wm_init_rxdesc(rxq, i);
   6857 		}
   6858 	}
   6859 	rxq->rxq_ptr = 0;
   6860 	rxq->rxq_discard = 0;
   6861 	WM_RXCHAIN_RESET(rxq);
   6862 
   6863 	return 0;
   6864 }
   6865 
   6866 static int
   6867 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6868     struct wm_rxqueue *rxq)
   6869 {
   6870 
   6871 	KASSERT(mutex_owned(rxq->rxq_lock));
   6872 
   6873 	/*
   6874 	 * Set up some register offsets that are different between
   6875 	 * the i82542 and the i82543 and later chips.
   6876 	 */
   6877 	if (sc->sc_type < WM_T_82543)
   6878 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6879 	else
   6880 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6881 
   6882 	wm_init_rx_regs(sc, wmq, rxq);
   6883 	return wm_init_rx_buffer(sc, rxq);
   6884 }
   6885 
   6886 /*
    6887  * wm_init_txrx_queues:
   6888  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6889  */
   6890 static int
   6891 wm_init_txrx_queues(struct wm_softc *sc)
   6892 {
   6893 	int i, error = 0;
   6894 
   6895 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6896 		device_xname(sc->sc_dev), __func__));
   6897 
   6898 	for (i = 0; i < sc->sc_nqueues; i++) {
   6899 		struct wm_queue *wmq = &sc->sc_queue[i];
   6900 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6901 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6902 
   6903 		/*
   6904 		 * TODO
    6905 		 * Currently, we use a constant value instead of AIM.
    6906 		 * Furthermore, the interrupt interval of multiqueue, which
    6907 		 * uses polling mode, is shorter than the default value.
    6908 		 * More tuning and AIM support are required.
   6909 		 */
   6910 		if (wm_is_using_multiqueue(sc))
   6911 			wmq->wmq_itr = 50;
   6912 		else
   6913 			wmq->wmq_itr = sc->sc_itr_init;
   6914 		wmq->wmq_set_itr = true;
   6915 
   6916 		mutex_enter(txq->txq_lock);
   6917 		wm_init_tx_queue(sc, wmq, txq);
   6918 		mutex_exit(txq->txq_lock);
   6919 
   6920 		mutex_enter(rxq->rxq_lock);
   6921 		error = wm_init_rx_queue(sc, wmq, rxq);
   6922 		mutex_exit(rxq->rxq_lock);
   6923 		if (error)
   6924 			break;
   6925 	}
   6926 
   6927 	return error;
   6928 }
   6929 
   6930 /*
   6931  * wm_tx_offload:
   6932  *
   6933  *	Set up TCP/IP checksumming parameters for the
   6934  *	specified packet.
   6935  */
   6936 static int
   6937 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6938     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6939 {
   6940 	struct mbuf *m0 = txs->txs_mbuf;
   6941 	struct livengood_tcpip_ctxdesc *t;
   6942 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6943 	uint32_t ipcse;
   6944 	struct ether_header *eh;
   6945 	int offset, iphl;
   6946 	uint8_t fields;
   6947 
   6948 	/*
   6949 	 * XXX It would be nice if the mbuf pkthdr had offset
   6950 	 * fields for the protocol headers.
   6951 	 */
   6952 
   6953 	eh = mtod(m0, struct ether_header *);
   6954 	switch (htons(eh->ether_type)) {
   6955 	case ETHERTYPE_IP:
   6956 	case ETHERTYPE_IPV6:
   6957 		offset = ETHER_HDR_LEN;
   6958 		break;
   6959 
   6960 	case ETHERTYPE_VLAN:
   6961 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6962 		break;
   6963 
   6964 	default:
   6965 		/*
   6966 		 * Don't support this protocol or encapsulation.
   6967 		 */
   6968 		*fieldsp = 0;
   6969 		*cmdp = 0;
   6970 		return 0;
   6971 	}
   6972 
   6973 	if ((m0->m_pkthdr.csum_flags &
   6974 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6975 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6976 	} else
   6977 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   6978 
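         	/* IPCSE points at the last byte (inclusive) of the IP header. */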
   6979 	ipcse = offset + iphl - 1;
   6980 
   6981 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6982 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6983 	seg = 0;
   6984 	fields = 0;
   6985 
   6986 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6987 		int hlen = offset + iphl;
   6988 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6989 
   6990 		if (__predict_false(m0->m_len <
   6991 				    (hlen + sizeof(struct tcphdr)))) {
   6992 			/*
   6993 			 * TCP/IP headers are not in the first mbuf; we need
   6994 			 * to do this the slow and painful way. Let's just
   6995 			 * hope this doesn't happen very often.
   6996 			 */
   6997 			struct tcphdr th;
   6998 
   6999 			WM_Q_EVCNT_INCR(txq, tsopain);
   7000 
   7001 			m_copydata(m0, hlen, sizeof(th), &th);
   7002 			if (v4) {
   7003 				struct ip ip;
   7004 
   7005 				m_copydata(m0, offset, sizeof(ip), &ip);
   7006 				ip.ip_len = 0;
   7007 				m_copyback(m0,
   7008 				    offset + offsetof(struct ip, ip_len),
   7009 				    sizeof(ip.ip_len), &ip.ip_len);
   7010 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7011 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7012 			} else {
   7013 				struct ip6_hdr ip6;
   7014 
   7015 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7016 				ip6.ip6_plen = 0;
   7017 				m_copyback(m0,
   7018 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7019 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7020 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7021 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7022 			}
   7023 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7024 			    sizeof(th.th_sum), &th.th_sum);
   7025 
   7026 			hlen += th.th_off << 2;
   7027 		} else {
   7028 			/*
   7029 			 * TCP/IP headers are in the first mbuf; we can do
   7030 			 * this the easy way.
   7031 			 */
   7032 			struct tcphdr *th;
   7033 
   7034 			if (v4) {
   7035 				struct ip *ip =
   7036 				    (void *)(mtod(m0, char *) + offset);
   7037 				th = (void *)(mtod(m0, char *) + hlen);
   7038 
   7039 				ip->ip_len = 0;
   7040 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7041 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7042 			} else {
   7043 				struct ip6_hdr *ip6 =
   7044 				    (void *)(mtod(m0, char *) + offset);
   7045 				th = (void *)(mtod(m0, char *) + hlen);
   7046 
   7047 				ip6->ip6_plen = 0;
   7048 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7049 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7050 			}
   7051 			hlen += th->th_off << 2;
   7052 		}
   7053 
   7054 		if (v4) {
   7055 			WM_Q_EVCNT_INCR(txq, tso);
   7056 			cmdlen |= WTX_TCPIP_CMD_IP;
   7057 		} else {
   7058 			WM_Q_EVCNT_INCR(txq, tso6);
   7059 			ipcse = 0;
   7060 		}
   7061 		cmd |= WTX_TCPIP_CMD_TSE;
   7062 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7063 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7064 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7065 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7066 	}
   7067 
   7068 	/*
   7069 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7070 	 * offload feature, if we load the context descriptor, we
   7071 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7072 	 */
   7073 
   7074 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7075 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7076 	    WTX_TCPIP_IPCSE(ipcse);
   7077 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7078 		WM_Q_EVCNT_INCR(txq, ipsum);
   7079 		fields |= WTX_IXSM;
   7080 	}
   7081 
   7082 	offset += iphl;
   7083 
   7084 	if (m0->m_pkthdr.csum_flags &
   7085 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7086 		WM_Q_EVCNT_INCR(txq, tusum);
   7087 		fields |= WTX_TXSM;
   7088 		tucs = WTX_TCPIP_TUCSS(offset) |
   7089 		    WTX_TCPIP_TUCSO(offset +
   7090 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7091 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7092 	} else if ((m0->m_pkthdr.csum_flags &
   7093 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7094 		WM_Q_EVCNT_INCR(txq, tusum6);
   7095 		fields |= WTX_TXSM;
   7096 		tucs = WTX_TCPIP_TUCSS(offset) |
   7097 		    WTX_TCPIP_TUCSO(offset +
   7098 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7099 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7100 	} else {
   7101 		/* Just initialize it to a valid TCP context. */
   7102 		tucs = WTX_TCPIP_TUCSS(offset) |
   7103 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7104 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7105 	}
   7106 
   7107 	/*
    7108 	 * We don't have to write a context descriptor for every packet,
    7109 	 * except on the 82574: there, a context descriptor must be written
    7110 	 * for every packet when two descriptor queues are used.
    7111 	 * Writing a context descriptor for every packet adds overhead,
    7112 	 * but it does not cause problems.
   7113 	 */
   7114 	/* Fill in the context descriptor. */
   7115 	t = (struct livengood_tcpip_ctxdesc *)
   7116 	    &txq->txq_descs[txq->txq_next];
   7117 	t->tcpip_ipcs = htole32(ipcs);
   7118 	t->tcpip_tucs = htole32(tucs);
   7119 	t->tcpip_cmdlen = htole32(cmdlen);
   7120 	t->tcpip_seg = htole32(seg);
   7121 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7122 
   7123 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7124 	txs->txs_ndesc++;
   7125 
   7126 	*cmdp = cmd;
   7127 	*fieldsp = fields;
   7128 
   7129 	return 0;
   7130 }
   7131 
   7132 static inline int
   7133 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7134 {
   7135 	struct wm_softc *sc = ifp->if_softc;
   7136 	u_int cpuid = cpu_index(curcpu());
   7137 
   7138 	/*
    7139 	 * Currently, a simple distribution by CPU index; e.g. with
    7140 	 * sc_nqueues == 4, successive CPUs map to successive queues.
    7141 	 * TODO: distribute by flowid (the RSS hash value).
   7142 	 */
   7143 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7144 }
   7145 
   7146 /*
   7147  * wm_start:		[ifnet interface function]
   7148  *
   7149  *	Start packet transmission on the interface.
   7150  */
   7151 static void
   7152 wm_start(struct ifnet *ifp)
   7153 {
   7154 	struct wm_softc *sc = ifp->if_softc;
   7155 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7156 
   7157 #ifdef WM_MPSAFE
   7158 	KASSERT(if_is_mpsafe(ifp));
   7159 #endif
   7160 	/*
   7161 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7162 	 */
   7163 
   7164 	mutex_enter(txq->txq_lock);
   7165 	if (!txq->txq_stopping)
   7166 		wm_start_locked(ifp);
   7167 	mutex_exit(txq->txq_lock);
   7168 }
   7169 
   7170 static void
   7171 wm_start_locked(struct ifnet *ifp)
   7172 {
   7173 	struct wm_softc *sc = ifp->if_softc;
   7174 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7175 
   7176 	wm_send_common_locked(ifp, txq, false);
   7177 }
   7178 
   7179 static int
   7180 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7181 {
   7182 	int qid;
   7183 	struct wm_softc *sc = ifp->if_softc;
   7184 	struct wm_txqueue *txq;
   7185 
   7186 	qid = wm_select_txqueue(ifp, m);
   7187 	txq = &sc->sc_queue[qid].wmq_txq;
   7188 
   7189 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7190 		m_freem(m);
   7191 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7192 		return ENOBUFS;
   7193 	}
   7194 
   7195 	/*
   7196 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7197 	 */
   7198 	ifp->if_obytes += m->m_pkthdr.len;
   7199 	if (m->m_flags & M_MCAST)
   7200 		ifp->if_omcasts++;
   7201 
   7202 	if (mutex_tryenter(txq->txq_lock)) {
   7203 		if (!txq->txq_stopping)
   7204 			wm_transmit_locked(ifp, txq);
   7205 		mutex_exit(txq->txq_lock);
   7206 	}
   7207 
   7208 	return 0;
   7209 }
   7210 
   7211 static void
   7212 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7213 {
   7214 
   7215 	wm_send_common_locked(ifp, txq, true);
   7216 }
   7217 
   7218 static void
   7219 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7220     bool is_transmit)
   7221 {
   7222 	struct wm_softc *sc = ifp->if_softc;
   7223 	struct mbuf *m0;
   7224 	struct wm_txsoft *txs;
   7225 	bus_dmamap_t dmamap;
   7226 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7227 	bus_addr_t curaddr;
   7228 	bus_size_t seglen, curlen;
   7229 	uint32_t cksumcmd;
   7230 	uint8_t cksumfields;
   7231 	bool remap = true;
   7232 
   7233 	KASSERT(mutex_owned(txq->txq_lock));
   7234 
   7235 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7236 		return;
   7237 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7238 		return;
   7239 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7240 		return;
   7241 
   7242 	/* Remember the previous number of free descriptors. */
   7243 	ofree = txq->txq_free;
   7244 
   7245 	/*
   7246 	 * Loop through the send queue, setting up transmit descriptors
   7247 	 * until we drain the queue, or use up all available transmit
   7248 	 * descriptors.
   7249 	 */
   7250 	for (;;) {
   7251 		m0 = NULL;
   7252 
   7253 		/* Get a work queue entry. */
   7254 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7255 			wm_txeof(txq, UINT_MAX);
   7256 			if (txq->txq_sfree == 0) {
   7257 				DPRINTF(WM_DEBUG_TX,
   7258 				    ("%s: TX: no free job descriptors\n",
   7259 					device_xname(sc->sc_dev)));
   7260 				WM_Q_EVCNT_INCR(txq, txsstall);
   7261 				break;
   7262 			}
   7263 		}
   7264 
   7265 		/* Grab a packet off the queue. */
   7266 		if (is_transmit)
   7267 			m0 = pcq_get(txq->txq_interq);
   7268 		else
   7269 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7270 		if (m0 == NULL)
   7271 			break;
   7272 
   7273 		DPRINTF(WM_DEBUG_TX,
   7274 		    ("%s: TX: have packet to transmit: %p\n",
   7275 			device_xname(sc->sc_dev), m0));
   7276 
   7277 		txs = &txq->txq_soft[txq->txq_snext];
   7278 		dmamap = txs->txs_dmamap;
   7279 
   7280 		use_tso = (m0->m_pkthdr.csum_flags &
   7281 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7282 
   7283 		/*
   7284 		 * So says the Linux driver:
   7285 		 * The controller does a simple calculation to make sure
   7286 		 * there is enough room in the FIFO before initiating the
   7287 		 * DMA for each buffer. The calc is:
   7288 		 *	4 = ceil(buffer len / MSS)
   7289 		 * To make sure we don't overrun the FIFO, adjust the max
   7290 		 * buffer len if the MSS drops.
   7291 		 */
   7292 		dmamap->dm_maxsegsz =
   7293 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7294 		    ? m0->m_pkthdr.segsz << 2
   7295 		    : WTX_MAX_LEN;
   7296 
   7297 		/*
   7298 		 * Load the DMA map.  If this fails, the packet either
   7299 		 * didn't fit in the allotted number of segments, or we
   7300 		 * were short on resources.  For the too-many-segments
   7301 		 * case, we simply report an error and drop the packet,
   7302 		 * since we can't sanely copy a jumbo packet to a single
   7303 		 * buffer.
   7304 		 */
   7305 retry:
   7306 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7307 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7308 		if (__predict_false(error)) {
   7309 			if (error == EFBIG) {
   7310 				if (remap == true) {
   7311 					struct mbuf *m;
   7312 
   7313 					remap = false;
   7314 					m = m_defrag(m0, M_NOWAIT);
   7315 					if (m != NULL) {
   7316 						WM_Q_EVCNT_INCR(txq, defrag);
   7317 						m0 = m;
   7318 						goto retry;
   7319 					}
   7320 				}
   7321 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7322 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7323 				    "DMA segments, dropping...\n",
   7324 				    device_xname(sc->sc_dev));
   7325 				wm_dump_mbuf_chain(sc, m0);
   7326 				m_freem(m0);
   7327 				continue;
   7328 			}
    7329 			/* Short on resources, just stop for now. */
   7330 			DPRINTF(WM_DEBUG_TX,
   7331 			    ("%s: TX: dmamap load failed: %d\n",
   7332 				device_xname(sc->sc_dev), error));
   7333 			break;
   7334 		}
   7335 
   7336 		segs_needed = dmamap->dm_nsegs;
   7337 		if (use_tso) {
   7338 			/* For sentinel descriptor; see below. */
   7339 			segs_needed++;
   7340 		}
   7341 
   7342 		/*
   7343 		 * Ensure we have enough descriptors free to describe
   7344 		 * the packet. Note, we always reserve one descriptor
   7345 		 * at the end of the ring due to the semantics of the
   7346 		 * TDT register, plus one more in the event we need
   7347 		 * to load offload context.
   7348 		 */
   7349 		if (segs_needed > txq->txq_free - 2) {
   7350 			/*
   7351 			 * Not enough free descriptors to transmit this
   7352 			 * packet.  We haven't committed anything yet,
   7353 			 * so just unload the DMA map, put the packet
    7354 			 * back on the queue, and punt. Notify the upper
   7355 			 * layer that there are no more slots left.
   7356 			 */
   7357 			DPRINTF(WM_DEBUG_TX,
   7358 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7359 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7360 				segs_needed, txq->txq_free - 1));
   7361 			if (!is_transmit)
   7362 				ifp->if_flags |= IFF_OACTIVE;
   7363 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7364 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7365 			WM_Q_EVCNT_INCR(txq, txdstall);
   7366 			break;
   7367 		}
   7368 
   7369 		/*
   7370 		 * Check for 82547 Tx FIFO bug. We need to do this
   7371 		 * once we know we can transmit the packet, since we
   7372 		 * do some internal FIFO space accounting here.
   7373 		 */
   7374 		if (sc->sc_type == WM_T_82547 &&
   7375 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7376 			DPRINTF(WM_DEBUG_TX,
   7377 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7378 				device_xname(sc->sc_dev)));
   7379 			if (!is_transmit)
   7380 				ifp->if_flags |= IFF_OACTIVE;
   7381 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7382 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7383 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7384 			break;
   7385 		}
   7386 
   7387 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7388 
   7389 		DPRINTF(WM_DEBUG_TX,
   7390 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7391 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7392 
   7393 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7394 
   7395 		/*
   7396 		 * Store a pointer to the packet so that we can free it
   7397 		 * later.
   7398 		 *
   7399 		 * Initially, we consider the number of descriptors the
   7400 		 * packet uses the number of DMA segments.  This may be
   7401 		 * incremented by 1 if we do checksum offload (a descriptor
   7402 		 * is used to set the checksum context).
   7403 		 */
   7404 		txs->txs_mbuf = m0;
   7405 		txs->txs_firstdesc = txq->txq_next;
   7406 		txs->txs_ndesc = segs_needed;
   7407 
   7408 		/* Set up offload parameters for this packet. */
   7409 		if (m0->m_pkthdr.csum_flags &
   7410 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7411 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7412 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7413 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7414 					  &cksumfields) != 0) {
   7415 				/* Error message already displayed. */
   7416 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7417 				continue;
   7418 			}
   7419 		} else {
   7420 			cksumcmd = 0;
   7421 			cksumfields = 0;
   7422 		}
   7423 
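         		/*
         		 * IDE delays the Tx interrupt; IFCS makes the chip
         		 * insert the Ethernet FCS.
         		 */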
   7424 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7425 
   7426 		/* Sync the DMA map. */
   7427 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7428 		    BUS_DMASYNC_PREWRITE);
   7429 
   7430 		/* Initialize the transmit descriptor. */
   7431 		for (nexttx = txq->txq_next, seg = 0;
   7432 		     seg < dmamap->dm_nsegs; seg++) {
   7433 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7434 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7435 			     seglen != 0;
   7436 			     curaddr += curlen, seglen -= curlen,
   7437 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7438 				curlen = seglen;
   7439 
   7440 				/*
   7441 				 * So says the Linux driver:
   7442 				 * Work around for premature descriptor
   7443 				 * write-backs in TSO mode.  Append a
   7444 				 * 4-byte sentinel descriptor.
   7445 				 */
   7446 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7447 				    curlen > 8)
   7448 					curlen -= 4;
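         				/*
         				 * The 4 bytes trimmed here become a
         				 * separate descriptor on the next loop
         				 * iteration: the sentinel.
         				 */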
   7449 
   7450 				wm_set_dma_addr(
   7451 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7452 				txq->txq_descs[nexttx].wtx_cmdlen
   7453 				    = htole32(cksumcmd | curlen);
   7454 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7455 				    = 0;
   7456 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7457 				    = cksumfields;
    7458 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7459 				lasttx = nexttx;
   7460 
   7461 				DPRINTF(WM_DEBUG_TX,
   7462 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7463 					"len %#04zx\n",
   7464 					device_xname(sc->sc_dev), nexttx,
   7465 					(uint64_t)curaddr, curlen));
   7466 			}
   7467 		}
   7468 
   7469 		KASSERT(lasttx != -1);
   7470 
   7471 		/*
   7472 		 * Set up the command byte on the last descriptor of
   7473 		 * the packet. If we're in the interrupt delay window,
   7474 		 * delay the interrupt.
   7475 		 */
   7476 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7477 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
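         		/*
         		 * EOP marks the final descriptor of the packet; RS asks
         		 * the chip to report status, which wm_txeof() uses to
         		 * reclaim completed jobs.
         		 */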
   7478 
   7479 		/*
   7480 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7481 		 * up the descriptor to encapsulate the packet for us.
   7482 		 *
   7483 		 * This is only valid on the last descriptor of the packet.
   7484 		 */
   7485 		if (vlan_has_tag(m0)) {
   7486 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7487 			    htole32(WTX_CMD_VLE);
   7488 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7489 			    = htole16(vlan_get_tag(m0));
   7490 		}
   7491 
   7492 		txs->txs_lastdesc = lasttx;
   7493 
   7494 		DPRINTF(WM_DEBUG_TX,
   7495 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7496 			device_xname(sc->sc_dev),
   7497 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7498 
   7499 		/* Sync the descriptors we're using. */
   7500 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7501 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7502 
   7503 		/* Give the packet to the chip. */
   7504 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7505 
   7506 		DPRINTF(WM_DEBUG_TX,
   7507 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7508 
   7509 		DPRINTF(WM_DEBUG_TX,
   7510 		    ("%s: TX: finished transmitting packet, job %d\n",
   7511 			device_xname(sc->sc_dev), txq->txq_snext));
   7512 
   7513 		/* Advance the tx pointer. */
   7514 		txq->txq_free -= txs->txs_ndesc;
   7515 		txq->txq_next = nexttx;
   7516 
   7517 		txq->txq_sfree--;
   7518 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7519 
   7520 		/* Pass the packet to any BPF listeners. */
   7521 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7522 	}
   7523 
   7524 	if (m0 != NULL) {
   7525 		if (!is_transmit)
   7526 			ifp->if_flags |= IFF_OACTIVE;
   7527 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7528 		WM_Q_EVCNT_INCR(txq, descdrop);
   7529 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7530 			__func__));
   7531 		m_freem(m0);
   7532 	}
   7533 
   7534 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7535 		/* No more slots; notify upper layer. */
   7536 		if (!is_transmit)
   7537 			ifp->if_flags |= IFF_OACTIVE;
   7538 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7539 	}
   7540 
   7541 	if (txq->txq_free != ofree) {
   7542 		/* Set a watchdog timer in case the chip flakes out. */
   7543 		txq->txq_lastsent = time_uptime;
   7544 		txq->txq_sending = true;
   7545 	}
   7546 }
   7547 
   7548 /*
   7549  * wm_nq_tx_offload:
   7550  *
   7551  *	Set up TCP/IP checksumming parameters for the
   7552  *	specified packet, for NEWQUEUE devices
   7553  */
   7554 static int
   7555 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7556     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7557 {
   7558 	struct mbuf *m0 = txs->txs_mbuf;
   7559 	uint32_t vl_len, mssidx, cmdc;
   7560 	struct ether_header *eh;
   7561 	int offset, iphl;
   7562 
   7563 	/*
   7564 	 * XXX It would be nice if the mbuf pkthdr had offset
   7565 	 * fields for the protocol headers.
   7566 	 */
   7567 	*cmdlenp = 0;
   7568 	*fieldsp = 0;
   7569 
   7570 	eh = mtod(m0, struct ether_header *);
   7571 	switch (htons(eh->ether_type)) {
   7572 	case ETHERTYPE_IP:
   7573 	case ETHERTYPE_IPV6:
   7574 		offset = ETHER_HDR_LEN;
   7575 		break;
   7576 
   7577 	case ETHERTYPE_VLAN:
   7578 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7579 		break;
   7580 
   7581 	default:
   7582 		/* Don't support this protocol or encapsulation. */
   7583 		*do_csum = false;
   7584 		return 0;
   7585 	}
   7586 	*do_csum = true;
   7587 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7588 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
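         	/*
         	 * DTYP_D marks the data descriptors; DTYP_C marks the context
         	 * descriptor written at the end of this function.
         	 */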
   7589 
   7590 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7591 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7592 
   7593 	if ((m0->m_pkthdr.csum_flags &
   7594 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7595 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7596 	} else {
   7597 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7598 	}
   7599 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7600 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7601 
   7602 	if (vlan_has_tag(m0)) {
   7603 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7604 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7605 		*cmdlenp |= NQTX_CMD_VLE;
   7606 	}
   7607 
   7608 	mssidx = 0;
   7609 
   7610 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7611 		int hlen = offset + iphl;
   7612 		int tcp_hlen;
   7613 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7614 
   7615 		if (__predict_false(m0->m_len <
   7616 				    (hlen + sizeof(struct tcphdr)))) {
   7617 			/*
   7618 			 * TCP/IP headers are not in the first mbuf; we need
   7619 			 * to do this the slow and painful way. Let's just
   7620 			 * hope this doesn't happen very often.
   7621 			 */
   7622 			struct tcphdr th;
   7623 
   7624 			WM_Q_EVCNT_INCR(txq, tsopain);
   7625 
   7626 			m_copydata(m0, hlen, sizeof(th), &th);
   7627 			if (v4) {
   7628 				struct ip ip;
   7629 
   7630 				m_copydata(m0, offset, sizeof(ip), &ip);
   7631 				ip.ip_len = 0;
   7632 				m_copyback(m0,
   7633 				    offset + offsetof(struct ip, ip_len),
   7634 				    sizeof(ip.ip_len), &ip.ip_len);
   7635 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7636 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7637 			} else {
   7638 				struct ip6_hdr ip6;
   7639 
   7640 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7641 				ip6.ip6_plen = 0;
   7642 				m_copyback(m0,
   7643 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7644 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7645 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7646 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7647 			}
   7648 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7649 			    sizeof(th.th_sum), &th.th_sum);
   7650 
   7651 			tcp_hlen = th.th_off << 2;
   7652 		} else {
   7653 			/*
   7654 			 * TCP/IP headers are in the first mbuf; we can do
   7655 			 * this the easy way.
   7656 			 */
   7657 			struct tcphdr *th;
   7658 
   7659 			if (v4) {
   7660 				struct ip *ip =
   7661 				    (void *)(mtod(m0, char *) + offset);
   7662 				th = (void *)(mtod(m0, char *) + hlen);
   7663 
   7664 				ip->ip_len = 0;
   7665 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7666 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7667 			} else {
   7668 				struct ip6_hdr *ip6 =
   7669 				    (void *)(mtod(m0, char *) + offset);
   7670 				th = (void *)(mtod(m0, char *) + hlen);
   7671 
   7672 				ip6->ip6_plen = 0;
   7673 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7674 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7675 			}
   7676 			tcp_hlen = th->th_off << 2;
   7677 		}
   7678 		hlen += tcp_hlen;
   7679 		*cmdlenp |= NQTX_CMD_TSE;
   7680 
   7681 		if (v4) {
   7682 			WM_Q_EVCNT_INCR(txq, tso);
   7683 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7684 		} else {
   7685 			WM_Q_EVCNT_INCR(txq, tso6);
   7686 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7687 		}
   7688 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7689 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7690 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7691 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7692 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7693 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7694 	} else {
   7695 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7696 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7697 	}
   7698 
   7699 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7700 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7701 		cmdc |= NQTXC_CMD_IP4;
   7702 	}
   7703 
   7704 	if (m0->m_pkthdr.csum_flags &
   7705 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7706 		WM_Q_EVCNT_INCR(txq, tusum);
   7707 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7708 			cmdc |= NQTXC_CMD_TCP;
   7709 		else
   7710 			cmdc |= NQTXC_CMD_UDP;
   7711 
   7712 		cmdc |= NQTXC_CMD_IP4;
   7713 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7714 	}
   7715 	if (m0->m_pkthdr.csum_flags &
   7716 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7717 		WM_Q_EVCNT_INCR(txq, tusum6);
   7718 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7719 			cmdc |= NQTXC_CMD_TCP;
   7720 		else
   7721 			cmdc |= NQTXC_CMD_UDP;
   7722 
   7723 		cmdc |= NQTXC_CMD_IP6;
   7724 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7725 	}
   7726 
   7727 	/*
    7728 	 * We don't have to write a context descriptor for every packet on
    7729 	 * NEWQUEUE controllers (82575, 82576, 82580, I350, I354, I210 and
    7730 	 * I211); it is enough to write one per Tx queue for these
    7731 	 * controllers.
    7732 	 * Writing a context descriptor for every packet adds overhead,
    7733 	 * but it does not cause problems.
   7734 	 */
   7735 	/* Fill in the context descriptor. */
   7736 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7737 	    htole32(vl_len);
   7738 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7739 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7740 	    htole32(cmdc);
   7741 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7742 	    htole32(mssidx);
   7743 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7744 	DPRINTF(WM_DEBUG_TX,
   7745 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7746 		txq->txq_next, 0, vl_len));
   7747 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7748 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7749 	txs->txs_ndesc++;
   7750 	return 0;
   7751 }
   7752 
   7753 /*
   7754  * wm_nq_start:		[ifnet interface function]
   7755  *
   7756  *	Start packet transmission on the interface for NEWQUEUE devices
   7757  */
   7758 static void
   7759 wm_nq_start(struct ifnet *ifp)
   7760 {
   7761 	struct wm_softc *sc = ifp->if_softc;
   7762 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7763 
   7764 #ifdef WM_MPSAFE
   7765 	KASSERT(if_is_mpsafe(ifp));
   7766 #endif
   7767 	/*
   7768 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7769 	 */
   7770 
   7771 	mutex_enter(txq->txq_lock);
   7772 	if (!txq->txq_stopping)
   7773 		wm_nq_start_locked(ifp);
   7774 	mutex_exit(txq->txq_lock);
   7775 }
   7776 
   7777 static void
   7778 wm_nq_start_locked(struct ifnet *ifp)
   7779 {
   7780 	struct wm_softc *sc = ifp->if_softc;
   7781 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7782 
   7783 	wm_nq_send_common_locked(ifp, txq, false);
   7784 }
   7785 
   7786 static int
   7787 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7788 {
   7789 	int qid;
   7790 	struct wm_softc *sc = ifp->if_softc;
   7791 	struct wm_txqueue *txq;
   7792 
   7793 	qid = wm_select_txqueue(ifp, m);
   7794 	txq = &sc->sc_queue[qid].wmq_txq;
   7795 
   7796 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7797 		m_freem(m);
   7798 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7799 		return ENOBUFS;
   7800 	}
   7801 
   7802 	/*
   7803 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7804 	 */
   7805 	ifp->if_obytes += m->m_pkthdr.len;
   7806 	if (m->m_flags & M_MCAST)
   7807 		ifp->if_omcasts++;
   7808 
   7809 	/*
    7810 	 * There are two situations in which this mutex_tryenter() can
    7811 	 * fail at run time:
    7812 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7813 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    7814 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7815 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7816 	 * In case (2), the last packet enqueued to txq->txq_interq is
    7817 	 * likewise dequeued by wm_deferred_start_locked(), so it does not
    7818 	 * get stuck either.
   7819 	 */
   7820 	if (mutex_tryenter(txq->txq_lock)) {
   7821 		if (!txq->txq_stopping)
   7822 			wm_nq_transmit_locked(ifp, txq);
   7823 		mutex_exit(txq->txq_lock);
   7824 	}
   7825 
   7826 	return 0;
   7827 }
   7828 
   7829 static void
   7830 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7831 {
   7832 
   7833 	wm_nq_send_common_locked(ifp, txq, true);
   7834 }
   7835 
   7836 static void
   7837 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7838     bool is_transmit)
   7839 {
   7840 	struct wm_softc *sc = ifp->if_softc;
   7841 	struct mbuf *m0;
   7842 	struct wm_txsoft *txs;
   7843 	bus_dmamap_t dmamap;
   7844 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7845 	bool do_csum, sent;
   7846 	bool remap = true;
   7847 
   7848 	KASSERT(mutex_owned(txq->txq_lock));
   7849 
   7850 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7851 		return;
   7852 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7853 		return;
   7854 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7855 		return;
   7856 
   7857 	sent = false;
   7858 
   7859 	/*
   7860 	 * Loop through the send queue, setting up transmit descriptors
   7861 	 * until we drain the queue, or use up all available transmit
   7862 	 * descriptors.
   7863 	 */
   7864 	for (;;) {
   7865 		m0 = NULL;
   7866 
   7867 		/* Get a work queue entry. */
   7868 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7869 			wm_txeof(txq, UINT_MAX);
   7870 			if (txq->txq_sfree == 0) {
   7871 				DPRINTF(WM_DEBUG_TX,
   7872 				    ("%s: TX: no free job descriptors\n",
   7873 					device_xname(sc->sc_dev)));
   7874 				WM_Q_EVCNT_INCR(txq, txsstall);
   7875 				break;
   7876 			}
   7877 		}
   7878 
   7879 		/* Grab a packet off the queue. */
   7880 		if (is_transmit)
   7881 			m0 = pcq_get(txq->txq_interq);
   7882 		else
   7883 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7884 		if (m0 == NULL)
   7885 			break;
   7886 
   7887 		DPRINTF(WM_DEBUG_TX,
   7888 		    ("%s: TX: have packet to transmit: %p\n",
   7889 		    device_xname(sc->sc_dev), m0));
   7890 
   7891 		txs = &txq->txq_soft[txq->txq_snext];
   7892 		dmamap = txs->txs_dmamap;
   7893 
   7894 		/*
   7895 		 * Load the DMA map.  If this fails, the packet either
   7896 		 * didn't fit in the allotted number of segments, or we
   7897 		 * were short on resources.  For the too-many-segments
   7898 		 * case, we simply report an error and drop the packet,
   7899 		 * since we can't sanely copy a jumbo packet to a single
   7900 		 * buffer.
   7901 		 */
   7902 retry:
   7903 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7904 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7905 		if (__predict_false(error)) {
   7906 			if (error == EFBIG) {
   7907 				if (remap == true) {
   7908 					struct mbuf *m;
   7909 
   7910 					remap = false;
   7911 					m = m_defrag(m0, M_NOWAIT);
   7912 					if (m != NULL) {
   7913 						WM_Q_EVCNT_INCR(txq, defrag);
   7914 						m0 = m;
   7915 						goto retry;
   7916 					}
   7917 				}
   7918 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7919 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7920 				    "DMA segments, dropping...\n",
   7921 				    device_xname(sc->sc_dev));
   7922 				wm_dump_mbuf_chain(sc, m0);
   7923 				m_freem(m0);
   7924 				continue;
   7925 			}
   7926 			/* Short on resources, just stop for now. */
   7927 			DPRINTF(WM_DEBUG_TX,
   7928 			    ("%s: TX: dmamap load failed: %d\n",
   7929 				device_xname(sc->sc_dev), error));
   7930 			break;
   7931 		}
   7932 
   7933 		segs_needed = dmamap->dm_nsegs;
   7934 
   7935 		/*
   7936 		 * Ensure we have enough descriptors free to describe
   7937 		 * the packet. Note, we always reserve one descriptor
   7938 		 * at the end of the ring due to the semantics of the
   7939 		 * TDT register, plus one more in the event we need
   7940 		 * to load offload context.
   7941 		 */
   7942 		if (segs_needed > txq->txq_free - 2) {
   7943 			/*
   7944 			 * Not enough free descriptors to transmit this
   7945 			 * packet.  We haven't committed anything yet,
   7946 			 * so just unload the DMA map, put the packet
    7947 			 * back on the queue, and punt. Notify the upper
   7948 			 * layer that there are no more slots left.
   7949 			 */
   7950 			DPRINTF(WM_DEBUG_TX,
   7951 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7952 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7953 				segs_needed, txq->txq_free - 1));
   7954 			if (!is_transmit)
   7955 				ifp->if_flags |= IFF_OACTIVE;
   7956 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7957 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7958 			WM_Q_EVCNT_INCR(txq, txdstall);
   7959 			break;
   7960 		}
   7961 
   7962 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7963 
   7964 		DPRINTF(WM_DEBUG_TX,
   7965 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7966 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7967 
   7968 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7969 
   7970 		/*
   7971 		 * Store a pointer to the packet so that we can free it
   7972 		 * later.
   7973 		 *
   7974 		 * Initially, we consider the number of descriptors the
   7975 		 * packet uses the number of DMA segments.  This may be
   7976 		 * incremented by 1 if we do checksum offload (a descriptor
   7977 		 * is used to set the checksum context).
   7978 		 */
   7979 		txs->txs_mbuf = m0;
   7980 		txs->txs_firstdesc = txq->txq_next;
   7981 		txs->txs_ndesc = segs_needed;
   7982 
   7983 		/* Set up offload parameters for this packet. */
   7984 		uint32_t cmdlen, fields, dcmdlen;
   7985 		if (m0->m_pkthdr.csum_flags &
   7986 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7987 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7988 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7989 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7990 			    &do_csum) != 0) {
   7991 				/* Error message already displayed. */
   7992 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7993 				continue;
   7994 			}
   7995 		} else {
   7996 			do_csum = false;
   7997 			cmdlen = 0;
   7998 			fields = 0;
   7999 		}
   8000 
   8001 		/* Sync the DMA map. */
   8002 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8003 		    BUS_DMASYNC_PREWRITE);
   8004 
   8005 		/* Initialize the first transmit descriptor. */
   8006 		nexttx = txq->txq_next;
   8007 		if (!do_csum) {
   8008 			/* setup a legacy descriptor */
   8009 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8010 			    dmamap->dm_segs[0].ds_addr);
   8011 			txq->txq_descs[nexttx].wtx_cmdlen =
   8012 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8013 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8014 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8015 			if (vlan_has_tag(m0)) {
   8016 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8017 				    htole32(WTX_CMD_VLE);
   8018 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8019 				    htole16(vlan_get_tag(m0));
   8020 			} else
    8021 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8022 
   8023 			dcmdlen = 0;
   8024 		} else {
   8025 			/* setup an advanced data descriptor */
   8026 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8027 			    htole64(dmamap->dm_segs[0].ds_addr);
   8028 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8029 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8030 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8031 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8032 			    htole32(fields);
   8033 			DPRINTF(WM_DEBUG_TX,
   8034 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8035 				device_xname(sc->sc_dev), nexttx,
   8036 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8037 			DPRINTF(WM_DEBUG_TX,
   8038 			    ("\t 0x%08x%08x\n", fields,
   8039 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8040 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8041 		}
   8042 
   8043 		lasttx = nexttx;
   8044 		nexttx = WM_NEXTTX(txq, nexttx);
    8045 		/*
    8046 		 * Fill in the next descriptors. The legacy and advanced
    8047 		 * formats are the same from here on.
    8048 		 */
   8049 		for (seg = 1; seg < dmamap->dm_nsegs;
   8050 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8051 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8052 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8053 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8054 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8055 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8056 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8057 			lasttx = nexttx;
   8058 
   8059 			DPRINTF(WM_DEBUG_TX,
   8060 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8061 				device_xname(sc->sc_dev), nexttx,
   8062 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8063 				dmamap->dm_segs[seg].ds_len));
   8064 		}
   8065 
   8066 		KASSERT(lasttx != -1);
   8067 
   8068 		/*
   8069 		 * Set up the command byte on the last descriptor of
   8070 		 * the packet. If we're in the interrupt delay window,
   8071 		 * delay the interrupt.
   8072 		 */
   8073 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8074 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8075 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8076 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8077 
   8078 		txs->txs_lastdesc = lasttx;
   8079 
   8080 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8081 		    device_xname(sc->sc_dev),
   8082 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8083 
   8084 		/* Sync the descriptors we're using. */
   8085 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8086 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8087 
   8088 		/* Give the packet to the chip. */
   8089 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8090 		sent = true;
   8091 
   8092 		DPRINTF(WM_DEBUG_TX,
   8093 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8094 
   8095 		DPRINTF(WM_DEBUG_TX,
   8096 		    ("%s: TX: finished transmitting packet, job %d\n",
   8097 			device_xname(sc->sc_dev), txq->txq_snext));
   8098 
   8099 		/* Advance the tx pointer. */
   8100 		txq->txq_free -= txs->txs_ndesc;
   8101 		txq->txq_next = nexttx;
   8102 
   8103 		txq->txq_sfree--;
   8104 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8105 
   8106 		/* Pass the packet to any BPF listeners. */
   8107 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8108 	}
   8109 
   8110 	if (m0 != NULL) {
   8111 		if (!is_transmit)
   8112 			ifp->if_flags |= IFF_OACTIVE;
   8113 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8114 		WM_Q_EVCNT_INCR(txq, descdrop);
   8115 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8116 			__func__));
   8117 		m_freem(m0);
   8118 	}
   8119 
   8120 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8121 		/* No more slots; notify upper layer. */
   8122 		if (!is_transmit)
   8123 			ifp->if_flags |= IFF_OACTIVE;
   8124 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8125 	}
   8126 
   8127 	if (sent) {
   8128 		/* Set a watchdog timer in case the chip flakes out. */
   8129 		txq->txq_lastsent = time_uptime;
   8130 		txq->txq_sending = true;
   8131 	}
   8132 }
   8133 
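         /*
          * wm_deferred_start_locked:
          *
          *	Restart packet transmission from softint context, typically
          *	after wm_txeof() has freed descriptors.  Called with
          *	txq->txq_lock held.
          */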
   8134 static void
   8135 wm_deferred_start_locked(struct wm_txqueue *txq)
   8136 {
   8137 	struct wm_softc *sc = txq->txq_sc;
   8138 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8139 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8140 	int qid = wmq->wmq_id;
   8141 
   8142 	KASSERT(mutex_owned(txq->txq_lock));
   8143 
   8144 	if (txq->txq_stopping) {
   8145 		mutex_exit(txq->txq_lock);
   8146 		return;
   8147 	}
   8148 
   8149 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8150 		/* XXX needed for ALTQ or single-CPU systems */
   8151 		if (qid == 0)
   8152 			wm_nq_start_locked(ifp);
   8153 		wm_nq_transmit_locked(ifp, txq);
   8154 	} else {
    8155 		/* XXX needed for ALTQ or single-CPU systems */
   8156 		if (qid == 0)
   8157 			wm_start_locked(ifp);
   8158 		wm_transmit_locked(ifp, txq);
   8159 	}
   8160 }
   8161 
   8162 /* Interrupt */
   8163 
   8164 /*
   8165  * wm_txeof:
   8166  *
   8167  *	Helper; handle transmit interrupts.
   8168  */
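         /*
          * The limit argument bounds how many Tx jobs are reaped in a single
          * call.  When the limit is hit, wm_txeof() returns true so that the
          * caller can defer the remainder to the wm_handle_queue() softint.
          */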
   8169 static bool
   8170 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8171 {
   8172 	struct wm_softc *sc = txq->txq_sc;
   8173 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8174 	struct wm_txsoft *txs;
   8175 	int count = 0;
   8176 	int i;
   8177 	uint8_t status;
   8178 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8179 	bool more = false;
   8180 
   8181 	KASSERT(mutex_owned(txq->txq_lock));
   8182 
   8183 	if (txq->txq_stopping)
   8184 		return false;
   8185 
   8186 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8187 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8188 	if (wmq->wmq_id == 0)
   8189 		ifp->if_flags &= ~IFF_OACTIVE;
   8190 
   8191 	/*
   8192 	 * Go through the Tx list and free mbufs for those
   8193 	 * frames which have been transmitted.
   8194 	 */
   8195 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8196 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8197 		if (limit-- == 0) {
   8198 			more = true;
   8199 			DPRINTF(WM_DEBUG_TX,
   8200 			    ("%s: TX: loop limited, job %d is not processed\n",
   8201 				device_xname(sc->sc_dev), i));
   8202 			break;
   8203 		}
   8204 
   8205 		txs = &txq->txq_soft[i];
   8206 
   8207 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8208 			device_xname(sc->sc_dev), i));
   8209 
   8210 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8211 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8212 
   8213 		status =
   8214 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8215 		if ((status & WTX_ST_DD) == 0) {
   8216 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8217 			    BUS_DMASYNC_PREREAD);
   8218 			break;
   8219 		}
   8220 
   8221 		count++;
   8222 		DPRINTF(WM_DEBUG_TX,
   8223 		    ("%s: TX: job %d done: descs %d..%d\n",
   8224 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8225 		    txs->txs_lastdesc));
   8226 
   8227 		/*
   8228 		 * XXX We should probably be using the statistics
   8229 		 * XXX registers, but I don't know if they exist
   8230 		 * XXX on chips before the i82544.
   8231 		 */
   8232 
   8233 #ifdef WM_EVENT_COUNTERS
   8234 		if (status & WTX_ST_TU)
   8235 			WM_Q_EVCNT_INCR(txq, underrun);
   8236 #endif /* WM_EVENT_COUNTERS */
   8237 
    8238 		/*
    8239 		 * The documents for the 82574 and newer say the status field
    8240 		 * has neither the EC (Excessive Collision) bit nor the LC
    8241 		 * (Late Collision) bit (reserved). See the "PCIe GbE Controller
    8242 		 * Open Source Software Developer's Manual", 82574 datasheet and newer.
    8243 		 *
    8244 		 * XXX The LC bit was observed set on an I218 even though the
    8245 		 * media was full duplex, so the bit might have another meaning
    8246 		 * ... (no documentation for it).
    8247 		 */
   8248 
   8249 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8250 		    && ((sc->sc_type < WM_T_82574)
   8251 			|| (sc->sc_type == WM_T_80003))) {
   8252 			ifp->if_oerrors++;
   8253 			if (status & WTX_ST_LC)
   8254 				log(LOG_WARNING, "%s: late collision\n",
   8255 				    device_xname(sc->sc_dev));
   8256 			else if (status & WTX_ST_EC) {
   8257 				ifp->if_collisions +=
   8258 				    TX_COLLISION_THRESHOLD + 1;
   8259 				log(LOG_WARNING, "%s: excessive collisions\n",
   8260 				    device_xname(sc->sc_dev));
   8261 			}
   8262 		} else
   8263 			ifp->if_opackets++;
   8264 
   8265 		txq->txq_packets++;
   8266 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8267 
   8268 		txq->txq_free += txs->txs_ndesc;
   8269 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8270 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8271 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8272 		m_freem(txs->txs_mbuf);
   8273 		txs->txs_mbuf = NULL;
   8274 	}
   8275 
   8276 	/* Update the dirty transmit buffer pointer. */
   8277 	txq->txq_sdirty = i;
   8278 	DPRINTF(WM_DEBUG_TX,
   8279 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8280 
   8281 	if (count != 0)
   8282 		rnd_add_uint32(&sc->rnd_source, count);
   8283 
   8284 	/*
   8285 	 * If there are no more pending transmissions, cancel the watchdog
   8286 	 * timer.
   8287 	 */
   8288 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8289 		txq->txq_sending = false;
   8290 
   8291 	return more;
   8292 }
   8293 
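         /*
          * Rx descriptor accessors.
          *
          * Three receive descriptor layouts are handled here: the extended
          * format on the 82574, the advanced format on NEWQUEUE (82575 and
          * newer) devices, and the legacy format on everything else.  These
          * helpers hide the layout differences from wm_rxeof().
          */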
   8294 static inline uint32_t
   8295 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8296 {
   8297 	struct wm_softc *sc = rxq->rxq_sc;
   8298 
   8299 	if (sc->sc_type == WM_T_82574)
   8300 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8301 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8302 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8303 	else
   8304 		return rxq->rxq_descs[idx].wrx_status;
   8305 }
   8306 
   8307 static inline uint32_t
   8308 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8309 {
   8310 	struct wm_softc *sc = rxq->rxq_sc;
   8311 
   8312 	if (sc->sc_type == WM_T_82574)
   8313 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8314 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8315 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8316 	else
   8317 		return rxq->rxq_descs[idx].wrx_errors;
   8318 }
   8319 
   8320 static inline uint16_t
   8321 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8322 {
   8323 	struct wm_softc *sc = rxq->rxq_sc;
   8324 
   8325 	if (sc->sc_type == WM_T_82574)
   8326 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8327 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8328 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8329 	else
   8330 		return rxq->rxq_descs[idx].wrx_special;
   8331 }
   8332 
   8333 static inline int
   8334 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8335 {
   8336 	struct wm_softc *sc = rxq->rxq_sc;
   8337 
   8338 	if (sc->sc_type == WM_T_82574)
   8339 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8340 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8341 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8342 	else
   8343 		return rxq->rxq_descs[idx].wrx_len;
   8344 }
   8345 
   8346 #ifdef WM_DEBUG
   8347 static inline uint32_t
   8348 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8349 {
   8350 	struct wm_softc *sc = rxq->rxq_sc;
   8351 
   8352 	if (sc->sc_type == WM_T_82574)
   8353 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8354 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8355 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8356 	else
   8357 		return 0;
   8358 }
   8359 
   8360 static inline uint8_t
   8361 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8362 {
   8363 	struct wm_softc *sc = rxq->rxq_sc;
   8364 
   8365 	if (sc->sc_type == WM_T_82574)
   8366 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8367 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8368 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8369 	else
   8370 		return 0;
   8371 }
   8372 #endif /* WM_DEBUG */
   8373 
   8374 static inline bool
   8375 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8376     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8377 {
   8378 
   8379 	if (sc->sc_type == WM_T_82574)
   8380 		return (status & ext_bit) != 0;
   8381 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8382 		return (status & nq_bit) != 0;
   8383 	else
   8384 		return (status & legacy_bit) != 0;
   8385 }
   8386 
   8387 static inline bool
   8388 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8389     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8390 {
   8391 
   8392 	if (sc->sc_type == WM_T_82574)
   8393 		return (error & ext_bit) != 0;
   8394 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8395 		return (error & nq_bit) != 0;
   8396 	else
   8397 		return (error & legacy_bit) != 0;
   8398 }
   8399 
   8400 static inline bool
   8401 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8402 {
   8403 
   8404 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8405 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8406 		return true;
   8407 	else
   8408 		return false;
   8409 }
   8410 
   8411 static inline bool
   8412 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8413 {
   8414 	struct wm_softc *sc = rxq->rxq_sc;
   8415 
   8416 	/* XXXX missing error bit for newqueue? */
   8417 	if (wm_rxdesc_is_set_error(sc, errors,
   8418 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8419 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8420 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8421 		NQRXC_ERROR_RXE)) {
   8422 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8423 		    EXTRXC_ERROR_SE, 0))
   8424 			log(LOG_WARNING, "%s: symbol error\n",
   8425 			    device_xname(sc->sc_dev));
   8426 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8427 		    EXTRXC_ERROR_SEQ, 0))
   8428 			log(LOG_WARNING, "%s: receive sequence error\n",
   8429 			    device_xname(sc->sc_dev));
   8430 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8431 		    EXTRXC_ERROR_CE, 0))
   8432 			log(LOG_WARNING, "%s: CRC error\n",
   8433 			    device_xname(sc->sc_dev));
   8434 		return true;
   8435 	}
   8436 
   8437 	return false;
   8438 }
   8439 
   8440 static inline bool
   8441 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8442 {
   8443 	struct wm_softc *sc = rxq->rxq_sc;
   8444 
   8445 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8446 		NQRXC_STATUS_DD)) {
   8447 		/* We have processed all of the receive descriptors. */
   8448 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8449 		return false;
   8450 	}
   8451 
   8452 	return true;
   8453 }
   8454 
   8455 static inline bool
   8456 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8457     uint16_t vlantag, struct mbuf *m)
   8458 {
   8459 
   8460 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8461 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8462 		vlan_set_tag(m, le16toh(vlantag));
   8463 	}
   8464 
   8465 	return true;
   8466 }
   8467 
   8468 static inline void
   8469 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8470     uint32_t errors, struct mbuf *m)
   8471 {
   8472 	struct wm_softc *sc = rxq->rxq_sc;
   8473 
   8474 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8475 		if (wm_rxdesc_is_set_status(sc, status,
   8476 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8477 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8478 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8479 			if (wm_rxdesc_is_set_error(sc, errors,
   8480 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8481 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8482 		}
   8483 		if (wm_rxdesc_is_set_status(sc, status,
   8484 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8485 			/*
   8486 			 * Note: we don't know if this was TCP or UDP,
   8487 			 * so we just set both bits, and expect the
   8488 			 * upper layers to deal.
   8489 			 */
   8490 			WM_Q_EVCNT_INCR(rxq, tusum);
   8491 			m->m_pkthdr.csum_flags |=
   8492 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8493 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8494 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8495 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8496 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8497 		}
   8498 	}
   8499 }
   8500 
   8501 /*
   8502  * wm_rxeof:
   8503  *
   8504  *	Helper; handle receive interrupts.
   8505  */
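         /*
          * As in wm_txeof(), the limit argument bounds the number of
          * descriptors processed in a single call, and the return value is
          * true when more work remains so the caller can defer it to the
          * wm_handle_queue() softint.
          */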
   8506 static bool
   8507 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8508 {
   8509 	struct wm_softc *sc = rxq->rxq_sc;
   8510 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8511 	struct wm_rxsoft *rxs;
   8512 	struct mbuf *m;
   8513 	int i, len;
   8514 	int count = 0;
   8515 	uint32_t status, errors;
   8516 	uint16_t vlantag;
   8517 	bool more = false;
   8518 
   8519 	KASSERT(mutex_owned(rxq->rxq_lock));
   8520 
   8521 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8522 		if (limit-- == 0) {
   8523 			rxq->rxq_ptr = i;
   8524 			more = true;
   8525 			DPRINTF(WM_DEBUG_RX,
   8526 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8527 				device_xname(sc->sc_dev), i));
   8528 			break;
   8529 		}
   8530 
   8531 		rxs = &rxq->rxq_soft[i];
   8532 
   8533 		DPRINTF(WM_DEBUG_RX,
   8534 		    ("%s: RX: checking descriptor %d\n",
   8535 			device_xname(sc->sc_dev), i));
   8536 		wm_cdrxsync(rxq, i,
   8537 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8538 
   8539 		status = wm_rxdesc_get_status(rxq, i);
   8540 		errors = wm_rxdesc_get_errors(rxq, i);
   8541 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8542 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8543 #ifdef WM_DEBUG
   8544 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8545 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8546 #endif
   8547 
   8548 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8549 			/*
    8550 			 * Update the receive pointer while holding rxq_lock
    8551 			 * so that it stays consistent with the counters.
    8552 			 */
   8553 			rxq->rxq_ptr = i;
   8554 			break;
   8555 		}
   8556 
   8557 		count++;
   8558 		if (__predict_false(rxq->rxq_discard)) {
   8559 			DPRINTF(WM_DEBUG_RX,
   8560 			    ("%s: RX: discarding contents of descriptor %d\n",
   8561 				device_xname(sc->sc_dev), i));
   8562 			wm_init_rxdesc(rxq, i);
   8563 			if (wm_rxdesc_is_eop(rxq, status)) {
   8564 				/* Reset our state. */
   8565 				DPRINTF(WM_DEBUG_RX,
   8566 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8567 					device_xname(sc->sc_dev)));
   8568 				rxq->rxq_discard = 0;
   8569 			}
   8570 			continue;
   8571 		}
   8572 
   8573 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8574 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8575 
   8576 		m = rxs->rxs_mbuf;
   8577 
   8578 		/*
   8579 		 * Add a new receive buffer to the ring, unless of
   8580 		 * course the length is zero. Treat the latter as a
   8581 		 * failed mapping.
   8582 		 */
   8583 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8584 			/*
   8585 			 * Failed, throw away what we've done so
   8586 			 * far, and discard the rest of the packet.
   8587 			 */
   8588 			ifp->if_ierrors++;
   8589 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8590 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8591 			wm_init_rxdesc(rxq, i);
   8592 			if (!wm_rxdesc_is_eop(rxq, status))
   8593 				rxq->rxq_discard = 1;
   8594 			if (rxq->rxq_head != NULL)
   8595 				m_freem(rxq->rxq_head);
   8596 			WM_RXCHAIN_RESET(rxq);
   8597 			DPRINTF(WM_DEBUG_RX,
   8598 			    ("%s: RX: Rx buffer allocation failed, "
   8599 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8600 				rxq->rxq_discard ? " (discard)" : ""));
   8601 			continue;
   8602 		}
   8603 
   8604 		m->m_len = len;
   8605 		rxq->rxq_len += len;
   8606 		DPRINTF(WM_DEBUG_RX,
   8607 		    ("%s: RX: buffer at %p len %d\n",
   8608 			device_xname(sc->sc_dev), m->m_data, len));
   8609 
   8610 		/* If this is not the end of the packet, keep looking. */
   8611 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8612 			WM_RXCHAIN_LINK(rxq, m);
   8613 			DPRINTF(WM_DEBUG_RX,
   8614 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8615 				device_xname(sc->sc_dev), rxq->rxq_len));
   8616 			continue;
   8617 		}
   8618 
    8619 		/*
    8620 		 * Okay, we have the entire packet now. The chip is
    8621 		 * configured to include the FCS except on the I350 and
    8622 		 * I21[01] (not all chips can be configured to strip it),
    8623 		 * so we need to trim it. We may also need to adjust the
    8624 		 * length of the previous mbuf in the chain if the current
    8625 		 * mbuf is too short. Due to an erratum, the RCTL_SECRC bit
    8626 		 * in the RCTL register is always set on the I350, so we
    8627 		 * don't trim the FCS there.
    8628 		 */
   8629 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8630 		    && (sc->sc_type != WM_T_I210)
   8631 		    && (sc->sc_type != WM_T_I211)) {
   8632 			if (m->m_len < ETHER_CRC_LEN) {
   8633 				rxq->rxq_tail->m_len
   8634 				    -= (ETHER_CRC_LEN - m->m_len);
   8635 				m->m_len = 0;
   8636 			} else
   8637 				m->m_len -= ETHER_CRC_LEN;
   8638 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8639 		} else
   8640 			len = rxq->rxq_len;
   8641 
   8642 		WM_RXCHAIN_LINK(rxq, m);
   8643 
   8644 		*rxq->rxq_tailp = NULL;
   8645 		m = rxq->rxq_head;
   8646 
   8647 		WM_RXCHAIN_RESET(rxq);
   8648 
   8649 		DPRINTF(WM_DEBUG_RX,
   8650 		    ("%s: RX: have entire packet, len -> %d\n",
   8651 			device_xname(sc->sc_dev), len));
   8652 
   8653 		/* If an error occurred, update stats and drop the packet. */
   8654 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8655 			m_freem(m);
   8656 			continue;
   8657 		}
   8658 
   8659 		/* No errors.  Receive the packet. */
   8660 		m_set_rcvif(m, ifp);
   8661 		m->m_pkthdr.len = len;
    8662 		/*
    8663 		 * TODO
    8664 		 * The rsshash and rsstype should be saved to this mbuf.
    8665 		 */
   8666 		DPRINTF(WM_DEBUG_RX,
   8667 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8668 			device_xname(sc->sc_dev), rsstype, rsshash));
   8669 
   8670 		/*
   8671 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8672 		 * for us.  Associate the tag with the packet.
   8673 		 */
   8674 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8675 			continue;
   8676 
   8677 		/* Set up checksum info for this packet. */
   8678 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8679 		/*
    8680 		 * Update the receive pointer while holding rxq_lock so that
    8681 		 * it stays consistent with the counters.
    8682 		 */
   8683 		rxq->rxq_ptr = i;
   8684 		rxq->rxq_packets++;
   8685 		rxq->rxq_bytes += len;
   8686 		mutex_exit(rxq->rxq_lock);
   8687 
   8688 		/* Pass it on. */
   8689 		if_percpuq_enqueue(sc->sc_ipq, m);
   8690 
   8691 		mutex_enter(rxq->rxq_lock);
   8692 
   8693 		if (rxq->rxq_stopping)
   8694 			break;
   8695 	}
   8696 
   8697 	if (count != 0)
   8698 		rnd_add_uint32(&sc->rnd_source, count);
   8699 
   8700 	DPRINTF(WM_DEBUG_RX,
   8701 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8702 
   8703 	return more;
   8704 }
   8705 
   8706 /*
   8707  * wm_linkintr_gmii:
   8708  *
   8709  *	Helper; handle link interrupts for GMII.
   8710  */
   8711 static void
   8712 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8713 {
   8714 
   8715 	KASSERT(WM_CORE_LOCKED(sc));
   8716 
   8717 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8718 		__func__));
   8719 
   8720 	if (icr & ICR_LSC) {
   8721 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8722 		uint32_t reg;
   8723 		bool link;
   8724 
   8725 		link = status & STATUS_LU;
   8726 		if (link) {
   8727 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8728 				device_xname(sc->sc_dev),
   8729 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8730 		} else {
   8731 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8732 				device_xname(sc->sc_dev)));
   8733 		}
   8734 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8735 			wm_gig_downshift_workaround_ich8lan(sc);
   8736 
   8737 		if ((sc->sc_type == WM_T_ICH8)
   8738 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8739 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8740 		}
   8741 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8742 			device_xname(sc->sc_dev)));
   8743 		mii_pollstat(&sc->sc_mii);
   8744 		if (sc->sc_type == WM_T_82543) {
   8745 			int miistatus, active;
   8746 
   8747 			/*
   8748 			 * With 82543, we need to force speed and
   8749 			 * duplex on the MAC equal to what the PHY
   8750 			 * speed and duplex configuration is.
   8751 			 */
   8752 			miistatus = sc->sc_mii.mii_media_status;
   8753 
   8754 			if (miistatus & IFM_ACTIVE) {
   8755 				active = sc->sc_mii.mii_media_active;
   8756 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8757 				switch (IFM_SUBTYPE(active)) {
   8758 				case IFM_10_T:
   8759 					sc->sc_ctrl |= CTRL_SPEED_10;
   8760 					break;
   8761 				case IFM_100_TX:
   8762 					sc->sc_ctrl |= CTRL_SPEED_100;
   8763 					break;
   8764 				case IFM_1000_T:
   8765 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8766 					break;
   8767 				default:
   8768 					/*
   8769 					 * fiber?
    8770 					 * Should not enter here.
   8771 					 */
   8772 					printf("unknown media (%x)\n", active);
   8773 					break;
   8774 				}
   8775 				if (active & IFM_FDX)
   8776 					sc->sc_ctrl |= CTRL_FD;
   8777 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8778 			}
   8779 		} else if (sc->sc_type == WM_T_PCH) {
   8780 			wm_k1_gig_workaround_hv(sc,
   8781 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8782 		}
   8783 
   8784 		if ((sc->sc_phytype == WMPHY_82578)
   8785 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8786 			== IFM_1000_T)) {
   8787 
   8788 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8789 				delay(200*1000); /* XXX too big */
   8790 
   8791 				/* Link stall fix for link up */
   8792 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8793 				    HV_MUX_DATA_CTRL,
   8794 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8795 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8796 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8797 				    HV_MUX_DATA_CTRL,
   8798 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8799 			}
   8800 		}
   8801 		/*
   8802 		 * I217 Packet Loss issue:
   8803 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8804 		 * on power up.
   8805 		 * Set the Beacon Duration for I217 to 8 usec
   8806 		 */
   8807 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8808 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8809 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8810 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8811 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8812 		}
   8813 
   8814 		/* Work-around I218 hang issue */
   8815 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   8816 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   8817 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   8818 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   8819 			wm_k1_workaround_lpt_lp(sc, link);
   8820 
   8821 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8822 			/*
   8823 			 * Set platform power management values for Latency
   8824 			 * Tolerance Reporting (LTR)
   8825 			 */
   8826 			wm_platform_pm_pch_lpt(sc,
   8827 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8828 		}
   8829 
   8830 		/* FEXTNVM6 K1-off workaround */
   8831 		if (sc->sc_type == WM_T_PCH_SPT) {
   8832 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8833 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8834 			    & FEXTNVM6_K1_OFF_ENABLE)
   8835 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8836 			else
   8837 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8838 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8839 		}
   8840 	} else if (icr & ICR_RXSEQ) {
   8841 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8842 			device_xname(sc->sc_dev)));
   8843 	}
   8844 }
   8845 
   8846 /*
   8847  * wm_linkintr_tbi:
   8848  *
   8849  *	Helper; handle link interrupts for TBI mode.
   8850  */
   8851 static void
   8852 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8853 {
   8854 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8855 	uint32_t status;
   8856 
   8857 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8858 		__func__));
   8859 
   8860 	status = CSR_READ(sc, WMREG_STATUS);
   8861 	if (icr & ICR_LSC) {
   8862 		wm_check_for_link(sc);
   8863 		if (status & STATUS_LU) {
   8864 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8865 				device_xname(sc->sc_dev),
   8866 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8867 			/*
   8868 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8869 			 * so we should update sc->sc_ctrl
   8870 			 */
   8871 
   8872 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8873 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8874 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8875 			if (status & STATUS_FD)
   8876 				sc->sc_tctl |=
   8877 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8878 			else
   8879 				sc->sc_tctl |=
   8880 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8881 			if (sc->sc_ctrl & CTRL_TFCE)
   8882 				sc->sc_fcrtl |= FCRTL_XONE;
   8883 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8884 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8885 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   8886 			sc->sc_tbi_linkup = 1;
   8887 			if_link_state_change(ifp, LINK_STATE_UP);
   8888 		} else {
   8889 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8890 				device_xname(sc->sc_dev)));
   8891 			sc->sc_tbi_linkup = 0;
   8892 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8893 		}
   8894 		/* Update LED */
   8895 		wm_tbi_serdes_set_linkled(sc);
   8896 	} else if (icr & ICR_RXSEQ) {
   8897 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8898 			device_xname(sc->sc_dev)));
   8899 	}
   8900 }
   8901 
   8902 /*
   8903  * wm_linkintr_serdes:
   8904  *
   8905  *	Helper; handle link interrupts for TBI mode.
   8906  */
   8907 static void
   8908 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8909 {
   8910 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8911 	struct mii_data *mii = &sc->sc_mii;
   8912 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8913 	uint32_t pcs_adv, pcs_lpab, reg;
   8914 
   8915 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8916 		__func__));
   8917 
   8918 	if (icr & ICR_LSC) {
   8919 		/* Check PCS */
   8920 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8921 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8922 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8923 				device_xname(sc->sc_dev)));
   8924 			mii->mii_media_status |= IFM_ACTIVE;
   8925 			sc->sc_tbi_linkup = 1;
   8926 			if_link_state_change(ifp, LINK_STATE_UP);
   8927 		} else {
   8928 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8929 				device_xname(sc->sc_dev)));
   8930 			mii->mii_media_status |= IFM_NONE;
   8931 			sc->sc_tbi_linkup = 0;
   8932 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8933 			wm_tbi_serdes_set_linkled(sc);
   8934 			return;
   8935 		}
   8936 		mii->mii_media_active |= IFM_1000_SX;
   8937 		if ((reg & PCS_LSTS_FDX) != 0)
   8938 			mii->mii_media_active |= IFM_FDX;
   8939 		else
   8940 			mii->mii_media_active |= IFM_HDX;
   8941 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8942 			/* Check flow */
   8943 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8944 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8945 				DPRINTF(WM_DEBUG_LINK,
   8946 				    ("XXX LINKOK but not ACOMP\n"));
   8947 				return;
   8948 			}
   8949 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8950 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8951 			DPRINTF(WM_DEBUG_LINK,
   8952 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8953 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8954 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8955 				mii->mii_media_active |= IFM_FLOW
   8956 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8957 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8958 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8959 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8960 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8961 				mii->mii_media_active |= IFM_FLOW
   8962 				    | IFM_ETH_TXPAUSE;
   8963 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8964 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8965 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8966 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8967 				mii->mii_media_active |= IFM_FLOW
   8968 				    | IFM_ETH_RXPAUSE;
   8969 		}
   8970 		/* Update LED */
   8971 		wm_tbi_serdes_set_linkled(sc);
   8972 	} else {
   8973 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8974 		    device_xname(sc->sc_dev)));
   8975 	}
   8976 }
   8977 
   8978 /*
   8979  * wm_linkintr:
   8980  *
   8981  *	Helper; handle link interrupts.
   8982  */
   8983 static void
   8984 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8985 {
   8986 
   8987 	KASSERT(WM_CORE_LOCKED(sc));
   8988 
   8989 	if (sc->sc_flags & WM_F_HAS_MII)
   8990 		wm_linkintr_gmii(sc, icr);
   8991 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8992 	    && (sc->sc_type >= WM_T_82575))
   8993 		wm_linkintr_serdes(sc, icr);
   8994 	else
   8995 		wm_linkintr_tbi(sc, icr);
   8996 }
   8997 
   8998 /*
   8999  * wm_intr_legacy:
   9000  *
   9001  *	Interrupt service routine for INTx and MSI.
   9002  */
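         /*
          * Reading ICR clears the asserted interrupt causes, so this handler
          * loops, reading ICR until none of the enabled (sc_icr) bits remain
          * set.
          */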
   9003 static int
   9004 wm_intr_legacy(void *arg)
   9005 {
   9006 	struct wm_softc *sc = arg;
   9007 	struct wm_queue *wmq = &sc->sc_queue[0];
   9008 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9009 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9010 	uint32_t icr, rndval = 0;
   9011 	int handled = 0;
   9012 
   9013 	while (1 /* CONSTCOND */) {
   9014 		icr = CSR_READ(sc, WMREG_ICR);
   9015 		if ((icr & sc->sc_icr) == 0)
   9016 			break;
   9017 		if (handled == 0) {
   9018 			DPRINTF(WM_DEBUG_TX,
    9019 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9020 		}
   9021 		if (rndval == 0)
   9022 			rndval = icr;
   9023 
   9024 		mutex_enter(rxq->rxq_lock);
   9025 
   9026 		if (rxq->rxq_stopping) {
   9027 			mutex_exit(rxq->rxq_lock);
   9028 			break;
   9029 		}
   9030 
   9031 		handled = 1;
   9032 
   9033 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9034 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9035 			DPRINTF(WM_DEBUG_RX,
   9036 			    ("%s: RX: got Rx intr 0x%08x\n",
   9037 				device_xname(sc->sc_dev),
   9038 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9039 			WM_Q_EVCNT_INCR(rxq, intr);
   9040 		}
   9041 #endif
   9042 		/*
   9043 		 * wm_rxeof() does *not* call upper layer functions directly,
    9044 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9045 		 * So, we can call wm_rxeof() in interrupt context.
   9046 		 */
   9047 		wm_rxeof(rxq, UINT_MAX);
   9048 
   9049 		mutex_exit(rxq->rxq_lock);
   9050 		mutex_enter(txq->txq_lock);
   9051 
   9052 		if (txq->txq_stopping) {
   9053 			mutex_exit(txq->txq_lock);
   9054 			break;
   9055 		}
   9056 
   9057 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9058 		if (icr & ICR_TXDW) {
   9059 			DPRINTF(WM_DEBUG_TX,
   9060 			    ("%s: TX: got TXDW interrupt\n",
   9061 				device_xname(sc->sc_dev)));
   9062 			WM_Q_EVCNT_INCR(txq, txdw);
   9063 		}
   9064 #endif
   9065 		wm_txeof(txq, UINT_MAX);
   9066 
   9067 		mutex_exit(txq->txq_lock);
   9068 		WM_CORE_LOCK(sc);
   9069 
   9070 		if (sc->sc_core_stopping) {
   9071 			WM_CORE_UNLOCK(sc);
   9072 			break;
   9073 		}
   9074 
   9075 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9076 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9077 			wm_linkintr(sc, icr);
   9078 		}
   9079 
   9080 		WM_CORE_UNLOCK(sc);
   9081 
   9082 		if (icr & ICR_RXO) {
   9083 #if defined(WM_DEBUG)
   9084 			log(LOG_WARNING, "%s: Receive overrun\n",
   9085 			    device_xname(sc->sc_dev));
   9086 #endif /* defined(WM_DEBUG) */
   9087 		}
   9088 	}
   9089 
   9090 	rnd_add_uint32(&sc->rnd_source, rndval);
   9091 
   9092 	if (handled) {
   9093 		/* Try to get more packets going. */
   9094 		softint_schedule(wmq->wmq_si);
   9095 	}
   9096 
   9097 	return handled;
   9098 }
   9099 
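         /*
          * Per-queue interrupt masking: the 82574 uses IMC/IMS with the
          * ICR_TXQ/ICR_RXQ bits, the 82575 uses EIMC/EIMS with the
          * EITR_TX_QUEUE/EITR_RX_QUEUE bits, and newer devices use EIMC/EIMS
          * with one bit per MSI-X vector.
          */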
   9100 static inline void
   9101 wm_txrxintr_disable(struct wm_queue *wmq)
   9102 {
   9103 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9104 
   9105 	if (sc->sc_type == WM_T_82574)
   9106 		CSR_WRITE(sc, WMREG_IMC,
   9107 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9108 	else if (sc->sc_type == WM_T_82575)
   9109 		CSR_WRITE(sc, WMREG_EIMC,
   9110 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9111 	else
   9112 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9113 }
   9114 
   9115 static inline void
   9116 wm_txrxintr_enable(struct wm_queue *wmq)
   9117 {
   9118 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9119 
   9120 	wm_itrs_calculate(sc, wmq);
   9121 
    9122 	/*
    9123 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
    9124 	 * There is no need to care which of RXQ(0) and RXQ(1) enables
    9125 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9126 	 * while its wm_handle_queue(wmq) is running.
    9127 	 */
   9128 	if (sc->sc_type == WM_T_82574)
   9129 		CSR_WRITE(sc, WMREG_IMS,
   9130 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9131 	else if (sc->sc_type == WM_T_82575)
   9132 		CSR_WRITE(sc, WMREG_EIMS,
   9133 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9134 	else
   9135 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9136 }
   9137 
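         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for the Tx/Rx MSI-X vector of a
          *	queue pair.  The vector is masked while processing; when the
          *	Tx or Rx limit is hit, the remainder is deferred to the
          *	wm_handle_queue() softint, and the vector is only re-enabled
          *	once all pending work has been processed.
          */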
   9138 static int
   9139 wm_txrxintr_msix(void *arg)
   9140 {
   9141 	struct wm_queue *wmq = arg;
   9142 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9143 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9144 	struct wm_softc *sc = txq->txq_sc;
   9145 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9146 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9147 	bool txmore;
   9148 	bool rxmore;
   9149 
   9150 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9151 
   9152 	DPRINTF(WM_DEBUG_TX,
   9153 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9154 
   9155 	wm_txrxintr_disable(wmq);
   9156 
   9157 	mutex_enter(txq->txq_lock);
   9158 
   9159 	if (txq->txq_stopping) {
   9160 		mutex_exit(txq->txq_lock);
   9161 		return 0;
   9162 	}
   9163 
   9164 	WM_Q_EVCNT_INCR(txq, txdw);
   9165 	txmore = wm_txeof(txq, txlimit);
    9166 	/* wm_deferred_start_locked() is called in wm_handle_queue(). */
   9167 	mutex_exit(txq->txq_lock);
   9168 
   9169 	DPRINTF(WM_DEBUG_RX,
   9170 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9171 	mutex_enter(rxq->rxq_lock);
   9172 
   9173 	if (rxq->rxq_stopping) {
   9174 		mutex_exit(rxq->rxq_lock);
   9175 		return 0;
   9176 	}
   9177 
   9178 	WM_Q_EVCNT_INCR(rxq, intr);
   9179 	rxmore = wm_rxeof(rxq, rxlimit);
   9180 	mutex_exit(rxq->rxq_lock);
   9181 
   9182 	wm_itrs_writereg(sc, wmq);
   9183 
   9184 	if (txmore || rxmore)
   9185 		softint_schedule(wmq->wmq_si);
   9186 	else
   9187 		wm_txrxintr_enable(wmq);
   9188 
   9189 	return 1;
   9190 }
   9191 
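         /*
          * wm_handle_queue:
          *
          *	Softint handler that continues the Tx/Rx processing which the
          *	interrupt handler could not finish within its limits, then
          *	re-enables the queue interrupt (or reschedules itself if there
          *	is still more work to do).
          */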
   9192 static void
   9193 wm_handle_queue(void *arg)
   9194 {
   9195 	struct wm_queue *wmq = arg;
   9196 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9197 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9198 	struct wm_softc *sc = txq->txq_sc;
   9199 	u_int txlimit = sc->sc_tx_process_limit;
   9200 	u_int rxlimit = sc->sc_rx_process_limit;
   9201 	bool txmore;
   9202 	bool rxmore;
   9203 
   9204 	mutex_enter(txq->txq_lock);
   9205 	if (txq->txq_stopping) {
   9206 		mutex_exit(txq->txq_lock);
   9207 		return;
   9208 	}
   9209 	txmore = wm_txeof(txq, txlimit);
   9210 	wm_deferred_start_locked(txq);
   9211 	mutex_exit(txq->txq_lock);
   9212 
   9213 	mutex_enter(rxq->rxq_lock);
   9214 	if (rxq->rxq_stopping) {
   9215 		mutex_exit(rxq->rxq_lock);
   9216 		return;
   9217 	}
   9218 	WM_Q_EVCNT_INCR(rxq, defer);
   9219 	rxmore = wm_rxeof(rxq, rxlimit);
   9220 	mutex_exit(rxq->rxq_lock);
   9221 
   9222 	if (txmore || rxmore)
   9223 		softint_schedule(wmq->wmq_si);
   9224 	else
   9225 		wm_txrxintr_enable(wmq);
   9226 }
   9227 
   9228 /*
   9229  * wm_linkintr_msix:
   9230  *
   9231  *	Interrupt service routine for link status change for MSI-X.
   9232  */
   9233 static int
   9234 wm_linkintr_msix(void *arg)
   9235 {
   9236 	struct wm_softc *sc = arg;
   9237 	uint32_t reg;
   9238 	bool has_rxo;
   9239 
   9240 	DPRINTF(WM_DEBUG_LINK,
   9241 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9242 
   9243 	reg = CSR_READ(sc, WMREG_ICR);
   9244 	WM_CORE_LOCK(sc);
   9245 	if (sc->sc_core_stopping)
   9246 		goto out;
   9247 
   9248 	if ((reg & ICR_LSC) != 0) {
   9249 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9250 		wm_linkintr(sc, ICR_LSC);
   9251 	}
   9252 
    9253 	/*
    9254 	 * XXX 82574 MSI-X mode workaround
    9255 	 *
    9256 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises an interrupt
    9257 	 * only on the ICR_OTHER MSI-X vector, not on the ICR_RXQ(0) or
    9258 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9259 	 * interrupts by writing WMREG_ICS to process received packets.
    9260 	 */
   9261 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9262 #if defined(WM_DEBUG)
   9263 		log(LOG_WARNING, "%s: Receive overrun\n",
   9264 		    device_xname(sc->sc_dev));
   9265 #endif /* defined(WM_DEBUG) */
   9266 
   9267 		has_rxo = true;
    9268 		/*
    9269 		 * The RXO interrupt rate is very high while receive traffic
    9270 		 * is heavy, so we use polling mode for ICR_OTHER just as for
    9271 		 * the Tx/Rx interrupts. ICR_OTHER will be re-enabled at the
    9272 		 * end of wm_txrxintr_msix(), which is kicked by both the
    9273 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
    9274 		 */
   9275 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9276 
   9277 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9278 	}
   9279 
   9282 out:
   9283 	WM_CORE_UNLOCK(sc);
   9284 
   9285 	if (sc->sc_type == WM_T_82574) {
   9286 		if (!has_rxo)
   9287 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9288 		else
   9289 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9290 	} else if (sc->sc_type == WM_T_82575)
   9291 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9292 	else
   9293 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9294 
   9295 	return 1;
   9296 }
   9297 
   9298 /*
   9299  * Media related.
   9300  * GMII, SGMII, TBI (and SERDES)
   9301  */
   9302 
   9303 /* Common */
   9304 
   9305 /*
   9306  * wm_tbi_serdes_set_linkled:
   9307  *
   9308  *	Update the link LED on TBI and SERDES devices.
   9309  */
   9310 static void
   9311 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9312 {
   9313 
   9314 	if (sc->sc_tbi_linkup)
   9315 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9316 	else
   9317 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9318 
   9319 	/* 82540 or newer devices are active low */
   9320 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9321 
   9322 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9323 }
   9324 
   9325 /* GMII related */
   9326 
   9327 /*
   9328  * wm_gmii_reset:
   9329  *
   9330  *	Reset the PHY.
   9331  */
   9332 static void
   9333 wm_gmii_reset(struct wm_softc *sc)
   9334 {
   9335 	uint32_t reg;
   9336 	int rv;
   9337 
   9338 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9339 		device_xname(sc->sc_dev), __func__));
   9340 
   9341 	rv = sc->phy.acquire(sc);
   9342 	if (rv != 0) {
   9343 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9344 		    __func__);
   9345 		return;
   9346 	}
   9347 
   9348 	switch (sc->sc_type) {
   9349 	case WM_T_82542_2_0:
   9350 	case WM_T_82542_2_1:
   9351 		/* null */
   9352 		break;
   9353 	case WM_T_82543:
   9354 		/*
   9355 		 * With 82543, we need to force speed and duplex on the MAC
   9356 		 * equal to what the PHY speed and duplex configuration is.
   9357 		 * In addition, we need to perform a hardware reset on the PHY
   9358 		 * to take it out of reset.
   9359 		 */
   9360 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9361 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9362 
   9363 		/* The PHY reset pin is active-low. */
   9364 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9365 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9366 		    CTRL_EXT_SWDPIN(4));
   9367 		reg |= CTRL_EXT_SWDPIO(4);
   9368 
   9369 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9370 		CSR_WRITE_FLUSH(sc);
   9371 		delay(10*1000);
   9372 
   9373 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9374 		CSR_WRITE_FLUSH(sc);
   9375 		delay(150);
   9376 #if 0
   9377 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9378 #endif
   9379 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9380 		break;
   9381 	case WM_T_82544:	/* reset 10000us */
   9382 	case WM_T_82540:
   9383 	case WM_T_82545:
   9384 	case WM_T_82545_3:
   9385 	case WM_T_82546:
   9386 	case WM_T_82546_3:
   9387 	case WM_T_82541:
   9388 	case WM_T_82541_2:
   9389 	case WM_T_82547:
   9390 	case WM_T_82547_2:
   9391 	case WM_T_82571:	/* reset 100us */
   9392 	case WM_T_82572:
   9393 	case WM_T_82573:
   9394 	case WM_T_82574:
   9395 	case WM_T_82575:
   9396 	case WM_T_82576:
   9397 	case WM_T_82580:
   9398 	case WM_T_I350:
   9399 	case WM_T_I354:
   9400 	case WM_T_I210:
   9401 	case WM_T_I211:
   9402 	case WM_T_82583:
   9403 	case WM_T_80003:
   9404 		/* generic reset */
   9405 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9406 		CSR_WRITE_FLUSH(sc);
   9407 		delay(20000);
   9408 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9409 		CSR_WRITE_FLUSH(sc);
   9410 		delay(20000);
   9411 
   9412 		if ((sc->sc_type == WM_T_82541)
   9413 		    || (sc->sc_type == WM_T_82541_2)
   9414 		    || (sc->sc_type == WM_T_82547)
   9415 		    || (sc->sc_type == WM_T_82547_2)) {
    9416 			/* Workarounds for IGP are done in igp_reset(). */
   9417 			/* XXX add code to set LED after phy reset */
   9418 		}
   9419 		break;
   9420 	case WM_T_ICH8:
   9421 	case WM_T_ICH9:
   9422 	case WM_T_ICH10:
   9423 	case WM_T_PCH:
   9424 	case WM_T_PCH2:
   9425 	case WM_T_PCH_LPT:
   9426 	case WM_T_PCH_SPT:
   9427 	case WM_T_PCH_CNP:
   9428 		/* generic reset */
   9429 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9430 		CSR_WRITE_FLUSH(sc);
   9431 		delay(100);
   9432 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9433 		CSR_WRITE_FLUSH(sc);
   9434 		delay(150);
   9435 		break;
   9436 	default:
   9437 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9438 		    __func__);
   9439 		break;
   9440 	}
   9441 
   9442 	sc->phy.release(sc);
   9443 
   9444 	/* get_cfg_done */
   9445 	wm_get_cfg_done(sc);
   9446 
   9447 	/* extra setup */
   9448 	switch (sc->sc_type) {
   9449 	case WM_T_82542_2_0:
   9450 	case WM_T_82542_2_1:
   9451 	case WM_T_82543:
   9452 	case WM_T_82544:
   9453 	case WM_T_82540:
   9454 	case WM_T_82545:
   9455 	case WM_T_82545_3:
   9456 	case WM_T_82546:
   9457 	case WM_T_82546_3:
   9458 	case WM_T_82541_2:
   9459 	case WM_T_82547_2:
   9460 	case WM_T_82571:
   9461 	case WM_T_82572:
   9462 	case WM_T_82573:
   9463 	case WM_T_82574:
   9464 	case WM_T_82583:
   9465 	case WM_T_82575:
   9466 	case WM_T_82576:
   9467 	case WM_T_82580:
   9468 	case WM_T_I350:
   9469 	case WM_T_I354:
   9470 	case WM_T_I210:
   9471 	case WM_T_I211:
   9472 	case WM_T_80003:
   9473 		/* null */
   9474 		break;
   9475 	case WM_T_82541:
   9476 	case WM_T_82547:
    9477 		/* XXX Actively configure the LED after PHY reset */
   9478 		break;
   9479 	case WM_T_ICH8:
   9480 	case WM_T_ICH9:
   9481 	case WM_T_ICH10:
   9482 	case WM_T_PCH:
   9483 	case WM_T_PCH2:
   9484 	case WM_T_PCH_LPT:
   9485 	case WM_T_PCH_SPT:
   9486 	case WM_T_PCH_CNP:
   9487 		wm_phy_post_reset(sc);
   9488 		break;
   9489 	default:
   9490 		panic("%s: unknown type\n", __func__);
   9491 		break;
   9492 	}
   9493 }
   9494 
    9495 /*
    9496  * Set up sc_phytype and mii_{read|write}reg.
    9497  *
    9498  *  To identify the PHY type, the correct read/write functions must be
    9499  * selected. To select them, the PCI ID or the MAC type is required,
    9500  * without accessing PHY registers.
    9501  *
    9502  *  On the first call of this function, the PHY ID is not known yet, so
    9503  * check the PCI ID or the MAC type. The list of PCI IDs may not be
    9504  * perfect, so the result might be incorrect.
    9505  *
    9506  *  On the second call, the PHY OUI and model are used to identify the
    9507  * PHY type. This might not be perfect either, for lack of compared
    9508  * entries, but it should be better than the first call.
    9509  *
    9510  *  If the newly detected result differs from the previous assumption,
    9511  * a diagnostic message is printed.
    9512  */
   9513 static void
   9514 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9515     uint16_t phy_model)
   9516 {
   9517 	device_t dev = sc->sc_dev;
   9518 	struct mii_data *mii = &sc->sc_mii;
   9519 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9520 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9521 	mii_readreg_t new_readreg;
   9522 	mii_writereg_t new_writereg;
   9523 
   9524 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9525 		device_xname(sc->sc_dev), __func__));
   9526 
   9527 	if (mii->mii_readreg == NULL) {
   9528 		/*
   9529 		 *  This is the first call of this function. For ICH and PCH
   9530 		 * variants, it's difficult to determine the PHY access method
   9531 		 * by sc_type, so use the PCI product ID for some devices.
   9532 		 */
   9533 
   9534 		switch (sc->sc_pcidevid) {
   9535 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9536 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9537 			/* 82577 */
   9538 			new_phytype = WMPHY_82577;
   9539 			break;
   9540 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9541 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9542 			/* 82578 */
   9543 			new_phytype = WMPHY_82578;
   9544 			break;
   9545 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9546 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9547 			/* 82579 */
   9548 			new_phytype = WMPHY_82579;
   9549 			break;
   9550 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9551 		case PCI_PRODUCT_INTEL_82801I_BM:
   9552 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9553 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9554 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9555 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9556 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9557 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9558 			/* ICH8, 9, 10 with 82567 */
   9559 			new_phytype = WMPHY_BM;
   9560 			break;
   9561 		default:
   9562 			break;
   9563 		}
   9564 	} else {
   9565 		/* It's not the first call. Use PHY OUI and model */
   9566 		switch (phy_oui) {
   9567 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9568 			switch (phy_model) {
   9569 			case 0x0004: /* XXX */
   9570 				new_phytype = WMPHY_82578;
   9571 				break;
   9572 			default:
   9573 				break;
   9574 			}
   9575 			break;
   9576 		case MII_OUI_xxMARVELL:
   9577 			switch (phy_model) {
   9578 			case MII_MODEL_xxMARVELL_I210:
   9579 				new_phytype = WMPHY_I210;
   9580 				break;
   9581 			case MII_MODEL_xxMARVELL_E1011:
   9582 			case MII_MODEL_xxMARVELL_E1000_3:
   9583 			case MII_MODEL_xxMARVELL_E1000_5:
   9584 			case MII_MODEL_xxMARVELL_E1112:
   9585 				new_phytype = WMPHY_M88;
   9586 				break;
   9587 			case MII_MODEL_xxMARVELL_E1149:
   9588 				new_phytype = WMPHY_BM;
   9589 				break;
   9590 			case MII_MODEL_xxMARVELL_E1111:
   9591 			case MII_MODEL_xxMARVELL_I347:
   9592 			case MII_MODEL_xxMARVELL_E1512:
   9593 			case MII_MODEL_xxMARVELL_E1340M:
   9594 			case MII_MODEL_xxMARVELL_E1543:
   9595 				new_phytype = WMPHY_M88;
   9596 				break;
   9597 			case MII_MODEL_xxMARVELL_I82563:
   9598 				new_phytype = WMPHY_GG82563;
   9599 				break;
   9600 			default:
   9601 				break;
   9602 			}
   9603 			break;
   9604 		case MII_OUI_INTEL:
   9605 			switch (phy_model) {
   9606 			case MII_MODEL_INTEL_I82577:
   9607 				new_phytype = WMPHY_82577;
   9608 				break;
   9609 			case MII_MODEL_INTEL_I82579:
   9610 				new_phytype = WMPHY_82579;
   9611 				break;
   9612 			case MII_MODEL_INTEL_I217:
   9613 				new_phytype = WMPHY_I217;
   9614 				break;
   9615 			case MII_MODEL_INTEL_I82580:
   9616 			case MII_MODEL_INTEL_I350:
   9617 				new_phytype = WMPHY_82580;
   9618 				break;
   9619 			default:
   9620 				break;
   9621 			}
   9622 			break;
   9623 		case MII_OUI_yyINTEL:
   9624 			switch (phy_model) {
   9625 			case MII_MODEL_yyINTEL_I82562G:
   9626 			case MII_MODEL_yyINTEL_I82562EM:
   9627 			case MII_MODEL_yyINTEL_I82562ET:
   9628 				new_phytype = WMPHY_IFE;
   9629 				break;
   9630 			case MII_MODEL_yyINTEL_IGP01E1000:
   9631 				new_phytype = WMPHY_IGP;
   9632 				break;
   9633 			case MII_MODEL_yyINTEL_I82566:
   9634 				new_phytype = WMPHY_IGP_3;
   9635 				break;
   9636 			default:
   9637 				break;
   9638 			}
   9639 			break;
   9640 		default:
   9641 			break;
   9642 		}
   9643 		if (new_phytype == WMPHY_UNKNOWN)
   9644 			aprint_verbose_dev(dev,
   9645 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   9646 			    __func__, phy_oui, phy_model);
   9647 
   9648 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9649 		    && (sc->sc_phytype != new_phytype)) {
    9650 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9651 			    "was incorrect. PHY type from PHY ID = %u\n",
   9652 			    sc->sc_phytype, new_phytype);
   9653 		}
   9654 	}
   9655 
   9656 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9657 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9658 		/* SGMII */
   9659 		new_readreg = wm_sgmii_readreg;
   9660 		new_writereg = wm_sgmii_writereg;
    9661 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9662 		/* BM2 (phyaddr == 1) */
   9663 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9664 		    && (new_phytype != WMPHY_BM)
   9665 		    && (new_phytype != WMPHY_UNKNOWN))
   9666 			doubt_phytype = new_phytype;
   9667 		new_phytype = WMPHY_BM;
   9668 		new_readreg = wm_gmii_bm_readreg;
   9669 		new_writereg = wm_gmii_bm_writereg;
   9670 	} else if (sc->sc_type >= WM_T_PCH) {
   9671 		/* All PCH* use _hv_ */
   9672 		new_readreg = wm_gmii_hv_readreg;
   9673 		new_writereg = wm_gmii_hv_writereg;
   9674 	} else if (sc->sc_type >= WM_T_ICH8) {
   9675 		/* non-82567 ICH8, 9 and 10 */
   9676 		new_readreg = wm_gmii_i82544_readreg;
   9677 		new_writereg = wm_gmii_i82544_writereg;
   9678 	} else if (sc->sc_type >= WM_T_80003) {
   9679 		/* 80003 */
   9680 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9681 		    && (new_phytype != WMPHY_GG82563)
   9682 		    && (new_phytype != WMPHY_UNKNOWN))
   9683 			doubt_phytype = new_phytype;
   9684 		new_phytype = WMPHY_GG82563;
   9685 		new_readreg = wm_gmii_i80003_readreg;
   9686 		new_writereg = wm_gmii_i80003_writereg;
   9687 	} else if (sc->sc_type >= WM_T_I210) {
   9688 		/* I210 and I211 */
   9689 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9690 		    && (new_phytype != WMPHY_I210)
   9691 		    && (new_phytype != WMPHY_UNKNOWN))
   9692 			doubt_phytype = new_phytype;
   9693 		new_phytype = WMPHY_I210;
   9694 		new_readreg = wm_gmii_gs40g_readreg;
   9695 		new_writereg = wm_gmii_gs40g_writereg;
   9696 	} else if (sc->sc_type >= WM_T_82580) {
   9697 		/* 82580, I350 and I354 */
   9698 		new_readreg = wm_gmii_82580_readreg;
   9699 		new_writereg = wm_gmii_82580_writereg;
   9700 	} else if (sc->sc_type >= WM_T_82544) {
    9701 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9702 		new_readreg = wm_gmii_i82544_readreg;
   9703 		new_writereg = wm_gmii_i82544_writereg;
   9704 	} else {
   9705 		new_readreg = wm_gmii_i82543_readreg;
   9706 		new_writereg = wm_gmii_i82543_writereg;
   9707 	}
   9708 
   9709 	if (new_phytype == WMPHY_BM) {
   9710 		/* All BM use _bm_ */
   9711 		new_readreg = wm_gmii_bm_readreg;
   9712 		new_writereg = wm_gmii_bm_writereg;
   9713 	}
   9714 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9715 		/* All PCH* use _hv_ */
   9716 		new_readreg = wm_gmii_hv_readreg;
   9717 		new_writereg = wm_gmii_hv_writereg;
   9718 	}
   9719 
   9720 	/* Diag output */
   9721 	if (doubt_phytype != WMPHY_UNKNOWN)
   9722 		aprint_error_dev(dev, "Assumed new PHY type was "
   9723 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9724 		    new_phytype);
   9725 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9726 	    && (sc->sc_phytype != new_phytype))
    9727 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9728 		    "was incorrect. New PHY type = %u\n",
   9729 		    sc->sc_phytype, new_phytype);
   9730 
   9731 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9732 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9733 
   9734 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9735 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9736 		    "function was incorrect.\n");
   9737 
   9738 	/* Update now */
   9739 	sc->sc_phytype = new_phytype;
   9740 	mii->mii_readreg = new_readreg;
   9741 	mii->mii_writereg = new_writereg;
   9742 	if (new_readreg == wm_gmii_hv_readreg) {
   9743 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   9744 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   9745 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   9746 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   9747 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   9748 	}
   9749 }
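/*
 * Editor's note: a minimal sketch (not part of the driver) of the two-call
 * pattern described above.  The first call is made while mii->mii_readreg
 * is still NULL, so only the PCI ID / MAC type is available; the second
 * call passes the OUI and model of the PHY that mii_attach() found, as
 * wm_gmii_mediainit() does below.  The zero arguments in the first call
 * are illustrative placeholders.
 */
#if 0	/* illustration only */
	/* 1st call: before any PHY access; OUI/model are not known yet. */
	wm_gmii_setup_phytype(sc, 0, 0);

	/* ... mii_attach() probes; child = LIST_FIRST(&mii->mii_phys) ... */

	/* 2nd call: refine the guess using the PHY ID registers. */
	wm_gmii_setup_phytype(sc, child->mii_mpd_oui, child->mii_mpd_model);
#endif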
   9750 
   9751 /*
   9752  * wm_get_phy_id_82575:
   9753  *
    9754  * Return the PHY ID, or -1 on failure.
   9755  */
   9756 static int
   9757 wm_get_phy_id_82575(struct wm_softc *sc)
   9758 {
   9759 	uint32_t reg;
   9760 	int phyid = -1;
   9761 
   9762 	/* XXX */
   9763 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9764 		return -1;
   9765 
   9766 	if (wm_sgmii_uses_mdio(sc)) {
   9767 		switch (sc->sc_type) {
   9768 		case WM_T_82575:
   9769 		case WM_T_82576:
   9770 			reg = CSR_READ(sc, WMREG_MDIC);
   9771 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9772 			break;
   9773 		case WM_T_82580:
   9774 		case WM_T_I350:
   9775 		case WM_T_I354:
   9776 		case WM_T_I210:
   9777 		case WM_T_I211:
   9778 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9779 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9780 			break;
   9781 		default:
   9782 			return -1;
   9783 		}
   9784 	}
   9785 
   9786 	return phyid;
   9787 }
   9788 
   9789 
   9790 /*
   9791  * wm_gmii_mediainit:
   9792  *
   9793  *	Initialize media for use on 1000BASE-T devices.
   9794  */
   9795 static void
   9796 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9797 {
   9798 	device_t dev = sc->sc_dev;
   9799 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9800 	struct mii_data *mii = &sc->sc_mii;
   9801 	uint32_t reg;
   9802 
   9803 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9804 		device_xname(sc->sc_dev), __func__));
   9805 
   9806 	/* We have GMII. */
   9807 	sc->sc_flags |= WM_F_HAS_MII;
   9808 
   9809 	if (sc->sc_type == WM_T_80003)
   9810 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9811 	else
   9812 		sc->sc_tipg = TIPG_1000T_DFLT;
   9813 
   9814 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9815 	if ((sc->sc_type == WM_T_82580)
   9816 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9817 	    || (sc->sc_type == WM_T_I211)) {
   9818 		reg = CSR_READ(sc, WMREG_PHPM);
   9819 		reg &= ~PHPM_GO_LINK_D;
   9820 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9821 	}
   9822 
   9823 	/*
   9824 	 * Let the chip set speed/duplex on its own based on
   9825 	 * signals from the PHY.
   9826 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9827 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9828 	 */
   9829 	sc->sc_ctrl |= CTRL_SLU;
   9830 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9831 
   9832 	/* Initialize our media structures and probe the GMII. */
   9833 	mii->mii_ifp = ifp;
   9834 
   9835 	mii->mii_statchg = wm_gmii_statchg;
   9836 
    9837 	/* Switch PHY control from SMBus to PCIe */
   9838 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9839 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9840 	    || (sc->sc_type == WM_T_PCH_CNP))
   9841 		wm_smbustopci(sc);
   9842 
   9843 	wm_gmii_reset(sc);
   9844 
   9845 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9846 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9847 	    wm_gmii_mediastatus);
   9848 
   9849 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9850 	    || (sc->sc_type == WM_T_82580)
   9851 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9852 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9853 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9854 			/* Attach only one port */
   9855 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9856 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9857 		} else {
   9858 			int i, id;
   9859 			uint32_t ctrl_ext;
   9860 
   9861 			id = wm_get_phy_id_82575(sc);
   9862 			if (id != -1) {
   9863 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9864 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9865 			}
   9866 			if ((id == -1)
   9867 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9868 				/* Power on the SGMII PHY if it is disabled */
   9869 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9870 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9871 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9872 				CSR_WRITE_FLUSH(sc);
   9873 				delay(300*1000); /* XXX too long */
   9874 
    9875 				/* Try PHY addresses 1 through 7 */
   9876 				for (i = 1; i < 8; i++)
   9877 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9878 					    0xffffffff, i, MII_OFFSET_ANY,
   9879 					    MIIF_DOPAUSE);
   9880 
   9881 				/* restore previous sfp cage power state */
   9882 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9883 			}
   9884 		}
   9885 	} else
   9886 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9887 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9888 
   9889 	/*
    9890 	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
    9891 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   9892 	 */
   9893 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9894 		|| (sc->sc_type == WM_T_PCH_SPT)
   9895 		|| (sc->sc_type == WM_T_PCH_CNP))
   9896 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9897 		wm_set_mdio_slow_mode_hv(sc);
   9898 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9899 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9900 	}
   9901 
   9902 	/*
   9903 	 * (For ICH8 variants)
   9904 	 * If PHY detection failed, use BM's r/w function and retry.
   9905 	 */
   9906 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9907 		/* if failed, retry with *_bm_* */
   9908 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9909 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9910 		    sc->sc_phytype);
   9911 		sc->sc_phytype = WMPHY_BM;
   9912 		mii->mii_readreg = wm_gmii_bm_readreg;
   9913 		mii->mii_writereg = wm_gmii_bm_writereg;
   9914 
   9915 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9916 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9917 	}
   9918 
   9919 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9920 		/* No PHY was found */
   9921 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9922 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9923 		sc->sc_phytype = WMPHY_NONE;
   9924 	} else {
   9925 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9926 
   9927 		/*
    9928 		 * A PHY was found. Check the PHY type again with the second
    9929 		 * call of wm_gmii_setup_phytype().
   9930 		 */
   9931 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9932 		    child->mii_mpd_model);
   9933 
   9934 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9935 	}
   9936 }
   9937 
   9938 /*
   9939  * wm_gmii_mediachange:	[ifmedia interface function]
   9940  *
   9941  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9942  */
   9943 static int
   9944 wm_gmii_mediachange(struct ifnet *ifp)
   9945 {
   9946 	struct wm_softc *sc = ifp->if_softc;
   9947 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9948 	int rc;
   9949 
   9950 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9951 		device_xname(sc->sc_dev), __func__));
   9952 	if ((ifp->if_flags & IFF_UP) == 0)
   9953 		return 0;
   9954 
   9955 	/* Disable D0 LPLU. */
   9956 	wm_lplu_d0_disable(sc);
   9957 
   9958 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9959 	sc->sc_ctrl |= CTRL_SLU;
   9960 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9961 	    || (sc->sc_type > WM_T_82543)) {
   9962 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9963 	} else {
   9964 		sc->sc_ctrl &= ~CTRL_ASDE;
   9965 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9966 		if (ife->ifm_media & IFM_FDX)
   9967 			sc->sc_ctrl |= CTRL_FD;
   9968 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9969 		case IFM_10_T:
   9970 			sc->sc_ctrl |= CTRL_SPEED_10;
   9971 			break;
   9972 		case IFM_100_TX:
   9973 			sc->sc_ctrl |= CTRL_SPEED_100;
   9974 			break;
   9975 		case IFM_1000_T:
   9976 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9977 			break;
   9978 		default:
   9979 			panic("wm_gmii_mediachange: bad media 0x%x",
   9980 			    ife->ifm_media);
   9981 		}
   9982 	}
   9983 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9984 	CSR_WRITE_FLUSH(sc);
   9985 	if (sc->sc_type <= WM_T_82543)
   9986 		wm_gmii_reset(sc);
   9987 
   9988 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9989 		return 0;
   9990 	return rc;
   9991 }
   9992 
   9993 /*
   9994  * wm_gmii_mediastatus:	[ifmedia interface function]
   9995  *
   9996  *	Get the current interface media status on a 1000BASE-T device.
   9997  */
   9998 static void
   9999 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10000 {
   10001 	struct wm_softc *sc = ifp->if_softc;
   10002 
   10003 	ether_mediastatus(ifp, ifmr);
   10004 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10005 	    | sc->sc_flowflags;
   10006 }
   10007 
   10008 #define	MDI_IO		CTRL_SWDPIN(2)
   10009 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10010 #define	MDI_CLK		CTRL_SWDPIN(3)
   10011 
   10012 static void
   10013 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10014 {
   10015 	uint32_t i, v;
   10016 
   10017 	v = CSR_READ(sc, WMREG_CTRL);
   10018 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10019 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10020 
   10021 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10022 		if (data & i)
   10023 			v |= MDI_IO;
   10024 		else
   10025 			v &= ~MDI_IO;
   10026 		CSR_WRITE(sc, WMREG_CTRL, v);
   10027 		CSR_WRITE_FLUSH(sc);
   10028 		delay(10);
   10029 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10030 		CSR_WRITE_FLUSH(sc);
   10031 		delay(10);
   10032 		CSR_WRITE(sc, WMREG_CTRL, v);
   10033 		CSR_WRITE_FLUSH(sc);
   10034 		delay(10);
   10035 	}
   10036 }
   10037 
   10038 static uint32_t
   10039 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10040 {
   10041 	uint32_t v, i, data = 0;
   10042 
   10043 	v = CSR_READ(sc, WMREG_CTRL);
   10044 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10045 	v |= CTRL_SWDPIO(3);
   10046 
   10047 	CSR_WRITE(sc, WMREG_CTRL, v);
   10048 	CSR_WRITE_FLUSH(sc);
   10049 	delay(10);
   10050 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10051 	CSR_WRITE_FLUSH(sc);
   10052 	delay(10);
   10053 	CSR_WRITE(sc, WMREG_CTRL, v);
   10054 	CSR_WRITE_FLUSH(sc);
   10055 	delay(10);
   10056 
   10057 	for (i = 0; i < 16; i++) {
   10058 		data <<= 1;
   10059 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10060 		CSR_WRITE_FLUSH(sc);
   10061 		delay(10);
   10062 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10063 			data |= 1;
   10064 		CSR_WRITE(sc, WMREG_CTRL, v);
   10065 		CSR_WRITE_FLUSH(sc);
   10066 		delay(10);
   10067 	}
   10068 
   10069 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10070 	CSR_WRITE_FLUSH(sc);
   10071 	delay(10);
   10072 	CSR_WRITE(sc, WMREG_CTRL, v);
   10073 	CSR_WRITE_FLUSH(sc);
   10074 	delay(10);
   10075 
   10076 	return data;
   10077 }
   10078 
   10079 #undef MDI_IO
   10080 #undef MDI_DIR
   10081 #undef MDI_CLK
   10082 
   10083 /*
   10084  * wm_gmii_i82543_readreg:	[mii interface function]
   10085  *
   10086  *	Read a PHY register on the GMII (i82543 version).
   10087  */
   10088 static int
   10089 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10090 {
   10091 	struct wm_softc *sc = device_private(dev);
   10092 	int rv;
   10093 
   10094 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10095 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10096 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10097 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10098 
   10099 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10100 		device_xname(dev), phy, reg, rv));
   10101 
   10102 	return rv;
   10103 }
   10104 
   10105 /*
   10106  * wm_gmii_i82543_writereg:	[mii interface function]
   10107  *
   10108  *	Write a PHY register on the GMII (i82543 version).
   10109  */
   10110 static void
   10111 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10112 {
   10113 	struct wm_softc *sc = device_private(dev);
   10114 
   10115 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10116 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10117 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10118 	    (MII_COMMAND_START << 30), 32);
   10119 }
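/*
 * Editor's note: the helpers above bit-bang a standard IEEE 802.3 clause 22
 * MDIO frame, most significant bit first.  As composed at the call sites in
 * wm_gmii_i82543_readreg() and wm_gmii_i82543_writereg() above:
 *
 *	preamble: 32 one-bits	(wm_i82543_mii_sendbits(sc, 0xffffffffU, 32))
 *	ST   (2): MII_COMMAND_START
 *	OP   (2): MII_COMMAND_READ or MII_COMMAND_WRITE
 *	PHY  (5): PHY address
 *	REG  (5): register address
 *	TA   (2): turnaround; MII_COMMAND_ACK on writes.  Reads send only
 *		  the 14 command bits and then clock the turnaround plus
 *		  16 data bits back in via wm_i82543_mii_recvbits().
 *	DATA(16): register value (writes only)
 */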
   10120 
   10121 /*
   10122  * wm_gmii_mdic_readreg:	[mii interface function]
   10123  *
   10124  *	Read a PHY register on the GMII.
   10125  */
   10126 static int
   10127 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10128 {
   10129 	struct wm_softc *sc = device_private(dev);
   10130 	uint32_t mdic = 0;
   10131 	int i, rv;
   10132 
   10133 	if (reg > MII_ADDRMASK) {
   10134 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10135 		    __func__, sc->sc_phytype, reg);
   10136 		reg &= MII_ADDRMASK;
   10137 	}
   10138 
   10139 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10140 	    MDIC_REGADD(reg));
   10141 
   10142 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10143 		delay(50);
   10144 		mdic = CSR_READ(sc, WMREG_MDIC);
   10145 		if (mdic & MDIC_READY)
   10146 			break;
   10147 	}
   10148 
   10149 	if ((mdic & MDIC_READY) == 0) {
   10150 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10151 		    device_xname(dev), phy, reg);
   10152 		return 0;
   10153 	} else if (mdic & MDIC_E) {
   10154 #if 0 /* This is normal if no PHY is present. */
   10155 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10156 		    device_xname(dev), phy, reg);
   10157 #endif
   10158 		return 0;
   10159 	} else {
   10160 		rv = MDIC_DATA(mdic);
   10161 		if (rv == 0xffff)
   10162 			rv = 0;
   10163 	}
   10164 
   10165 	/*
   10166 	 * Allow some time after each MDIC transaction to avoid
   10167 	 * reading duplicate data in the next MDIC transaction.
   10168 	 */
   10169 	if (sc->sc_type == WM_T_PCH2)
   10170 		delay(100);
   10171 
   10172 	return rv;
   10173 }
   10174 
   10175 /*
   10176  * wm_gmii_mdic_writereg:	[mii interface function]
   10177  *
   10178  *	Write a PHY register on the GMII.
   10179  */
   10180 static void
   10181 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10182 {
   10183 	struct wm_softc *sc = device_private(dev);
   10184 	uint32_t mdic = 0;
   10185 	int i;
   10186 
   10187 	if (reg > MII_ADDRMASK) {
   10188 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10189 		    __func__, sc->sc_phytype, reg);
   10190 		reg &= MII_ADDRMASK;
   10191 	}
   10192 
   10193 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10194 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10195 
   10196 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10197 		delay(50);
   10198 		mdic = CSR_READ(sc, WMREG_MDIC);
   10199 		if (mdic & MDIC_READY)
   10200 			break;
   10201 	}
   10202 
   10203 	if ((mdic & MDIC_READY) == 0) {
   10204 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10205 		    device_xname(dev), phy, reg);
   10206 		return;
   10207 	} else if (mdic & MDIC_E) {
   10208 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10209 		    device_xname(dev), phy, reg);
   10210 		return;
   10211 	}
   10212 
   10213 	/*
   10214 	 * Allow some time after each MDIC transaction to avoid
   10215 	 * reading duplicate data in the next MDIC transaction.
   10216 	 */
   10217 	if (sc->sc_type == WM_T_PCH2)
   10218 		delay(100);
   10219 }
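/*
 * Editor's note: both MDIC helpers above poll the same ready bit.  A
 * factored sketch of that wait (a hypothetical helper, not part of the
 * driver):
 */
#if 0
static int
wm_mdic_wait_ready(struct wm_softc *sc, uint32_t *mdicp)
{
	int i;

	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		delay(50);
		*mdicp = CSR_READ(sc, WMREG_MDIC);
		if ((*mdicp & MDIC_READY) != 0)
			return 0;
	}
	return ETIMEDOUT;	/* caller logs the timeout */
}
#endif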
   10220 
   10221 /*
   10222  * wm_gmii_i82544_readreg:	[mii interface function]
   10223  *
   10224  *	Read a PHY register on the GMII.
   10225  */
   10226 static int
   10227 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10228 {
   10229 	struct wm_softc *sc = device_private(dev);
   10230 	uint16_t val;
   10231 
   10232 	if (sc->phy.acquire(sc)) {
   10233 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10234 		return 0;
   10235 	}
   10236 
   10237 	wm_gmii_i82544_readreg_locked(dev, phy, reg, &val);
   10238 
   10239 	sc->phy.release(sc);
   10240 
   10241 	return val;
   10242 }
   10243 
   10244 static int
   10245 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10246 {
   10247 	struct wm_softc *sc = device_private(dev);
   10248 
   10249 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10250 		switch (sc->sc_phytype) {
   10251 		case WMPHY_IGP:
   10252 		case WMPHY_IGP_2:
   10253 		case WMPHY_IGP_3:
   10254 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10255 			    reg);
   10256 			break;
   10257 		default:
   10258 #ifdef WM_DEBUG
   10259 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10260 			    __func__, sc->sc_phytype, reg);
   10261 #endif
   10262 			break;
   10263 		}
   10264 	}
   10265 
   10266 	*val = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10267 
   10268 	return 0;
   10269 }
   10270 
   10271 /*
   10272  * wm_gmii_i82544_writereg:	[mii interface function]
   10273  *
   10274  *	Write a PHY register on the GMII.
   10275  */
   10276 static void
   10277 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10278 {
   10279 	struct wm_softc *sc = device_private(dev);
   10280 
   10281 	if (sc->phy.acquire(sc)) {
   10282 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10283 		return;
   10284 	}
   10285 
   10286 	wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10287 	sc->phy.release(sc);
   10288 }
   10289 
   10290 static int
   10291 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10292 {
   10293 	struct wm_softc *sc = device_private(dev);
   10294 
   10295 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10296 		switch (sc->sc_phytype) {
   10297 		case WMPHY_IGP:
   10298 		case WMPHY_IGP_2:
   10299 		case WMPHY_IGP_3:
   10300 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10301 			    reg);
   10302 			break;
   10303 		default:
   10304 #ifdef WM_DEBUG
   10305 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10306 			    __func__, sc->sc_phytype, reg);
   10307 #endif
   10308 			break;
   10309 		}
   10310 	}
   10311 
   10312 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10313 
   10314 	return 0;
   10315 }
   10316 
   10317 /*
   10318  * wm_gmii_i80003_readreg:	[mii interface function]
   10319  *
   10320  *	Read a PHY register on the Kumeran interface (80003).
   10321  * This could be handled by the PHY layer if we didn't have to lock the
   10322  * resource ...
   10323  */
   10324 static int
   10325 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10326 {
   10327 	struct wm_softc *sc = device_private(dev);
   10328 	int page_select, temp;
   10329 	int rv;
   10330 
   10331 	if (phy != 1) /* only one PHY on kumeran bus */
   10332 		return 0;
   10333 
   10334 	if (sc->phy.acquire(sc)) {
   10335 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10336 		return 0;
   10337 	}
   10338 
   10339 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10340 		page_select = GG82563_PHY_PAGE_SELECT;
   10341 	else {
   10342 		/*
   10343 		 * Use Alternative Page Select register to access registers
   10344 		 * 30 and 31.
   10345 		 */
   10346 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10347 	}
   10348 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10349 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10350 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10351 		/*
   10352 		 * Wait another 200us to work around a bug with the ready
   10353 		 * bit in the MDIC register.
   10354 		 */
   10355 		delay(200);
   10356 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10357 			device_printf(dev, "%s failed\n", __func__);
   10358 			rv = 0; /* XXX */
   10359 			goto out;
   10360 		}
   10361 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10362 		delay(200);
   10363 	} else
   10364 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10365 
   10366 out:
   10367 	sc->phy.release(sc);
   10368 	return rv;
   10369 }
   10370 
   10371 /*
   10372  * wm_gmii_i80003_writereg:	[mii interface function]
   10373  *
   10374  *	Write a PHY register on the Kumeran interface (80003).
   10375  * This could be handled by the PHY layer if we didn't have to lock the
   10376  * resource ...
   10377  */
   10378 static void
   10379 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10380 {
   10381 	struct wm_softc *sc = device_private(dev);
   10382 	int page_select, temp;
   10383 
   10384 	if (phy != 1) /* only one PHY on kumeran bus */
   10385 		return;
   10386 
   10387 	if (sc->phy.acquire(sc)) {
   10388 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10389 		return;
   10390 	}
   10391 
   10392 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10393 		page_select = GG82563_PHY_PAGE_SELECT;
   10394 	else {
   10395 		/*
   10396 		 * Use Alternative Page Select register to access registers
   10397 		 * 30 and 31.
   10398 		 */
   10399 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10400 	}
   10401 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10402 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10403 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10404 		/*
   10405 		 * Wait another 200us to work around a bug with the ready
   10406 		 * bit in the MDIC register.
   10407 		 */
   10408 		delay(200);
   10409 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10410 			device_printf(dev, "%s failed\n", __func__);
   10411 			goto out;
   10412 		}
   10413 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10414 		delay(200);
   10415 	} else
   10416 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10417 
   10418 out:
   10419 	sc->phy.release(sc);
   10420 }
   10421 
   10422 /*
   10423  * wm_gmii_bm_readreg:	[mii interface function]
   10424  *
   10425  *	Read a PHY register on the BM PHYs (82567 and 82574/82583 BM2).
   10426  * This could be handled by the PHY layer if we didn't have to lock the
   10427  * resource ...
   10428  */
   10429 static int
   10430 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10431 {
   10432 	struct wm_softc *sc = device_private(dev);
   10433 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10434 	uint16_t val;
   10435 	int rv;
   10436 
   10437 	if (sc->phy.acquire(sc)) {
   10438 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10439 		return 0;
   10440 	}
   10441 
   10442 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10443 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10444 		    || (reg == 31)) ? 1 : phy;
   10445 	/* Page 800 works differently than the rest so it has its own func */
   10446 	if (page == BM_WUC_PAGE) {
   10447 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10448 		rv = val;
   10449 		goto release;
   10450 	}
   10451 
   10452 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10453 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10454 		    && (sc->sc_type != WM_T_82583))
   10455 			wm_gmii_mdic_writereg(dev, phy,
   10456 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10457 		else
   10458 			wm_gmii_mdic_writereg(dev, phy,
   10459 			    BME1000_PHY_PAGE_SELECT, page);
   10460 	}
   10461 
   10462 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10463 
   10464 release:
   10465 	sc->phy.release(sc);
   10466 	return rv;
   10467 }
   10468 
   10469 /*
   10470  * wm_gmii_bm_writereg:	[mii interface function]
   10471  *
   10472  *	Write a PHY register on the BM PHYs (82567 and 82574/82583 BM2).
   10473  * This could be handled by the PHY layer if we didn't have to lock the
   10474  * resource ...
   10475  */
   10476 static void
   10477 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10478 {
   10479 	struct wm_softc *sc = device_private(dev);
   10480 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10481 
   10482 	if (sc->phy.acquire(sc)) {
   10483 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10484 		return;
   10485 	}
   10486 
   10487 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10488 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10489 		    || (reg == 31)) ? 1 : phy;
   10490 	/* Page 800 works differently than the rest so it has its own func */
   10491 	if (page == BM_WUC_PAGE) {
   10492 		uint16_t tmp;
   10493 
   10494 		tmp = val;
   10495 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10496 		goto release;
   10497 	}
   10498 
   10499 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10500 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10501 		    && (sc->sc_type != WM_T_82583))
   10502 			wm_gmii_mdic_writereg(dev, phy,
   10503 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10504 		else
   10505 			wm_gmii_mdic_writereg(dev, phy,
   10506 			    BME1000_PHY_PAGE_SELECT, page);
   10507 	}
   10508 
   10509 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10510 
   10511 release:
   10512 	sc->phy.release(sc);
   10513 }
   10514 
   10515 static void
   10516 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10517 {
   10518 	struct wm_softc *sc = device_private(dev);
   10519 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10520 	uint16_t wuce, reg;
   10521 
   10522 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10523 		device_xname(dev), __func__));
   10524 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10525 	if (sc->sc_type == WM_T_PCH) {
   10526 		/* XXX The e1000 driver does nothing here... why? */
   10527 	}
   10528 
   10529 	/*
   10530 	 * 1) Enable PHY wakeup register first.
   10531 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10532 	 */
   10533 
   10534 	/* Set page 769 */
   10535 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10536 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10537 
   10538 	/* Read WUCE and save it */
   10539 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10540 
   10541 	reg = wuce | BM_WUC_ENABLE_BIT;
   10542 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10543 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10544 
   10545 	/* Select page 800 */
   10546 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10547 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10548 
   10549 	/*
   10550 	 * 2) Access PHY wakeup register.
   10551 	 * See e1000_access_phy_wakeup_reg_bm.
   10552 	 */
   10553 
   10554 	/* Write the page-800 register address */
   10555 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10556 
   10557 	if (rd)
   10558 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10559 	else
   10560 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10561 
   10562 	/*
   10563 	 * 3) Disable PHY wakeup register.
   10564 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10565 	 */
   10566 	/* Set page 769 */
   10567 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10568 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10569 
   10570 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10571 }
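/*
 * Editor's note: a minimal usage sketch of the helper above (illustration
 * only).  BM wakeup (WUC) registers live on page 800, so callers reach
 * them through this function, as wm_gmii_bm_readreg()/_writereg() and the
 * HV variants do when page == BM_WUC_PAGE.
 */
#if 0
	uint16_t val;

	wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);	/* read */
	wm_access_phy_wakeup_reg_bm(dev, reg, &val, 0);	/* write it back */
#endif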
   10572 
   10573 /*
   10574  * wm_gmii_hv_readreg:	[mii interface function]
   10575  *
   10576  *	Read a PHY register on the HV (PCH and newer) PHYs.
   10577  * This could be handled by the PHY layer if we didn't have to lock the
   10578  * resource ...
   10579  */
   10580 static int
   10581 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10582 {
   10583 	struct wm_softc *sc = device_private(dev);
   10584 	uint16_t val;
   10585 
   10586 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10587 		device_xname(dev), __func__));
   10588 	if (sc->phy.acquire(sc)) {
   10589 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10590 		return 0;
   10591 	}
   10592 
   10593 	wm_gmii_hv_readreg_locked(dev, phy, reg, &val);
   10594 	sc->phy.release(sc);
   10595 	return val;
   10596 }
   10597 
   10598 static int
   10599 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10600 {
   10601 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10602 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10603 
   10604 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10605 
   10606 	/* Page 800 works differently than the rest so it has its own func */
   10607 	if (page == BM_WUC_PAGE) {
   10608 		wm_access_phy_wakeup_reg_bm(dev, reg, val, 1);
   10609 		return 0;
   10610 	}
   10611 
   10612 	/*
   10613 	 * Pages lower than 768 work differently than the rest, so they
   10614 	 * would need their own function
   10615 	 */
   10616 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10617 		printf("gmii_hv_readreg!!!\n");
   10618 		return 0;
   10619 	}
   10620 
   10621 	/*
   10622 	 * XXX I21[789] documents say that the SMBus Address register is at
   10623 	 * PHY address 01, Page 0 (not 768), Register 26.
   10624 	 */
   10625 	if (page == HV_INTC_FC_PAGE_START)
   10626 		page = 0;
   10627 
   10628 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10629 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10630 		    page << BME1000_PAGE_SHIFT);
   10631 	}
   10632 
   10633 	*val = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10634 	return 0;
   10635 }
   10636 
   10637 /*
   10638  * wm_gmii_hv_writereg:	[mii interface function]
   10639  *
   10640  *	Write a PHY register on the HV (PCH and newer) PHYs.
   10641  * This could be handled by the PHY layer if we didn't have to lock the
   10642  * resource ...
   10643  */
   10644 static void
   10645 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10646 {
   10647 	struct wm_softc *sc = device_private(dev);
   10648 
   10649 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10650 		device_xname(dev), __func__));
   10651 
   10652 	if (sc->phy.acquire(sc)) {
   10653 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10654 		return;
   10655 	}
   10656 
   10657 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10658 	sc->phy.release(sc);
   10659 }
   10660 
   10661 static int
   10662 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10663 {
   10664 	struct wm_softc *sc = device_private(dev);
   10665 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10666 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10667 
   10668 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10669 
   10670 	/* Page 800 works differently than the rest so it has its own func */
   10671 	if (page == BM_WUC_PAGE) {
   10672 		uint16_t tmp;
   10673 
   10674 		tmp = val;
   10675 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10676 		return 0;
   10677 	}
   10678 
   10679 	/*
   10680 	 * Lower than page 768 works differently than the rest so it has its
   10681 	 * Pages lower than 768 work differently than the rest, so they
   10682 	 * would need their own function
   10683 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10684 		printf("gmii_hv_writereg!!!\n");
   10685 		return -1;
   10686 	}
   10687 
   10688 	{
   10689 		/*
   10690 		 * XXX I21[789] documents say that the SMBus Address register
   10691 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10692 		 */
   10693 		if (page == HV_INTC_FC_PAGE_START)
   10694 			page = 0;
   10695 
   10696 		/*
   10697 		 * XXX Workaround MDIO accesses being disabled after entering
   10698 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10699 		 * register is set)
   10700 		 */
   10701 		if (sc->sc_phytype == WMPHY_82578) {
   10702 			struct mii_softc *child;
   10703 
   10704 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10705 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10706 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10707 			    && ((val & (1 << 11)) != 0)) {
   10708 				printf("XXX need workaround\n");
   10709 			}
   10710 		}
   10711 
   10712 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10713 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10714 			    page << BME1000_PAGE_SHIFT);
   10715 		}
   10716 	}
   10717 
   10718 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10719 
   10720 	return 0;
   10721 }
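/*
 * Editor's note: for the HV helpers above, the 'reg' argument packs the
 * page and the in-page register number, which the helpers split with
 * BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() before selecting the page and
 * touching MDIC.  Registers on pages >= HV_INTC_FC_PAGE_START are always
 * accessed at PHY address 1.
 */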
   10722 
   10723 /*
   10724  * wm_gmii_82580_readreg:	[mii interface function]
   10725  *
   10726  *	Read a PHY register on the 82580 and I350.
   10727  * This could be handled by the PHY layer if we didn't have to lock the
   10728  * resource ...
   10729  */
   10730 static int
   10731 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10732 {
   10733 	struct wm_softc *sc = device_private(dev);
   10734 	int rv;
   10735 
   10736 	if (sc->phy.acquire(sc) != 0) {
   10737 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10738 		return 0;
   10739 	}
   10740 
   10741 #ifdef DIAGNOSTIC
   10742 	if (reg > MII_ADDRMASK) {
   10743 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10744 		    __func__, sc->sc_phytype, reg);
   10745 		reg &= MII_ADDRMASK;
   10746 	}
   10747 #endif
   10748 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10749 
   10750 	sc->phy.release(sc);
   10751 	return rv;
   10752 }
   10753 
   10754 /*
   10755  * wm_gmii_82580_writereg:	[mii interface function]
   10756  *
   10757  *	Write a PHY register on the 82580 and I350.
   10758  * resource ...
   10759  * ressource ...
   10760  */
   10761 static void
   10762 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10763 {
   10764 	struct wm_softc *sc = device_private(dev);
   10765 
   10766 	if (sc->phy.acquire(sc) != 0) {
   10767 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10768 		return;
   10769 	}
   10770 
   10771 #ifdef DIAGNOSTIC
   10772 	if (reg > MII_ADDRMASK) {
   10773 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10774 		    __func__, sc->sc_phytype, reg);
   10775 		reg &= MII_ADDRMASK;
   10776 	}
   10777 #endif
   10778 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10779 
   10780 	sc->phy.release(sc);
   10781 }
   10782 
   10783 /*
   10784  * wm_gmii_gs40g_readreg:	[mii interface function]
   10785  *
   10786  *	Read a PHY register on the I210 and I211.
   10787  * This could be handled by the PHY layer if we didn't have to lock the
   10788  * resource ...
   10789  */
   10790 static int
   10791 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10792 {
   10793 	struct wm_softc *sc = device_private(dev);
   10794 	int page, offset;
   10795 	int rv;
   10796 
   10797 	/* Acquire semaphore */
   10798 	if (sc->phy.acquire(sc)) {
   10799 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10800 		return 0;
   10801 	}
   10802 
   10803 	/* Page select */
   10804 	page = reg >> GS40G_PAGE_SHIFT;
   10805 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10806 
   10807 	/* Read reg */
   10808 	offset = reg & GS40G_OFFSET_MASK;
   10809 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10810 
   10811 	sc->phy.release(sc);
   10812 	return rv;
   10813 }
   10814 
   10815 /*
   10816  * wm_gmii_gs40g_writereg:	[mii interface function]
   10817  *
   10818  *	Write a PHY register on the I210 and I211.
   10819  * This could be handled by the PHY layer if we didn't have to lock the
   10820  * resource ...
   10821  */
   10822 static void
   10823 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10824 {
   10825 	struct wm_softc *sc = device_private(dev);
   10826 	int page, offset;
   10827 
   10828 	/* Acquire semaphore */
   10829 	if (sc->phy.acquire(sc)) {
   10830 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10831 		return;
   10832 	}
   10833 
   10834 	/* Page select */
   10835 	page = reg >> GS40G_PAGE_SHIFT;
   10836 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10837 
   10838 	/* Write reg */
   10839 	offset = reg & GS40G_OFFSET_MASK;
   10840 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10841 
   10842 	/* Release semaphore */
   10843 	sc->phy.release(sc);
   10844 }
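/*
 * Editor's note: for the GS40G helpers above, the 'reg' argument packs the
 * page and the in-page offset (split via GS40G_PAGE_SHIFT and
 * GS40G_OFFSET_MASK), so a single integer selects both the page and the
 * register around the GS40G_PAGE_SELECT write.
 */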
   10845 
   10846 /*
   10847  * wm_gmii_statchg:	[mii interface function]
   10848  *
   10849  *	Callback from MII layer when media changes.
   10850  */
   10851 static void
   10852 wm_gmii_statchg(struct ifnet *ifp)
   10853 {
   10854 	struct wm_softc *sc = ifp->if_softc;
   10855 	struct mii_data *mii = &sc->sc_mii;
   10856 
   10857 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10858 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10859 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10860 
   10861 	/*
   10862 	 * Get flow control negotiation result.
   10863 	 */
   10864 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10865 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10866 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10867 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10868 	}
   10869 
   10870 	if (sc->sc_flowflags & IFM_FLOW) {
   10871 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10872 			sc->sc_ctrl |= CTRL_TFCE;
   10873 			sc->sc_fcrtl |= FCRTL_XONE;
   10874 		}
   10875 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10876 			sc->sc_ctrl |= CTRL_RFCE;
   10877 	}
   10878 
   10879 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10880 		DPRINTF(WM_DEBUG_LINK,
   10881 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10882 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10883 	} else {
   10884 		DPRINTF(WM_DEBUG_LINK,
   10885 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10886 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10887 	}
   10888 
   10889 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10890 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10891 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10892 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10893 	if (sc->sc_type == WM_T_80003) {
   10894 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10895 		case IFM_1000_T:
   10896 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10897 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10898 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10899 			break;
   10900 		default:
   10901 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10902 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10903 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10904 			break;
   10905 		}
   10906 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10907 	}
   10908 }
   10909 
   10910 /* Kumeran related (80003, ICH* and PCH*) */
   10911 
   10912 /*
   10913  * wm_kmrn_readreg:
   10914  *
   10915  *	Read a Kumeran register.
   10916  */
   10917 static int
   10918 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10919 {
   10920 	int rv;
   10921 
   10922 	if (sc->sc_type == WM_T_80003)
   10923 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10924 	else
   10925 		rv = sc->phy.acquire(sc);
   10926 	if (rv != 0) {
   10927 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10928 		    __func__);
   10929 		return rv;
   10930 	}
   10931 
   10932 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10933 
   10934 	if (sc->sc_type == WM_T_80003)
   10935 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10936 	else
   10937 		sc->phy.release(sc);
   10938 
   10939 	return rv;
   10940 }
   10941 
   10942 static int
   10943 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10944 {
   10945 
   10946 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10947 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10948 	    KUMCTRLSTA_REN);
   10949 	CSR_WRITE_FLUSH(sc);
   10950 	delay(2);
   10951 
   10952 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10953 
   10954 	return 0;
   10955 }
   10956 
   10957 /*
   10958  * wm_kmrn_writereg:
   10959  *
   10960  *	Write a Kumeran register.
   10961  */
   10962 static int
   10963 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10964 {
   10965 	int rv;
   10966 
   10967 	if (sc->sc_type == WM_T_80003)
   10968 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10969 	else
   10970 		rv = sc->phy.acquire(sc);
   10971 	if (rv != 0) {
   10972 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10973 		    __func__);
   10974 		return rv;
   10975 	}
   10976 
   10977 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10978 
   10979 	if (sc->sc_type == WM_T_80003)
   10980 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10981 	else
   10982 		sc->phy.release(sc);
   10983 
   10984 	return rv;
   10985 }
   10986 
   10987 static int
   10988 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10989 {
   10990 
   10991 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10992 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10993 
   10994 	return 0;
   10995 }
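/*
 * Editor's note: a minimal sketch of a Kumeran register update using the
 * helpers above (illustration only; compare the KUMCTRLSTA_OFFSET_HD_CTRL
 * writes in wm_gmii_statchg()).
 */
#if 0
	uint16_t val;

	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &val) == 0)
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
		    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif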
   10996 
   10997 /* SGMII related */
   10998 
   10999 /*
   11000  * wm_sgmii_uses_mdio
   11001  *
   11002  * Check whether the transaction is to the internal PHY or the external
   11003  * MDIO interface. Return true if it's MDIO.
   11004  */
   11005 static bool
   11006 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11007 {
   11008 	uint32_t reg;
   11009 	bool ismdio = false;
   11010 
   11011 	switch (sc->sc_type) {
   11012 	case WM_T_82575:
   11013 	case WM_T_82576:
   11014 		reg = CSR_READ(sc, WMREG_MDIC);
   11015 		ismdio = ((reg & MDIC_DEST) != 0);
   11016 		break;
   11017 	case WM_T_82580:
   11018 	case WM_T_I350:
   11019 	case WM_T_I354:
   11020 	case WM_T_I210:
   11021 	case WM_T_I211:
   11022 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11023 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11024 		break;
   11025 	default:
   11026 		break;
   11027 	}
   11028 
   11029 	return ismdio;
   11030 }
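/*
 * Editor's note: wm_gmii_setup_phytype() keys off this predicate: when it
 * returns false for an SGMII device, PHY registers are reached through the
 * I2C-based wm_sgmii_readreg()/wm_sgmii_writereg() below; when it returns
 * true, the regular MDIC path is used instead.
 */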
   11031 
   11032 /*
   11033  * wm_sgmii_readreg:	[mii interface function]
   11034  *
   11035  *	Read a PHY register on the SGMII
   11036  * This could be handled by the PHY layer if we didn't have to lock the
   11037  * resource ...
   11038  */
   11039 static int
   11040 wm_sgmii_readreg(device_t dev, int phy, int reg)
   11041 {
   11042 	struct wm_softc *sc = device_private(dev);
   11043 	uint32_t i2ccmd;
   11044 	int i, rv;
   11045 
   11046 	if (sc->phy.acquire(sc)) {
   11047 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11048 		return 0;
   11049 	}
   11050 
   11051 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11052 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11053 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11054 
   11055 	/* Poll the ready bit */
   11056 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11057 		delay(50);
   11058 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11059 		if (i2ccmd & I2CCMD_READY)
   11060 			break;
   11061 	}
   11062 	if ((i2ccmd & I2CCMD_READY) == 0)
   11063 		device_printf(dev, "I2CCMD Read did not complete\n");
   11064 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11065 		device_printf(dev, "I2CCMD Error bit set\n");
   11066 
   11067 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11068 
   11069 	sc->phy.release(sc);
   11070 	return rv;
   11071 }
   11072 
   11073 /*
   11074  * wm_sgmii_writereg:	[mii interface function]
   11075  *
   11076  *	Write a PHY register on the SGMII.
   11077  * This could be handled by the PHY layer if we didn't have to lock the
   11078  * resource ...
   11079  */
   11080 static void
   11081 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   11082 {
   11083 	struct wm_softc *sc = device_private(dev);
   11084 	uint32_t i2ccmd;
   11085 	int i;
   11086 	int swapdata;
   11087 
   11088 	if (sc->phy.acquire(sc) != 0) {
   11089 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11090 		return;
   11091 	}
   11092 	/* Swap the data bytes for the I2C interface */
   11093 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11094 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11095 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11096 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11097 
   11098 	/* Poll the ready bit */
   11099 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11100 		delay(50);
   11101 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11102 		if (i2ccmd & I2CCMD_READY)
   11103 			break;
   11104 	}
   11105 	if ((i2ccmd & I2CCMD_READY) == 0)
   11106 		device_printf(dev, "I2CCMD Write did not complete\n");
   11107 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11108 		device_printf(dev, "I2CCMD Error bit set\n");
   11109 
   11110 	sc->phy.release(sc);
   11111 }
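/*
 * Editor's note: the I2CCMD data field carries the 16-bit PHY register
 * value byte-swapped, hence the swaps above.  Worked example: writing
 * val = 0x1234 puts swapdata = 0x3412 into I2CCMD, and a read returning
 * 0x3412 in I2CCMD is swapped back to rv = 0x1234.
 */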
   11112 
   11113 /* TBI related */
   11114 
   11115 static bool
   11116 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11117 {
   11118 	bool sig;
   11119 
   11120 	sig = ctrl & CTRL_SWDPIN(1);
   11121 
   11122 	/*
   11123 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11124 	 * detect a signal, 1 if they don't.
   11125 	 */
   11126 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11127 		sig = !sig;
   11128 
   11129 	return sig;
   11130 }
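/*
 * Editor's note: signal-detect polarity summary for the helper above:
 *
 *	chip		CTRL_SWDPIN(1)	meaning
 *	82543, 82544	0		signal present
 *	all others	1		signal present
 */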
   11131 
   11132 /*
   11133  * wm_tbi_mediainit:
   11134  *
   11135  *	Initialize media for use on 1000BASE-X devices.
   11136  */
   11137 static void
   11138 wm_tbi_mediainit(struct wm_softc *sc)
   11139 {
   11140 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11141 	const char *sep = "";
   11142 
   11143 	if (sc->sc_type < WM_T_82543)
   11144 		sc->sc_tipg = TIPG_WM_DFLT;
   11145 	else
   11146 		sc->sc_tipg = TIPG_LG_DFLT;
   11147 
   11148 	sc->sc_tbi_serdes_anegticks = 5;
   11149 
   11150 	/* Initialize our media structures */
   11151 	sc->sc_mii.mii_ifp = ifp;
   11152 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11153 
   11154 	if ((sc->sc_type >= WM_T_82575)
   11155 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11156 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11157 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11158 	else
   11159 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11160 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11161 
   11162 	/*
   11163 	 * SWD Pins:
   11164 	 *
   11165 	 *	0 = Link LED (output)
   11166 	 *	1 = Loss Of Signal (input)
   11167 	 */
   11168 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11169 
   11170 	/* XXX Perhaps this is only for TBI */
   11171 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11172 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11173 
   11174 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11175 		sc->sc_ctrl &= ~CTRL_LRST;
   11176 
   11177 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11178 
   11179 #define	ADD(ss, mm, dd)							\
   11180 do {									\
   11181 	aprint_normal("%s%s", sep, ss);					\
   11182 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11183 	sep = ", ";							\
   11184 } while (/*CONSTCOND*/0)
   11185 
   11186 	aprint_normal_dev(sc->sc_dev, "");
   11187 
   11188 	if (sc->sc_type == WM_T_I354) {
   11189 		uint32_t status;
   11190 
   11191 		status = CSR_READ(sc, WMREG_STATUS);
   11192 		if (((status & STATUS_2P5_SKU) != 0)
   11193 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11194 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   11195 		} else
   11196 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   11197 	} else if (sc->sc_type == WM_T_82545) {
   11198 		/* Only 82545 is LX (XXX except SFP) */
   11199 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11200 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11201 	} else {
   11202 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11203 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11204 	}
   11205 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11206 	aprint_normal("\n");
   11207 
   11208 #undef ADD
   11209 
   11210 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11211 }
   11212 
   11213 /*
   11214  * wm_tbi_mediachange:	[ifmedia interface function]
   11215  *
   11216  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11217  */
   11218 static int
   11219 wm_tbi_mediachange(struct ifnet *ifp)
   11220 {
   11221 	struct wm_softc *sc = ifp->if_softc;
   11222 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11223 	uint32_t status, ctrl;
   11224 	bool signal;
   11225 	int i;
   11226 
   11227 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11228 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11229 		/* XXX need some work for >= 82571 and < 82575 */
   11230 		if (sc->sc_type < WM_T_82575)
   11231 			return 0;
   11232 	}
   11233 
   11234 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11235 	    || (sc->sc_type >= WM_T_82575))
   11236 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11237 
   11238 	sc->sc_ctrl &= ~CTRL_LRST;
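         	/* Build the autonegotiation advertisement in the TX config word */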
   11239 	sc->sc_txcw = TXCW_ANE;
   11240 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11241 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11242 	else if (ife->ifm_media & IFM_FDX)
   11243 		sc->sc_txcw |= TXCW_FD;
   11244 	else
   11245 		sc->sc_txcw |= TXCW_HD;
   11246 
   11247 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11248 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11249 
   11250 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11251 		device_xname(sc->sc_dev), sc->sc_txcw));
   11252 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11253 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11254 	CSR_WRITE_FLUSH(sc);
   11255 	delay(1000);
   11256 
   11257 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11258 	signal = wm_tbi_havesignal(sc, ctrl);
   11259 
   11260 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11261 		signal));
   11262 
   11263 	if (signal) {
   11264 		/* Have signal; wait for the link to come up. */
   11265 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11266 			delay(10000);
   11267 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11268 				break;
   11269 		}
   11270 
   11271 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   11272 			device_xname(sc->sc_dev), i));
   11273 
   11274 		status = CSR_READ(sc, WMREG_STATUS);
   11275 		DPRINTF(WM_DEBUG_LINK,
   11276 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11277 			device_xname(sc->sc_dev), status, STATUS_LU));
   11278 		if (status & STATUS_LU) {
   11279 			/* Link is up. */
   11280 			DPRINTF(WM_DEBUG_LINK,
   11281 			    ("%s: LINK: set media -> link up %s\n",
   11282 				device_xname(sc->sc_dev),
   11283 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11284 
   11285 			/*
   11286 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
   11287 			 * automatically, so refresh sc->sc_ctrl from it.
   11288 			 */
   11289 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11290 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11291 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11292 			if (status & STATUS_FD)
   11293 				sc->sc_tctl |=
   11294 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11295 			else
   11296 				sc->sc_tctl |=
   11297 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11298 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11299 				sc->sc_fcrtl |= FCRTL_XONE;
   11300 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11301 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11302 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11303 			sc->sc_tbi_linkup = 1;
   11304 		} else {
   11305 			if (i == WM_LINKUP_TIMEOUT)
   11306 				wm_check_for_link(sc);
   11307 			/* Link is down. */
   11308 			DPRINTF(WM_DEBUG_LINK,
   11309 			    ("%s: LINK: set media -> link down\n",
   11310 				device_xname(sc->sc_dev)));
   11311 			sc->sc_tbi_linkup = 0;
   11312 		}
   11313 	} else {
   11314 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11315 			device_xname(sc->sc_dev)));
   11316 		sc->sc_tbi_linkup = 0;
   11317 	}
   11318 
   11319 	wm_tbi_serdes_set_linkled(sc);
   11320 
   11321 	return 0;
   11322 }
   11323 
   11324 /*
   11325  * wm_tbi_mediastatus:	[ifmedia interface function]
   11326  *
   11327  *	Get the current interface media status on a 1000BASE-X device.
   11328  */
   11329 static void
   11330 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11331 {
   11332 	struct wm_softc *sc = ifp->if_softc;
   11333 	uint32_t ctrl, status;
   11334 
   11335 	ifmr->ifm_status = IFM_AVALID;
   11336 	ifmr->ifm_active = IFM_ETHER;
   11337 
   11338 	status = CSR_READ(sc, WMREG_STATUS);
   11339 	if ((status & STATUS_LU) == 0) {
   11340 		ifmr->ifm_active |= IFM_NONE;
   11341 		return;
   11342 	}
   11343 
   11344 	ifmr->ifm_status |= IFM_ACTIVE;
   11345 	/* Only 82545 is LX */
   11346 	if (sc->sc_type == WM_T_82545)
   11347 		ifmr->ifm_active |= IFM_1000_LX;
   11348 	else
   11349 		ifmr->ifm_active |= IFM_1000_SX;
   11350 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11351 		ifmr->ifm_active |= IFM_FDX;
   11352 	else
   11353 		ifmr->ifm_active |= IFM_HDX;
   11354 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11355 	if (ctrl & CTRL_RFCE)
   11356 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11357 	if (ctrl & CTRL_TFCE)
   11358 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11359 }
   11360 
   11361 /* XXX TBI only */
   11362 static int
   11363 wm_check_for_link(struct wm_softc *sc)
   11364 {
   11365 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11366 	uint32_t rxcw;
   11367 	uint32_t ctrl;
   11368 	uint32_t status;
   11369 	bool signal;
   11370 
   11371 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11372 		device_xname(sc->sc_dev), __func__));
   11373 
   11374 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11375 		/* XXX need some work for >= 82571 */
   11376 		if (sc->sc_type >= WM_T_82571) {
   11377 			sc->sc_tbi_linkup = 1;
   11378 			return 0;
   11379 		}
   11380 	}
   11381 
   11382 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11383 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11384 	status = CSR_READ(sc, WMREG_STATUS);
   11385 	signal = wm_tbi_havesignal(sc, ctrl);
   11386 
   11387 	DPRINTF(WM_DEBUG_LINK,
   11388 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11389 		device_xname(sc->sc_dev), __func__, signal,
   11390 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11391 
   11392 	/*
   11393 	 * SWDPIN   LU RXCW
   11394 	 *	0    0	  0
   11395 	 *	0    0	  1	(should not happen)
   11396 	 *	0    1	  0	(should not happen)
   11397 	 *	0    1	  1	(should not happen)
   11398 	 *	1    0	  0	Disable autonegotiation and force link-up
   11399 	 *	1    0	  1	got /C/ but link not up yet
   11400 	 *	1    1	  0	(link up)
   11401 	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
   11402 	 *
   11403 	 */
   11404 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11405 		DPRINTF(WM_DEBUG_LINK,
   11406 		    ("%s: %s: force linkup and fullduplex\n",
   11407 			device_xname(sc->sc_dev), __func__));
   11408 		sc->sc_tbi_linkup = 0;
   11409 		/* Disable auto-negotiation in the TXCW register */
   11410 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11411 
   11412 		/*
   11413 		 * Force link-up and also force full-duplex.
   11414 		 *
   11415 		 * NOTE: The hardware has updated TFCE and RFCE in CTRL
   11416 		 * automatically, so keep sc->sc_ctrl in sync with it.
   11417 		 */
   11418 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11419 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11420 	} else if (((status & STATUS_LU) != 0)
   11421 	    && ((rxcw & RXCW_C) != 0)
   11422 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11423 		sc->sc_tbi_linkup = 1;
   11424 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11425 			device_xname(sc->sc_dev),
   11426 			__func__));
   11427 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11428 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11429 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   11430 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   11431 			device_xname(sc->sc_dev), __func__));
   11432 	} else {
   11433 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11434 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11435 			status));
   11436 	}
   11437 
   11438 	return 0;
   11439 }
   11440 
   11441 /*
   11442  * wm_tbi_tick:
   11443  *
   11444  *	Check the link on TBI devices.
   11445  *	This function acts as mii_tick().
   11446  */
   11447 static void
   11448 wm_tbi_tick(struct wm_softc *sc)
   11449 {
   11450 	struct mii_data *mii = &sc->sc_mii;
   11451 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11452 	uint32_t status;
   11453 
   11454 	KASSERT(WM_CORE_LOCKED(sc));
   11455 
   11456 	status = CSR_READ(sc, WMREG_STATUS);
   11457 
   11458 	/* XXX is this needed? */
   11459 	(void)CSR_READ(sc, WMREG_RXCW);
   11460 	(void)CSR_READ(sc, WMREG_CTRL);
   11461 
   11462 	/* set link status */
   11463 	if ((status & STATUS_LU) == 0) {
   11464 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11465 			device_xname(sc->sc_dev)));
   11466 		sc->sc_tbi_linkup = 0;
   11467 	} else if (sc->sc_tbi_linkup == 0) {
   11468 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11469 			device_xname(sc->sc_dev),
   11470 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11471 		sc->sc_tbi_linkup = 1;
   11472 		sc->sc_tbi_serdes_ticks = 0;
   11473 	}
   11474 
   11475 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11476 		goto setled;
   11477 
   11478 	if ((status & STATUS_LU) == 0) {
   11479 		sc->sc_tbi_linkup = 0;
   11480 		/* If the timer expired, retry autonegotiation */
   11481 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11482 		    && (++sc->sc_tbi_serdes_ticks
   11483 			>= sc->sc_tbi_serdes_anegticks)) {
   11484 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11485 			sc->sc_tbi_serdes_ticks = 0;
   11486 			/*
   11487 			 * Reset the link, and let autonegotiation do
   11488 			 * its thing
   11489 			 */
   11490 			sc->sc_ctrl |= CTRL_LRST;
   11491 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11492 			CSR_WRITE_FLUSH(sc);
   11493 			delay(1000);
   11494 			sc->sc_ctrl &= ~CTRL_LRST;
   11495 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11496 			CSR_WRITE_FLUSH(sc);
   11497 			delay(1000);
   11498 			CSR_WRITE(sc, WMREG_TXCW,
   11499 			    sc->sc_txcw & ~TXCW_ANE);
   11500 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11501 		}
   11502 	}
   11503 
   11504 setled:
   11505 	wm_tbi_serdes_set_linkled(sc);
   11506 }
   11507 
   11508 /* SERDES related */
   11509 static void
   11510 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11511 {
   11512 	uint32_t reg;
   11513 
   11514 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11515 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11516 		return;
   11517 
   11518 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11519 	reg |= PCS_CFG_PCS_EN;
   11520 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11521 
   11522 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11523 	reg &= ~CTRL_EXT_SWDPIN(3);
   11524 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11525 	CSR_WRITE_FLUSH(sc);
   11526 }
   11527 
   11528 static int
   11529 wm_serdes_mediachange(struct ifnet *ifp)
   11530 {
   11531 	struct wm_softc *sc = ifp->if_softc;
   11532 	bool pcs_autoneg = true; /* XXX */
   11533 	uint32_t ctrl_ext, pcs_lctl, reg;
   11534 
   11535 	/* XXX Currently, this function is not called on 8257[12] */
   11536 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11537 	    || (sc->sc_type >= WM_T_82575))
   11538 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11539 
   11540 	wm_serdes_power_up_link_82575(sc);
   11541 
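         	/* Set Link Up before configuring the PCS */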
   11542 	sc->sc_ctrl |= CTRL_SLU;
   11543 
   11544 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11545 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11546 
   11547 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11548 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11549 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11550 	case CTRL_EXT_LINK_MODE_SGMII:
   11551 		pcs_autoneg = true;
   11552 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11553 		break;
   11554 	case CTRL_EXT_LINK_MODE_1000KX:
   11555 		pcs_autoneg = false;
   11556 		/* FALLTHROUGH */
   11557 	default:
   11558 		if ((sc->sc_type == WM_T_82575)
   11559 		    || (sc->sc_type == WM_T_82576)) {
   11560 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11561 				pcs_autoneg = false;
   11562 		}
   11563 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11564 		    | CTRL_FRCFDX;
   11565 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11566 	}
   11567 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11568 
   11569 	if (pcs_autoneg) {
   11570 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11571 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11572 
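         		/* Advertise symmetric and asymmetric PAUSE to the partner */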
   11573 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11574 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11575 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11576 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11577 	} else
   11578 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11579 
   11580 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11581 
   11582 
   11583 	return 0;
   11584 }
   11585 
   11586 static void
   11587 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11588 {
   11589 	struct wm_softc *sc = ifp->if_softc;
   11590 	struct mii_data *mii = &sc->sc_mii;
   11591 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11592 	uint32_t pcs_adv, pcs_lpab, reg;
   11593 
   11594 	ifmr->ifm_status = IFM_AVALID;
   11595 	ifmr->ifm_active = IFM_ETHER;
   11596 
   11597 	/* Check PCS */
   11598 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11599 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11600 		ifmr->ifm_active |= IFM_NONE;
   11601 		sc->sc_tbi_linkup = 0;
   11602 		goto setled;
   11603 	}
   11604 
   11605 	sc->sc_tbi_linkup = 1;
   11606 	ifmr->ifm_status |= IFM_ACTIVE;
   11607 	if (sc->sc_type == WM_T_I354) {
   11608 		uint32_t status;
   11609 
   11610 		status = CSR_READ(sc, WMREG_STATUS);
   11611 		if (((status & STATUS_2P5_SKU) != 0)
   11612 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11613 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11614 		} else
   11615 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11616 	} else {
   11617 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11618 		case PCS_LSTS_SPEED_10:
   11619 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11620 			break;
   11621 		case PCS_LSTS_SPEED_100:
   11622 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11623 			break;
   11624 		case PCS_LSTS_SPEED_1000:
   11625 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11626 			break;
   11627 		default:
   11628 			device_printf(sc->sc_dev, "Unknown speed\n");
   11629 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11630 			break;
   11631 		}
   11632 	}
   11633 	if ((reg & PCS_LSTS_FDX) != 0)
   11634 		ifmr->ifm_active |= IFM_FDX;
   11635 	else
   11636 		ifmr->ifm_active |= IFM_HDX;
   11637 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11638 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11639 		/* Check flow */
   11640 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11641 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11642 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11643 			goto setled;
   11644 		}
   11645 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11646 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11647 		DPRINTF(WM_DEBUG_LINK,
   11648 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11649 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11650 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11651 			mii->mii_media_active |= IFM_FLOW
   11652 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11653 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11654 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11655 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11656 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11657 			mii->mii_media_active |= IFM_FLOW
   11658 			    | IFM_ETH_TXPAUSE;
   11659 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11660 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11661 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11662 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11663 			mii->mii_media_active |= IFM_FLOW
   11664 			    | IFM_ETH_RXPAUSE;
   11665 		}
   11666 	}
   11667 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11668 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11669 setled:
   11670 	wm_tbi_serdes_set_linkled(sc);
   11671 }
   11672 
   11673 /*
   11674  * wm_serdes_tick:
   11675  *
   11676  *	Check the link on serdes devices.
   11677  */
   11678 static void
   11679 wm_serdes_tick(struct wm_softc *sc)
   11680 {
   11681 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11682 	struct mii_data *mii = &sc->sc_mii;
   11683 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11684 	uint32_t reg;
   11685 
   11686 	KASSERT(WM_CORE_LOCKED(sc));
   11687 
   11688 	mii->mii_media_status = IFM_AVALID;
   11689 	mii->mii_media_active = IFM_ETHER;
   11690 
   11691 	/* Check PCS */
   11692 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11693 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11694 		mii->mii_media_status |= IFM_ACTIVE;
   11695 		sc->sc_tbi_linkup = 1;
   11696 		sc->sc_tbi_serdes_ticks = 0;
   11697 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11698 		if ((reg & PCS_LSTS_FDX) != 0)
   11699 			mii->mii_media_active |= IFM_FDX;
   11700 		else
   11701 			mii->mii_media_active |= IFM_HDX;
   11702 	} else {
   11703 		mii->mii_media_status |= IFM_NONE;
   11704 		sc->sc_tbi_linkup = 0;
   11705 		/* If the timer expired, retry autonegotiation */
   11706 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11707 		    && (++sc->sc_tbi_serdes_ticks
   11708 			>= sc->sc_tbi_serdes_anegticks)) {
   11709 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11710 			sc->sc_tbi_serdes_ticks = 0;
   11711 			/* XXX */
   11712 			wm_serdes_mediachange(ifp);
   11713 		}
   11714 	}
   11715 
   11716 	wm_tbi_serdes_set_linkled(sc);
   11717 }
   11718 
   11719 /* SFP related */
   11720 
   11721 static int
   11722 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11723 {
   11724 	uint32_t i2ccmd;
   11725 	int i;
   11726 
   11727 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11728 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11729 
   11730 	/* Poll the ready bit */
   11731 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11732 		delay(50);
   11733 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11734 		if (i2ccmd & I2CCMD_READY)
   11735 			break;
   11736 	}
   11737 	if ((i2ccmd & I2CCMD_READY) == 0)
   11738 		return -1;
   11739 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11740 		return -1;
   11741 
   11742 	*data = i2ccmd & 0x00ff;
   11743 
   11744 	return 0;
   11745 }
   11746 
   11747 static uint32_t
   11748 wm_sfp_get_media_type(struct wm_softc *sc)
   11749 {
   11750 	uint32_t ctrl_ext;
   11751 	uint8_t val = 0;
   11752 	int timeout = 3;
   11753 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11754 	int rv = -1;
   11755 
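         	/* Enable the I2C interface to reach the SFP module's ROM */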
   11756 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11757 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11758 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11759 	CSR_WRITE_FLUSH(sc);
   11760 
   11761 	/* Read SFP module data */
   11762 	while (timeout) {
   11763 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11764 		if (rv == 0)
   11765 			break;
   11766 		delay(100*1000); /* XXX too big */
   11767 		timeout--;
   11768 	}
   11769 	if (rv != 0)
   11770 		goto out;
   11771 	switch (val) {
   11772 	case SFF_SFP_ID_SFF:
   11773 		aprint_normal_dev(sc->sc_dev,
   11774 		    "Module/Connector soldered to board\n");
   11775 		break;
   11776 	case SFF_SFP_ID_SFP:
   11777 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11778 		break;
   11779 	case SFF_SFP_ID_UNKNOWN:
   11780 		goto out;
   11781 	default:
   11782 		break;
   11783 	}
   11784 
   11785 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11786 	if (rv != 0) {
   11787 		goto out;
   11788 	}
   11789 
   11790 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11791 		mediatype = WM_MEDIATYPE_SERDES;
   11792 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11793 		sc->sc_flags |= WM_F_SGMII;
   11794 		mediatype = WM_MEDIATYPE_COPPER;
   11795 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11796 		sc->sc_flags |= WM_F_SGMII;
   11797 		mediatype = WM_MEDIATYPE_SERDES;
   11798 	}
   11799 
   11800 out:
   11801 	/* Restore I2C interface setting */
   11802 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11803 
   11804 	return mediatype;
   11805 }
   11806 
   11807 /*
   11808  * NVM related.
   11809  * Microwire, SPI (w/wo EERD) and Flash.
   11810  */
   11811 
   11812 /* Both spi and uwire */
   11813 
   11814 /*
   11815  * wm_eeprom_sendbits:
   11816  *
   11817  *	Send a series of bits to the EEPROM.
   11818  */
   11819 static void
   11820 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11821 {
   11822 	uint32_t reg;
   11823 	int x;
   11824 
   11825 	reg = CSR_READ(sc, WMREG_EECD);
   11826 
   11827 	for (x = nbits; x > 0; x--) {
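         		/* Present the bit on DI, then pulse SK to clock it out */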
   11828 		if (bits & (1U << (x - 1)))
   11829 			reg |= EECD_DI;
   11830 		else
   11831 			reg &= ~EECD_DI;
   11832 		CSR_WRITE(sc, WMREG_EECD, reg);
   11833 		CSR_WRITE_FLUSH(sc);
   11834 		delay(2);
   11835 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11836 		CSR_WRITE_FLUSH(sc);
   11837 		delay(2);
   11838 		CSR_WRITE(sc, WMREG_EECD, reg);
   11839 		CSR_WRITE_FLUSH(sc);
   11840 		delay(2);
   11841 	}
   11842 }
   11843 
   11844 /*
   11845  * wm_eeprom_recvbits:
   11846  *
   11847  *	Receive a series of bits from the EEPROM.
   11848  */
   11849 static void
   11850 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11851 {
   11852 	uint32_t reg, val;
   11853 	int x;
   11854 
   11855 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11856 
   11857 	val = 0;
   11858 	for (x = nbits; x > 0; x--) {
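         		/* Pulse SK and sample DO while the clock is high */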
   11859 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11860 		CSR_WRITE_FLUSH(sc);
   11861 		delay(2);
   11862 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11863 			val |= (1U << (x - 1));
   11864 		CSR_WRITE(sc, WMREG_EECD, reg);
   11865 		CSR_WRITE_FLUSH(sc);
   11866 		delay(2);
   11867 	}
   11868 	*valp = val;
   11869 }
   11870 
   11871 /* Microwire */
   11872 
   11873 /*
   11874  * wm_nvm_read_uwire:
   11875  *
   11876  *	Read a word from the EEPROM using the MicroWire protocol.
   11877  */
   11878 static int
   11879 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11880 {
   11881 	uint32_t reg, val;
   11882 	int i;
   11883 
   11884 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11885 		device_xname(sc->sc_dev), __func__));
   11886 
   11887 	if (sc->nvm.acquire(sc) != 0)
   11888 		return -1;
   11889 
   11890 	for (i = 0; i < wordcnt; i++) {
   11891 		/* Clear SK and DI. */
   11892 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11893 		CSR_WRITE(sc, WMREG_EECD, reg);
   11894 
   11895 		/*
   11896 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11897 		 * and Xen.
   11898 		 *
   11899 		 * We use this workaround only for 82540 because qemu's
   11900 		 * e1000 act as 82540.
   11901 		 */
   11902 		if (sc->sc_type == WM_T_82540) {
   11903 			reg |= EECD_SK;
   11904 			CSR_WRITE(sc, WMREG_EECD, reg);
   11905 			reg &= ~EECD_SK;
   11906 			CSR_WRITE(sc, WMREG_EECD, reg);
   11907 			CSR_WRITE_FLUSH(sc);
   11908 			delay(2);
   11909 		}
   11910 		/* XXX: end of workaround */
   11911 
   11912 		/* Set CHIP SELECT. */
   11913 		reg |= EECD_CS;
   11914 		CSR_WRITE(sc, WMREG_EECD, reg);
   11915 		CSR_WRITE_FLUSH(sc);
   11916 		delay(2);
   11917 
   11918 		/* Shift in the READ command. */
   11919 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11920 
   11921 		/* Shift in address. */
   11922 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11923 
   11924 		/* Shift out the data. */
   11925 		wm_eeprom_recvbits(sc, &val, 16);
   11926 		data[i] = val & 0xffff;
   11927 
   11928 		/* Clear CHIP SELECT. */
   11929 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11930 		CSR_WRITE(sc, WMREG_EECD, reg);
   11931 		CSR_WRITE_FLUSH(sc);
   11932 		delay(2);
   11933 	}
   11934 
   11935 	sc->nvm.release(sc);
   11936 	return 0;
   11937 }
   11938 
   11939 /* SPI */
   11940 
   11941 /*
   11942  * Set SPI and FLASH related information from the EECD register.
   11943  * For 82541 and 82547, the word size is taken from EEPROM.
   11944  */
   11945 static int
   11946 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11947 {
   11948 	int size;
   11949 	uint32_t reg;
   11950 	uint16_t data;
   11951 
   11952 	reg = CSR_READ(sc, WMREG_EECD);
   11953 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11954 
   11955 	/* Read the size of NVM from EECD by default */
   11956 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11957 	switch (sc->sc_type) {
   11958 	case WM_T_82541:
   11959 	case WM_T_82541_2:
   11960 	case WM_T_82547:
   11961 	case WM_T_82547_2:
   11962 		/* Set dummy value to access EEPROM */
   11963 		sc->sc_nvm_wordsize = 64;
   11964 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11965 			aprint_error_dev(sc->sc_dev,
   11966 			    "%s: failed to read EEPROM size\n", __func__);
   11967 		}
   11968 		reg = data;
   11969 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11970 		if (size == 0)
   11971 			size = 6; /* 64 word size */
   11972 		else
   11973 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11974 		break;
   11975 	case WM_T_80003:
   11976 	case WM_T_82571:
   11977 	case WM_T_82572:
   11978 	case WM_T_82573: /* SPI case */
   11979 	case WM_T_82574: /* SPI case */
   11980 	case WM_T_82583: /* SPI case */
   11981 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11982 		if (size > 14)
   11983 			size = 14;
   11984 		break;
   11985 	case WM_T_82575:
   11986 	case WM_T_82576:
   11987 	case WM_T_82580:
   11988 	case WM_T_I350:
   11989 	case WM_T_I354:
   11990 	case WM_T_I210:
   11991 	case WM_T_I211:
   11992 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11993 		if (size > 15)
   11994 			size = 15;
   11995 		break;
   11996 	default:
   11997 		aprint_error_dev(sc->sc_dev,
   11998 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11999 		return -1;
   12000 		break;
   12001 	}
   12002 
   12003 	sc->sc_nvm_wordsize = 1 << size;
   12004 
   12005 	return 0;
   12006 }
   12007 
   12008 /*
   12009  * wm_nvm_ready_spi:
   12010  *
   12011  *	Wait for a SPI EEPROM to be ready for commands.
   12012  */
   12013 static int
   12014 wm_nvm_ready_spi(struct wm_softc *sc)
   12015 {
   12016 	uint32_t val;
   12017 	int usec;
   12018 
   12019 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12020 		device_xname(sc->sc_dev), __func__));
   12021 
   12022 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12023 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12024 		wm_eeprom_recvbits(sc, &val, 8);
   12025 		if ((val & SPI_SR_RDY) == 0)
   12026 			break;
   12027 	}
   12028 	if (usec >= SPI_MAX_RETRIES) {
   12029 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12030 		return -1;
   12031 	}
   12032 	return 0;
   12033 }
   12034 
   12035 /*
   12036  * wm_nvm_read_spi:
   12037  *
   12038  *	Read a word from the EEPROM using the SPI protocol.
   12039  */
   12040 static int
   12041 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12042 {
   12043 	uint32_t reg, val;
   12044 	int i;
   12045 	uint8_t opc;
   12046 	int rv = 0;
   12047 
   12048 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12049 		device_xname(sc->sc_dev), __func__));
   12050 
   12051 	if (sc->nvm.acquire(sc) != 0)
   12052 		return -1;
   12053 
   12054 	/* Clear SK and CS. */
   12055 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12056 	CSR_WRITE(sc, WMREG_EECD, reg);
   12057 	CSR_WRITE_FLUSH(sc);
   12058 	delay(2);
   12059 
   12060 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12061 		goto out;
   12062 
   12063 	/* Toggle CS to flush commands. */
   12064 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12065 	CSR_WRITE_FLUSH(sc);
   12066 	delay(2);
   12067 	CSR_WRITE(sc, WMREG_EECD, reg);
   12068 	CSR_WRITE_FLUSH(sc);
   12069 	delay(2);
   12070 
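         	/*
         	 * On parts with 8 address bits, the A8 opcode bit supplies
         	 * the ninth address bit for words at offset 128 and above.
         	 * The part is byte addressed, so the word address is shifted
         	 * left by one below.
         	 */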
   12071 	opc = SPI_OPC_READ;
   12072 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12073 		opc |= SPI_OPC_A8;
   12074 
   12075 	wm_eeprom_sendbits(sc, opc, 8);
   12076 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12077 
   12078 	for (i = 0; i < wordcnt; i++) {
   12079 		wm_eeprom_recvbits(sc, &val, 16);
   12080 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12081 	}
   12082 
   12083 	/* Raise CS and clear SK. */
   12084 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12085 	CSR_WRITE(sc, WMREG_EECD, reg);
   12086 	CSR_WRITE_FLUSH(sc);
   12087 	delay(2);
   12088 
   12089 out:
   12090 	sc->nvm.release(sc);
   12091 	return rv;
   12092 }
   12093 
   12094 /* Using with EERD */
   12095 
   12096 static int
   12097 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12098 {
   12099 	uint32_t attempts = 100000;
   12100 	uint32_t i, reg = 0;
   12101 	int32_t done = -1;
   12102 
   12103 	for (i = 0; i < attempts; i++) {
   12104 		reg = CSR_READ(sc, rw);
   12105 
   12106 		if (reg & EERD_DONE) {
   12107 			done = 0;
   12108 			break;
   12109 		}
   12110 		delay(5);
   12111 	}
   12112 
   12113 	return done;
   12114 }
   12115 
   12116 static int
   12117 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12118 {
   12119 	int i, eerd = 0;
   12120 	int rv = 0;
   12121 
   12122 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12123 		device_xname(sc->sc_dev), __func__));
   12124 
   12125 	if (sc->nvm.acquire(sc) != 0)
   12126 		return -1;
   12127 
   12128 	for (i = 0; i < wordcnt; i++) {
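         		/* Start a read of word (offset + i) and poll for DONE */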
   12129 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12130 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12131 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12132 		if (rv != 0) {
   12133 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   12134 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12135 			break;
   12136 		}
   12137 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12138 	}
   12139 
   12140 	sc->nvm.release(sc);
   12141 	return rv;
   12142 }
   12143 
   12144 /* Flash */
   12145 
   12146 static int
   12147 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12148 {
   12149 	uint32_t eecd;
   12150 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12151 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12152 	uint32_t nvm_dword = 0;
   12153 	uint8_t sig_byte = 0;
   12154 	int rv;
   12155 
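         	/*
         	 * Each bank carries a signature in word ICH_NVM_SIG_WORD;
         	 * act_offset is the byte offset of the signature bits that
         	 * are checked below.
         	 */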
   12156 	switch (sc->sc_type) {
   12157 	case WM_T_PCH_SPT:
   12158 	case WM_T_PCH_CNP:
   12159 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12160 		act_offset = ICH_NVM_SIG_WORD * 2;
   12161 
   12162 		/* set bank to 0 in case flash read fails. */
   12163 		*bank = 0;
   12164 
   12165 		/* Check bank 0 */
   12166 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12167 		if (rv != 0)
   12168 			return rv;
   12169 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12170 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12171 			*bank = 0;
   12172 			return 0;
   12173 		}
   12174 
   12175 		/* Check bank 1 */
   12176 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   12177 		    &nvm_dword);
   12178 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12179 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12180 			*bank = 1;
   12181 			return 0;
   12182 		}
   12183 		aprint_error_dev(sc->sc_dev,
   12184 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12185 		return -1;
   12186 	case WM_T_ICH8:
   12187 	case WM_T_ICH9:
   12188 		eecd = CSR_READ(sc, WMREG_EECD);
   12189 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12190 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12191 			return 0;
   12192 		}
   12193 		/* FALLTHROUGH */
   12194 	default:
   12195 		/* Default to 0 */
   12196 		*bank = 0;
   12197 
   12198 		/* Check bank 0 */
   12199 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12200 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12201 			*bank = 0;
   12202 			return 0;
   12203 		}
   12204 
   12205 		/* Check bank 1 */
   12206 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12207 		    &sig_byte);
   12208 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12209 			*bank = 1;
   12210 			return 0;
   12211 		}
   12212 	}
   12213 
   12214 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12215 		device_xname(sc->sc_dev)));
   12216 	return -1;
   12217 }
   12218 
   12219 /******************************************************************************
   12220  * This function does initial flash setup so that a new read/write/erase cycle
   12221  * can be started.
   12222  *
   12223  * sc - The pointer to the hw structure
   12224  ****************************************************************************/
   12225 static int32_t
   12226 wm_ich8_cycle_init(struct wm_softc *sc)
   12227 {
   12228 	uint16_t hsfsts;
   12229 	int32_t error = 1;
   12230 	int32_t i     = 0;
   12231 
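         	/*
         	 * On PCH_SPT and newer, the flash registers are accessed as
         	 * 32-bit quantities only; HSFSTS is the low 16 bits of that
         	 * dword (see wm_read_ich8_data()).
         	 */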
   12232 	if (sc->sc_type >= WM_T_PCH_SPT)
   12233 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12234 	else
   12235 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12236 
   12237 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   12238 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12239 		return error;
   12240 
   12241 	/* Clear FCERR in HW status by writing a 1 */
   12242 	/* Clear DAEL in HW status by writing a 1 */
   12243 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12244 
   12245 	if (sc->sc_type >= WM_T_PCH_SPT)
   12246 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12247 	else
   12248 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12249 
   12250 	/*
   12251 	 * Either we should have a hardware SPI cycle-in-progress bit to
   12252 	 * check against in order to start a new cycle, or the FDONE bit
   12253 	 * should be changed in the hardware so that it is 1 after hardware
   12254 	 * reset, which could then be used to tell whether a cycle is in
   12255 	 * progress or has been completed.  We should also have a software
   12256 	 * semaphore to guard FDONE or the cycle-in-progress bit so that
   12257 	 * two threads' accesses to those bits are serialized, or some way
   12258 	 * to keep two threads from starting a cycle at the same time.
   12259 	 */
   12260 
   12261 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12262 		/*
   12263 		 * There is no cycle running at present, so we can start a
   12264 		 * cycle
   12265 		 */
   12266 
   12267 		/* Begin by setting Flash Cycle Done. */
   12268 		hsfsts |= HSFSTS_DONE;
   12269 		if (sc->sc_type >= WM_T_PCH_SPT)
   12270 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12271 			    hsfsts & 0xffffUL);
   12272 		else
   12273 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12274 		error = 0;
   12275 	} else {
   12276 		/*
   12277 		 * Otherwise poll for some time so the current cycle has a
   12278 		 * chance to end before giving up.
   12279 		 */
   12280 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12281 			if (sc->sc_type >= WM_T_PCH_SPT)
   12282 				hsfsts = ICH8_FLASH_READ32(sc,
   12283 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12284 			else
   12285 				hsfsts = ICH8_FLASH_READ16(sc,
   12286 				    ICH_FLASH_HSFSTS);
   12287 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12288 				error = 0;
   12289 				break;
   12290 			}
   12291 			delay(1);
   12292 		}
   12293 		if (error == 0) {
   12294 			/*
   12295 			 * The previous cycle ended within the timeout; now
   12296 			 * set the Flash Cycle Done bit.
   12297 			 */
   12298 			hsfsts |= HSFSTS_DONE;
   12299 			if (sc->sc_type >= WM_T_PCH_SPT)
   12300 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12301 				    hsfsts & 0xffffUL);
   12302 			else
   12303 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12304 				    hsfsts);
   12305 		}
   12306 	}
   12307 	return error;
   12308 }
   12309 
   12310 /******************************************************************************
   12311  * This function starts a flash cycle and waits for its completion
   12312  *
   12313  * sc - The pointer to the hw structure
   12314  ****************************************************************************/
   12315 static int32_t
   12316 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12317 {
   12318 	uint16_t hsflctl;
   12319 	uint16_t hsfsts;
   12320 	int32_t error = 1;
   12321 	uint32_t i = 0;
   12322 
   12323 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12324 	if (sc->sc_type >= WM_T_PCH_SPT)
   12325 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12326 	else
   12327 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12328 	hsflctl |= HSFCTL_GO;
   12329 	if (sc->sc_type >= WM_T_PCH_SPT)
   12330 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12331 		    (uint32_t)hsflctl << 16);
   12332 	else
   12333 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12334 
   12335 	/* Wait till FDONE bit is set to 1 */
   12336 	do {
   12337 		if (sc->sc_type >= WM_T_PCH_SPT)
   12338 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12339 			    & 0xffffUL;
   12340 		else
   12341 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12342 		if (hsfsts & HSFSTS_DONE)
   12343 			break;
   12344 		delay(1);
   12345 		i++;
   12346 	} while (i < timeout);
   12347 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   12348 		error = 0;
   12349 
   12350 	return error;
   12351 }
   12352 
   12353 /******************************************************************************
   12354  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12355  *
   12356  * sc - The pointer to the hw structure
   12357  * index - The index of the byte or word to read.
   12358  * size - Size of data to read, 1=byte 2=word, 4=dword
   12359  * data - Pointer to the word to store the value read.
   12360  *****************************************************************************/
   12361 static int32_t
   12362 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12363     uint32_t size, uint32_t *data)
   12364 {
   12365 	uint16_t hsfsts;
   12366 	uint16_t hsflctl;
   12367 	uint32_t flash_linear_address;
   12368 	uint32_t flash_data = 0;
   12369 	int32_t error = 1;
   12370 	int32_t count = 0;
   12371 
   12372 	if (size < 1 || size > 4 || data == NULL ||
   12373 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12374 		return error;
   12375 
   12376 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12377 	    sc->sc_ich8_flash_base;
   12378 
   12379 	do {
   12380 		delay(1);
   12381 		/* Steps */
   12382 		error = wm_ich8_cycle_init(sc);
   12383 		if (error)
   12384 			break;
   12385 
   12386 		if (sc->sc_type >= WM_T_PCH_SPT)
   12387 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12388 			    >> 16;
   12389 		else
   12390 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12391 		/* A BCOUNT of 0/1/3 requests a 1-, 2- or 4-byte transfer. */
   12392 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12393 		    & HSFCTL_BCOUNT_MASK;
   12394 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12395 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12396 			/*
   12397 			 * In SPT, this register is in the LAN memory space,
   12398 			 * not flash; only 32-bit access is supported.
   12399 			 */
   12400 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12401 			    (uint32_t)hsflctl << 16);
   12402 		} else
   12403 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12404 
   12405 		/*
   12406 		 * Write the last 24 bits of index into Flash Linear address
   12407 		 * field in Flash Address
   12408 		 */
   12409 		/* TODO: TBD maybe check the index against the size of flash */
   12410 
   12411 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12412 
   12413 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12414 
   12415 		/*
   12416 		 * Check if FCERR is set to 1.  If so, clear it and retry
   12417 		 * the whole sequence a few more times; otherwise read in
   12418 		 * (shift in) Flash Data0.  The byte order is least
   12419 		 * significant byte first.
   12420 		 */
   12421 		if (error == 0) {
   12422 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12423 			if (size == 1)
   12424 				*data = (uint8_t)(flash_data & 0x000000FF);
   12425 			else if (size == 2)
   12426 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12427 			else if (size == 4)
   12428 				*data = (uint32_t)flash_data;
   12429 			break;
   12430 		} else {
   12431 			/*
   12432 			 * If we've gotten here, then things are probably
   12433 			 * completely hosed, but if the error condition is
   12434 			 * detected, it won't hurt to give it another try...
   12435 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12436 			 */
   12437 			if (sc->sc_type >= WM_T_PCH_SPT)
   12438 				hsfsts = ICH8_FLASH_READ32(sc,
   12439 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12440 			else
   12441 				hsfsts = ICH8_FLASH_READ16(sc,
   12442 				    ICH_FLASH_HSFSTS);
   12443 
   12444 			if (hsfsts & HSFSTS_ERR) {
   12445 				/* Repeat for some time before giving up. */
   12446 				continue;
   12447 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12448 				break;
   12449 		}
   12450 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12451 
   12452 	return error;
   12453 }
   12454 
   12455 /******************************************************************************
   12456  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12457  *
   12458  * sc - pointer to wm_hw structure
   12459  * index - The index of the byte to read.
   12460  * data - Pointer to a byte to store the value read.
   12461  *****************************************************************************/
   12462 static int32_t
   12463 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12464 {
   12465 	int32_t status;
   12466 	uint32_t word = 0;
   12467 
   12468 	status = wm_read_ich8_data(sc, index, 1, &word);
   12469 	if (status == 0)
   12470 		*data = (uint8_t)word;
   12471 	else
   12472 		*data = 0;
   12473 
   12474 	return status;
   12475 }
   12476 
   12477 /******************************************************************************
   12478  * Reads a word from the NVM using the ICH8 flash access registers.
   12479  *
   12480  * sc - pointer to wm_hw structure
   12481  * index - The starting byte index of the word to read.
   12482  * data - Pointer to a word to store the value read.
   12483  *****************************************************************************/
   12484 static int32_t
   12485 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12486 {
   12487 	int32_t status;
   12488 	uint32_t word = 0;
   12489 
   12490 	status = wm_read_ich8_data(sc, index, 2, &word);
   12491 	if (status == 0)
   12492 		*data = (uint16_t)word;
   12493 	else
   12494 		*data = 0;
   12495 
   12496 	return status;
   12497 }
   12498 
   12499 /******************************************************************************
   12500  * Reads a dword from the NVM using the ICH8 flash access registers.
   12501  *
   12502  * sc - pointer to wm_hw structure
   12503  * index - The starting byte index of the word to read.
   12504  * data - Pointer to a word to store the value read.
   12505  *****************************************************************************/
   12506 static int32_t
   12507 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12508 {
   12509 	int32_t status;
   12510 
   12511 	status = wm_read_ich8_data(sc, index, 4, data);
   12512 	return status;
   12513 }
   12514 
   12515 /******************************************************************************
   12516  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12517  * register.
   12518  *
   12519  * sc - Struct containing variables accessed by shared code
   12520  * offset - offset of word in the EEPROM to read
   12521  * data - word read from the EEPROM
   12522  * words - number of words to read
   12523  *****************************************************************************/
   12524 static int
   12525 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12526 {
   12527 	int32_t	 rv = 0;
   12528 	uint32_t flash_bank = 0;
   12529 	uint32_t act_offset = 0;
   12530 	uint32_t bank_offset = 0;
   12531 	uint16_t word = 0;
   12532 	uint16_t i = 0;
   12533 
   12534 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12535 		device_xname(sc->sc_dev), __func__));
   12536 
   12537 	if (sc->nvm.acquire(sc) != 0)
   12538 		return -1;
   12539 
   12540 	/*
   12541 	 * We need to know which is the valid flash bank.  In the event
   12542 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12543 	 * managing flash_bank. So it cannot be trusted and needs
   12544 	 * to be updated with each read.
   12545 	 */
   12546 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12547 	if (rv) {
   12548 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12549 			device_xname(sc->sc_dev)));
   12550 		flash_bank = 0;
   12551 	}
   12552 
   12553 	/*
   12554 	 * Adjust the offset if we're on bank 1.  The bank size is in
   12555 	 * words, so multiply by 2 for a byte offset.
   12556 	 */
   12557 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12558 
   12559 	for (i = 0; i < words; i++) {
   12560 		/* The NVM part needs a byte offset, hence * 2 */
   12561 		act_offset = bank_offset + ((offset + i) * 2);
   12562 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12563 		if (rv) {
   12564 			aprint_error_dev(sc->sc_dev,
   12565 			    "%s: failed to read NVM\n", __func__);
   12566 			break;
   12567 		}
   12568 		data[i] = word;
   12569 	}
   12570 
   12571 	sc->nvm.release(sc);
   12572 	return rv;
   12573 }
   12574 
   12575 /******************************************************************************
   12576  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12577  * register.
   12578  *
   12579  * sc - Struct containing variables accessed by shared code
   12580  * offset - offset of word in the EEPROM to read
   12581  * data - word read from the EEPROM
   12582  * words - number of words to read
   12583  *****************************************************************************/
   12584 static int
   12585 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12586 {
   12587 	int32_t	 rv = 0;
   12588 	uint32_t flash_bank = 0;
   12589 	uint32_t act_offset = 0;
   12590 	uint32_t bank_offset = 0;
   12591 	uint32_t dword = 0;
   12592 	uint16_t i = 0;
   12593 
   12594 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12595 		device_xname(sc->sc_dev), __func__));
   12596 
   12597 	if (sc->nvm.acquire(sc) != 0)
   12598 		return -1;
   12599 
   12600 	/*
   12601 	 * We need to know which is the valid flash bank.  In the event
   12602 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12603 	 * managing flash_bank. So it cannot be trusted and needs
   12604 	 * to be updated with each read.
   12605 	 */
   12606 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12607 	if (rv) {
   12608 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12609 			device_xname(sc->sc_dev)));
   12610 		flash_bank = 0;
   12611 	}
   12612 
   12613 	/*
   12614 	 * Adjust the offset if we're on bank 1.  The bank size is in
   12615 	 * words, so multiply by 2 for a byte offset.
   12616 	 */
   12617 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12618 
   12619 	for (i = 0; i < words; i++) {
   12620 		/* The NVM part needs a byte offset, hence * 2 */
   12621 		act_offset = bank_offset + ((offset + i) * 2);
   12622 		/* but we must read dword aligned, so mask ... */
   12623 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12624 		if (rv) {
   12625 			aprint_error_dev(sc->sc_dev,
   12626 			    "%s: failed to read NVM\n", __func__);
   12627 			break;
   12628 		}
   12629 		/* ... and pick out low or high word */
   12630 		if ((act_offset & 0x2) == 0)
   12631 			data[i] = (uint16_t)(dword & 0xFFFF);
   12632 		else
   12633 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12634 	}
   12635 
   12636 	sc->nvm.release(sc);
   12637 	return rv;
   12638 }
   12639 
   12640 /* iNVM */
   12641 
   12642 static int
   12643 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12644 {
   12645 	int32_t	 rv = 0;
   12646 	uint32_t invm_dword;
   12647 	uint16_t i;
   12648 	uint8_t record_type, word_address;
   12649 
   12650 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12651 		device_xname(sc->sc_dev), __func__));
   12652 
   12653 	for (i = 0; i < INVM_SIZE; i++) {
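         		/*
         		 * Walk the iNVM records, skipping CSR-autoload and
         		 * RSA-key structures, until the word-autoload record
         		 * for the requested address is found.
         		 */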
   12654 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12655 		/* Get record type */
   12656 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12657 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12658 			break;
   12659 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12660 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12661 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12662 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12663 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12664 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12665 			if (word_address == address) {
   12666 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12667 				rv = 0;
   12668 				break;
   12669 			}
   12670 		}
   12671 	}
   12672 
   12673 	return rv;
   12674 }
   12675 
   12676 static int
   12677 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12678 {
   12679 	int rv = 0;
   12680 	int i;
   12681 
   12682 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12683 		device_xname(sc->sc_dev), __func__));
   12684 
   12685 	if (sc->nvm.acquire(sc) != 0)
   12686 		return -1;
   12687 
   12688 	for (i = 0; i < words; i++) {
   12689 		switch (offset + i) {
   12690 		case NVM_OFF_MACADDR:
   12691 		case NVM_OFF_MACADDR1:
   12692 		case NVM_OFF_MACADDR2:
   12693 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12694 			if (rv != 0) {
   12695 				data[i] = 0xffff;
   12696 				rv = -1;
   12697 			}
   12698 			break;
   12699 		case NVM_OFF_CFG2:
   12700 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12701 			if (rv != 0) {
   12702 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12703 				rv = 0;
   12704 			}
   12705 			break;
   12706 		case NVM_OFF_CFG4:
   12707 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12708 			if (rv != 0) {
   12709 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12710 				rv = 0;
   12711 			}
   12712 			break;
   12713 		case NVM_OFF_LED_1_CFG:
   12714 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12715 			if (rv != 0) {
   12716 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12717 				rv = 0;
   12718 			}
   12719 			break;
   12720 		case NVM_OFF_LED_0_2_CFG:
   12721 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12722 			if (rv != 0) {
   12723 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12724 				rv = 0;
   12725 			}
   12726 			break;
   12727 		case NVM_OFF_ID_LED_SETTINGS:
   12728 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12729 			if (rv != 0) {
   12730 				*data = ID_LED_RESERVED_FFFF;
   12731 				rv = 0;
   12732 			}
   12733 			break;
   12734 		default:
   12735 			DPRINTF(WM_DEBUG_NVM,
   12736 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12737 			*data = NVM_RESERVED_WORD;
   12738 			break;
   12739 		}
   12740 	}
   12741 
   12742 	sc->nvm.release(sc);
   12743 	return rv;
   12744 }
   12745 
   12746 /* Lock, detecting NVM type, validate checksum, version and read */
   12747 
   12748 static int
   12749 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12750 {
   12751 	uint32_t eecd = 0;
   12752 
   12753 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12754 	    || sc->sc_type == WM_T_82583) {
   12755 		eecd = CSR_READ(sc, WMREG_EECD);
   12756 
   12757 		/* Isolate bits 15 & 16 */
   12758 		eecd = ((eecd >> 15) & 0x03);
   12759 
   12760 		/* If both bits are set, device is Flash type */
   12761 		if (eecd == 0x03)
   12762 			return 0;
   12763 	}
   12764 	return 1;
   12765 }
   12766 
   12767 static int
   12768 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12769 {
   12770 	uint32_t eec;
   12771 
   12772 	eec = CSR_READ(sc, WMREG_EEC);
   12773 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12774 		return 1;
   12775 
   12776 	return 0;
   12777 }
   12778 
   12779 /*
   12780  * wm_nvm_validate_checksum
   12781  *
   12782  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12783  */
   12784 static int
   12785 wm_nvm_validate_checksum(struct wm_softc *sc)
   12786 {
   12787 	uint16_t checksum;
   12788 	uint16_t eeprom_data;
   12789 #ifdef WM_DEBUG
   12790 	uint16_t csum_wordaddr, valid_checksum;
   12791 #endif
   12792 	int i;
   12793 
   12794 	checksum = 0;
   12795 
   12796 	/* Don't check for I211 */
   12797 	if (sc->sc_type == WM_T_I211)
   12798 		return 0;
   12799 
   12800 #ifdef WM_DEBUG
   12801 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12802 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12803 		csum_wordaddr = NVM_OFF_COMPAT;
   12804 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12805 	} else {
   12806 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12807 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12808 	}
   12809 
   12810 	/* Dump EEPROM image for debug */
   12811 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12812 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12813 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12814 		/* XXX PCH_SPT? */
   12815 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12816 		if ((eeprom_data & valid_checksum) == 0) {
   12817 			DPRINTF(WM_DEBUG_NVM,
   12818 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12819 				device_xname(sc->sc_dev), eeprom_data,
   12820 				    valid_checksum));
   12821 		}
   12822 	}
   12823 
   12824 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12825 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12826 		for (i = 0; i < NVM_SIZE; i++) {
   12827 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12828 				printf("XXXX ");
   12829 			else
   12830 				printf("%04hx ", eeprom_data);
   12831 			if (i % 8 == 7)
   12832 				printf("\n");
   12833 		}
   12834 	}
   12835 
   12836 #endif /* WM_DEBUG */
   12837 
   12838 	for (i = 0; i < NVM_SIZE; i++) {
   12839 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12840 			return 1;
   12841 		checksum += eeprom_data;
   12842 	}
   12843 
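         	/* A mismatch is only warned about (under WM_DEBUG); we return 0. */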
   12844 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12845 #ifdef WM_DEBUG
   12846 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12847 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12848 #endif
   12849 	}
   12850 
   12851 	return 0;
   12852 }
   12853 
   12854 static void
   12855 wm_nvm_version_invm(struct wm_softc *sc)
   12856 {
   12857 	uint32_t dword;
   12858 
   12859 	/*
   12860 	 * Linux's code to decode version is very strange, so we don't
   12861 	 * obey that algorithm and just use word 61 as the document says.
   12862 	 * Perhaps it's not perfect though...
   12863 	 *
   12864 	 * Example:
   12865 	 *
   12866 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12867 	 */
   12868 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12869 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12870 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12871 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12872 }
   12873 
   12874 static void
   12875 wm_nvm_version(struct wm_softc *sc)
   12876 {
   12877 	uint16_t major, minor, build, patch;
   12878 	uint16_t uid0, uid1;
   12879 	uint16_t nvm_data;
   12880 	uint16_t off;
   12881 	bool check_version = false;
   12882 	bool check_optionrom = false;
   12883 	bool have_build = false;
   12884 	bool have_uid = true;
   12885 
   12886 	/*
   12887 	 * Version format:
   12888 	 *
   12889 	 * XYYZ
   12890 	 * X0YZ
   12891 	 * X0YY
   12892 	 *
   12893 	 * Example:
   12894 	 *
   12895 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12896 	 *	82571	0x50a6	5.10.6?
   12897 	 *	82572	0x506a	5.6.10?
   12898 	 *	82572EI	0x5069	5.6.9?
   12899 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12900 	 *		0x2013	2.1.3?
    12901 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12902 	 */
   12903 
   12904 	/*
   12905 	 * XXX
    12906 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    12907 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12908 	 */
   12909 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12910 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12911 		have_uid = false;
   12912 
   12913 	switch (sc->sc_type) {
   12914 	case WM_T_82571:
   12915 	case WM_T_82572:
   12916 	case WM_T_82574:
   12917 	case WM_T_82583:
   12918 		check_version = true;
   12919 		check_optionrom = true;
   12920 		have_build = true;
   12921 		break;
   12922 	case WM_T_82575:
   12923 	case WM_T_82576:
   12924 	case WM_T_82580:
   12925 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12926 			check_version = true;
   12927 		break;
   12928 	case WM_T_I211:
   12929 		wm_nvm_version_invm(sc);
   12930 		have_uid = false;
   12931 		goto printver;
   12932 	case WM_T_I210:
   12933 		if (!wm_nvm_flash_presence_i210(sc)) {
   12934 			wm_nvm_version_invm(sc);
   12935 			have_uid = false;
   12936 			goto printver;
   12937 		}
   12938 		/* FALLTHROUGH */
   12939 	case WM_T_I350:
   12940 	case WM_T_I354:
   12941 		check_version = true;
   12942 		check_optionrom = true;
   12943 		break;
   12944 	default:
   12945 		return;
   12946 	}
   12947 	if (check_version
   12948 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12949 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12950 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12951 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12952 			build = nvm_data & NVM_BUILD_MASK;
   12953 			have_build = true;
   12954 		} else
   12955 			minor = nvm_data & 0x00ff;
   12956 
   12957 		/* Decimal */
   12958 		minor = (minor / 16) * 10 + (minor % 16);
   12959 		sc->sc_nvm_ver_major = major;
   12960 		sc->sc_nvm_ver_minor = minor;
   12961 
   12962 printver:
   12963 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12964 		    sc->sc_nvm_ver_minor);
   12965 		if (have_build) {
   12966 			sc->sc_nvm_ver_build = build;
   12967 			aprint_verbose(".%d", build);
   12968 		}
   12969 	}
   12970 
    12971 	/* Assume the Option ROM area is above NVM_SIZE */
   12972 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12973 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12974 		/* Option ROM Version */
   12975 		if ((off != 0x0000) && (off != 0xffff)) {
   12976 			int rv;
   12977 
   12978 			off += NVM_COMBO_VER_OFF;
   12979 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12980 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12981 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12982 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12983 				/* 16bits */
   12984 				major = uid0 >> 8;
   12985 				build = (uid0 << 8) | (uid1 >> 8);
   12986 				patch = uid1 & 0x00ff;
   12987 				aprint_verbose(", option ROM Version %d.%d.%d",
   12988 				    major, build, patch);
   12989 			}
   12990 		}
   12991 	}
   12992 
   12993 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12994 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12995 }
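
/*
 * Worked example (illustration only), matching the 82571 entry in the
 * table above.  Assuming the usual field layout (major in the top
 * nibble, minor in the middle byte, build in the low nibble), version
 * word 0x50a2 decodes as below; note how the minor field's hex digits
 * are reinterpreted as decimal digits.
 */
#if 0
static void
wm_nvm_version_example(void)
{
	uint16_t w = 0x50a2;					/* 82571 sample */
	uint16_t major = (w & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;	/* 5 */
	uint16_t minor = (w & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;	/* 0x0a */
	uint16_t build = w & NVM_BUILD_MASK;			/* 2 */

	minor = (minor / 16) * 10 + (minor % 16);	/* 0x0a -> 10 */
	printf("version %d.%d.%d\n", major, minor, build);	/* 5.10.2 */
}
#endif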
   12996 
   12997 /*
   12998  * wm_nvm_read:
   12999  *
   13000  *	Read data from the serial EEPROM.
   13001  */
   13002 static int
   13003 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13004 {
   13005 	int rv;
   13006 
   13007 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13008 		device_xname(sc->sc_dev), __func__));
   13009 
   13010 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13011 		return -1;
   13012 
   13013 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13014 
   13015 	return rv;
   13016 }
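
/*
 * Usage sketch (illustration only): callers read one or more 16-bit
 * words and must check the return value, since reads fail once the
 * EEPROM has been marked invalid at attach time.
 */
#if 0
	uint16_t word;

	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &word) != 0) {
		/* NVM unusable; fall back to defaults */
	}
#endif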
   13017 
   13018 /*
   13019  * Hardware semaphores.
    13020  * Very complex...
   13021  */
   13022 
   13023 static int
   13024 wm_get_null(struct wm_softc *sc)
   13025 {
   13026 
   13027 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13028 		device_xname(sc->sc_dev), __func__));
   13029 	return 0;
   13030 }
   13031 
   13032 static void
   13033 wm_put_null(struct wm_softc *sc)
   13034 {
   13035 
   13036 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13037 		device_xname(sc->sc_dev), __func__));
   13038 	return;
   13039 }
   13040 
   13041 static int
   13042 wm_get_eecd(struct wm_softc *sc)
   13043 {
   13044 	uint32_t reg;
   13045 	int x;
   13046 
   13047 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13048 		device_xname(sc->sc_dev), __func__));
   13049 
   13050 	reg = CSR_READ(sc, WMREG_EECD);
   13051 
   13052 	/* Request EEPROM access. */
   13053 	reg |= EECD_EE_REQ;
   13054 	CSR_WRITE(sc, WMREG_EECD, reg);
   13055 
    13056 	/* ...and wait for it to be granted. */
   13057 	for (x = 0; x < 1000; x++) {
   13058 		reg = CSR_READ(sc, WMREG_EECD);
   13059 		if (reg & EECD_EE_GNT)
   13060 			break;
   13061 		delay(5);
   13062 	}
   13063 	if ((reg & EECD_EE_GNT) == 0) {
   13064 		aprint_error_dev(sc->sc_dev,
   13065 		    "could not acquire EEPROM GNT\n");
   13066 		reg &= ~EECD_EE_REQ;
   13067 		CSR_WRITE(sc, WMREG_EECD, reg);
   13068 		return -1;
   13069 	}
   13070 
   13071 	return 0;
   13072 }
   13073 
   13074 static void
   13075 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13076 {
   13077 
   13078 	*eecd |= EECD_SK;
   13079 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13080 	CSR_WRITE_FLUSH(sc);
   13081 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13082 		delay(1);
   13083 	else
   13084 		delay(50);
   13085 }
   13086 
   13087 static void
   13088 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13089 {
   13090 
   13091 	*eecd &= ~EECD_SK;
   13092 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13093 	CSR_WRITE_FLUSH(sc);
   13094 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13095 		delay(1);
   13096 	else
   13097 		delay(50);
   13098 }
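
/*
 * Illustration only: how the two clock helpers above are typically
 * combined to shift one bit out to the serial EEPROM.  The data bit is
 * placed on EECD_DI while SK is low, then SK is raised and lowered so
 * the device samples it on the rising edge.  This sketch is not part
 * of the driver; the in-tree bit senders live elsewhere in this file.
 */
#if 0
static void
wm_nvm_send_bit(struct wm_softc *sc, uint32_t *eecd, int bit)
{

	if (bit)
		*eecd |= EECD_DI;
	else
		*eecd &= ~EECD_DI;
	CSR_WRITE(sc, WMREG_EECD, *eecd);
	CSR_WRITE_FLUSH(sc);
	wm_nvm_eec_clock_raise(sc, eecd);
	wm_nvm_eec_clock_lower(sc, eecd);
}
#endif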
   13099 
   13100 static void
   13101 wm_put_eecd(struct wm_softc *sc)
   13102 {
   13103 	uint32_t reg;
   13104 
   13105 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13106 		device_xname(sc->sc_dev), __func__));
   13107 
   13108 	/* Stop nvm */
   13109 	reg = CSR_READ(sc, WMREG_EECD);
   13110 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13111 		/* Pull CS high */
   13112 		reg |= EECD_CS;
   13113 		wm_nvm_eec_clock_lower(sc, &reg);
   13114 	} else {
   13115 		/* CS on Microwire is active-high */
   13116 		reg &= ~(EECD_CS | EECD_DI);
   13117 		CSR_WRITE(sc, WMREG_EECD, reg);
   13118 		wm_nvm_eec_clock_raise(sc, &reg);
   13119 		wm_nvm_eec_clock_lower(sc, &reg);
   13120 	}
   13121 
   13122 	reg = CSR_READ(sc, WMREG_EECD);
   13123 	reg &= ~EECD_EE_REQ;
   13124 	CSR_WRITE(sc, WMREG_EECD, reg);
   13125 
   13126 	return;
   13127 }
   13128 
   13129 /*
   13130  * Get hardware semaphore.
   13131  * Same as e1000_get_hw_semaphore_generic()
   13132  */
   13133 static int
   13134 wm_get_swsm_semaphore(struct wm_softc *sc)
   13135 {
   13136 	int32_t timeout;
   13137 	uint32_t swsm;
   13138 
   13139 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13140 		device_xname(sc->sc_dev), __func__));
   13141 	KASSERT(sc->sc_nvm_wordsize > 0);
   13142 
   13143 retry:
   13144 	/* Get the SW semaphore. */
   13145 	timeout = sc->sc_nvm_wordsize + 1;
   13146 	while (timeout) {
   13147 		swsm = CSR_READ(sc, WMREG_SWSM);
   13148 
   13149 		if ((swsm & SWSM_SMBI) == 0)
   13150 			break;
   13151 
   13152 		delay(50);
   13153 		timeout--;
   13154 	}
   13155 
   13156 	if (timeout == 0) {
   13157 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13158 			/*
   13159 			 * In rare circumstances, the SW semaphore may already
   13160 			 * be held unintentionally. Clear the semaphore once
   13161 			 * before giving up.
   13162 			 */
   13163 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13164 			wm_put_swsm_semaphore(sc);
   13165 			goto retry;
   13166 		}
   13167 		aprint_error_dev(sc->sc_dev,
   13168 		    "could not acquire SWSM SMBI\n");
   13169 		return 1;
   13170 	}
   13171 
   13172 	/* Get the FW semaphore. */
   13173 	timeout = sc->sc_nvm_wordsize + 1;
   13174 	while (timeout) {
   13175 		swsm = CSR_READ(sc, WMREG_SWSM);
   13176 		swsm |= SWSM_SWESMBI;
   13177 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13178 		/* If we managed to set the bit we got the semaphore. */
   13179 		swsm = CSR_READ(sc, WMREG_SWSM);
   13180 		if (swsm & SWSM_SWESMBI)
   13181 			break;
   13182 
   13183 		delay(50);
   13184 		timeout--;
   13185 	}
   13186 
   13187 	if (timeout == 0) {
   13188 		aprint_error_dev(sc->sc_dev,
   13189 		    "could not acquire SWSM SWESMBI\n");
   13190 		/* Release semaphores */
   13191 		wm_put_swsm_semaphore(sc);
   13192 		return 1;
   13193 	}
   13194 	return 0;
   13195 }
   13196 
   13197 /*
   13198  * Put hardware semaphore.
   13199  * Same as e1000_put_hw_semaphore_generic()
   13200  */
   13201 static void
   13202 wm_put_swsm_semaphore(struct wm_softc *sc)
   13203 {
   13204 	uint32_t swsm;
   13205 
   13206 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13207 		device_xname(sc->sc_dev), __func__));
   13208 
   13209 	swsm = CSR_READ(sc, WMREG_SWSM);
   13210 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13211 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13212 }
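
/*
 * Typical usage pattern for the pair above (illustration only): take
 * both SMBI and SWESMBI, touch the resource shared with firmware, then
 * drop both.  wm_get_swsm_semaphore() returns nonzero on timeout, so
 * the error path must not touch the resource.
 */
#if 0
	if (wm_get_swsm_semaphore(sc) != 0)
		return 1;		/* contended; try again later */
	/* ... access the shared resource ... */
	wm_put_swsm_semaphore(sc);
#endif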
   13213 
   13214 /*
   13215  * Get SW/FW semaphore.
   13216  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13217  */
   13218 static int
   13219 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13220 {
   13221 	uint32_t swfw_sync;
   13222 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13223 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13224 	int timeout;
   13225 
   13226 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13227 		device_xname(sc->sc_dev), __func__));
   13228 
   13229 	if (sc->sc_type == WM_T_80003)
   13230 		timeout = 50;
   13231 	else
   13232 		timeout = 200;
   13233 
   13234 	while (timeout) {
   13235 		if (wm_get_swsm_semaphore(sc)) {
   13236 			aprint_error_dev(sc->sc_dev,
   13237 			    "%s: failed to get semaphore\n",
   13238 			    __func__);
   13239 			return 1;
   13240 		}
   13241 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13242 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13243 			swfw_sync |= swmask;
   13244 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13245 			wm_put_swsm_semaphore(sc);
   13246 			return 0;
   13247 		}
   13248 		wm_put_swsm_semaphore(sc);
   13249 		delay(5000);
   13250 		timeout--;
   13251 	}
   13252 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13253 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13254 	return 1;
   13255 }
   13256 
   13257 static void
   13258 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13259 {
   13260 	uint32_t swfw_sync;
   13261 
   13262 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13263 		device_xname(sc->sc_dev), __func__));
   13264 
   13265 	while (wm_get_swsm_semaphore(sc) != 0)
   13266 		continue;
   13267 
   13268 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13269 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13270 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13271 
   13272 	wm_put_swsm_semaphore(sc);
   13273 }
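
/*
 * Illustration only: SW_FW_SYNC holds per-resource bit pairs, one
 * software bit and one firmware bit, selected by the mask argument.
 * A PHY access would bracket the MDIC traffic as below; SWFW_PHY0_SM
 * is one of the masks used via the swfwphysem[] table elsewhere in
 * this file.
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) != 0)
		return 1;
	/* ... MDIC register reads/writes ... */
	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
#endif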
   13274 
   13275 static int
   13276 wm_get_nvm_80003(struct wm_softc *sc)
   13277 {
   13278 	int rv;
   13279 
   13280 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13281 		device_xname(sc->sc_dev), __func__));
   13282 
   13283 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13284 		aprint_error_dev(sc->sc_dev,
   13285 		    "%s: failed to get semaphore(SWFW)\n",
   13286 		    __func__);
   13287 		return rv;
   13288 	}
   13289 
   13290 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13291 	    && (rv = wm_get_eecd(sc)) != 0) {
   13292 		aprint_error_dev(sc->sc_dev,
   13293 		    "%s: failed to get semaphore(EECD)\n",
   13294 		    __func__);
   13295 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13296 		return rv;
   13297 	}
   13298 
   13299 	return 0;
   13300 }
   13301 
   13302 static void
   13303 wm_put_nvm_80003(struct wm_softc *sc)
   13304 {
   13305 
   13306 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13307 		device_xname(sc->sc_dev), __func__));
   13308 
   13309 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13310 		wm_put_eecd(sc);
   13311 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13312 }
   13313 
   13314 static int
   13315 wm_get_nvm_82571(struct wm_softc *sc)
   13316 {
   13317 	int rv;
   13318 
   13319 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13320 		device_xname(sc->sc_dev), __func__));
   13321 
   13322 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13323 		return rv;
   13324 
   13325 	switch (sc->sc_type) {
   13326 	case WM_T_82573:
   13327 		break;
   13328 	default:
   13329 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13330 			rv = wm_get_eecd(sc);
   13331 		break;
   13332 	}
   13333 
   13334 	if (rv != 0) {
   13335 		aprint_error_dev(sc->sc_dev,
   13336 		    "%s: failed to get semaphore\n",
   13337 		    __func__);
   13338 		wm_put_swsm_semaphore(sc);
   13339 	}
   13340 
   13341 	return rv;
   13342 }
   13343 
   13344 static void
   13345 wm_put_nvm_82571(struct wm_softc *sc)
   13346 {
   13347 
   13348 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13349 		device_xname(sc->sc_dev), __func__));
   13350 
   13351 	switch (sc->sc_type) {
   13352 	case WM_T_82573:
   13353 		break;
   13354 	default:
   13355 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13356 			wm_put_eecd(sc);
   13357 		break;
   13358 	}
   13359 
   13360 	wm_put_swsm_semaphore(sc);
   13361 }
   13362 
   13363 static int
   13364 wm_get_phy_82575(struct wm_softc *sc)
   13365 {
   13366 
   13367 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13368 		device_xname(sc->sc_dev), __func__));
   13369 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13370 }
   13371 
   13372 static void
   13373 wm_put_phy_82575(struct wm_softc *sc)
   13374 {
   13375 
   13376 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13377 		device_xname(sc->sc_dev), __func__));
   13378 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13379 }
   13380 
   13381 static int
   13382 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13383 {
   13384 	uint32_t ext_ctrl;
    13385 	int timeout;
   13386 
   13387 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13388 		device_xname(sc->sc_dev), __func__));
   13389 
   13390 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13391 	for (timeout = 0; timeout < 200; timeout++) {
   13392 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13393 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13394 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13395 
   13396 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13397 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13398 			return 0;
   13399 		delay(5000);
   13400 	}
   13401 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13402 	    device_xname(sc->sc_dev), ext_ctrl);
   13403 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13404 	return 1;
   13405 }
   13406 
   13407 static void
   13408 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13409 {
   13410 	uint32_t ext_ctrl;
   13411 
   13412 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13413 		device_xname(sc->sc_dev), __func__));
   13414 
   13415 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13416 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13417 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13418 
   13419 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13420 }
   13421 
   13422 static int
   13423 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13424 {
   13425 	uint32_t ext_ctrl;
   13426 	int timeout;
   13427 
   13428 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13429 		device_xname(sc->sc_dev), __func__));
   13430 	mutex_enter(sc->sc_ich_phymtx);
   13431 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13432 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13433 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13434 			break;
   13435 		delay(1000);
   13436 	}
   13437 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13438 		printf("%s: SW has already locked the resource\n",
   13439 		    device_xname(sc->sc_dev));
   13440 		goto out;
   13441 	}
   13442 
   13443 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13444 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13445 	for (timeout = 0; timeout < 1000; timeout++) {
   13446 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13447 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13448 			break;
   13449 		delay(1000);
   13450 	}
   13451 	if (timeout >= 1000) {
   13452 		printf("%s: failed to acquire semaphore\n",
   13453 		    device_xname(sc->sc_dev));
   13454 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13455 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13456 		goto out;
   13457 	}
   13458 	return 0;
   13459 
   13460 out:
   13461 	mutex_exit(sc->sc_ich_phymtx);
   13462 	return 1;
   13463 }
   13464 
   13465 static void
   13466 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13467 {
   13468 	uint32_t ext_ctrl;
   13469 
   13470 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13471 		device_xname(sc->sc_dev), __func__));
   13472 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13473 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13474 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13475 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13476 	} else {
   13477 		printf("%s: Semaphore unexpectedly released\n",
   13478 		    device_xname(sc->sc_dev));
   13479 	}
   13480 
   13481 	mutex_exit(sc->sc_ich_phymtx);
   13482 }
   13483 
   13484 static int
   13485 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13486 {
   13487 
   13488 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13489 		device_xname(sc->sc_dev), __func__));
   13490 	mutex_enter(sc->sc_ich_nvmmtx);
   13491 
   13492 	return 0;
   13493 }
   13494 
   13495 static void
   13496 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13497 {
   13498 
   13499 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13500 		device_xname(sc->sc_dev), __func__));
   13501 	mutex_exit(sc->sc_ich_nvmmtx);
   13502 }
   13503 
   13504 static int
   13505 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13506 {
   13507 	int i = 0;
   13508 	uint32_t reg;
   13509 
   13510 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13511 		device_xname(sc->sc_dev), __func__));
   13512 
   13513 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13514 	do {
   13515 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13516 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13517 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13518 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13519 			break;
   13520 		delay(2*1000);
   13521 		i++;
   13522 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13523 
   13524 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13525 		wm_put_hw_semaphore_82573(sc);
   13526 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13527 		    device_xname(sc->sc_dev));
   13528 		return -1;
   13529 	}
   13530 
   13531 	return 0;
   13532 }
   13533 
   13534 static void
   13535 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13536 {
   13537 	uint32_t reg;
   13538 
   13539 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13540 		device_xname(sc->sc_dev), __func__));
   13541 
   13542 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13543 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13544 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13545 }
   13546 
   13547 /*
   13548  * Management mode and power management related subroutines.
   13549  * BMC, AMT, suspend/resume and EEE.
   13550  */
   13551 
   13552 #ifdef WM_WOL
   13553 static int
   13554 wm_check_mng_mode(struct wm_softc *sc)
   13555 {
   13556 	int rv;
   13557 
   13558 	switch (sc->sc_type) {
   13559 	case WM_T_ICH8:
   13560 	case WM_T_ICH9:
   13561 	case WM_T_ICH10:
   13562 	case WM_T_PCH:
   13563 	case WM_T_PCH2:
   13564 	case WM_T_PCH_LPT:
   13565 	case WM_T_PCH_SPT:
   13566 	case WM_T_PCH_CNP:
   13567 		rv = wm_check_mng_mode_ich8lan(sc);
   13568 		break;
   13569 	case WM_T_82574:
   13570 	case WM_T_82583:
   13571 		rv = wm_check_mng_mode_82574(sc);
   13572 		break;
   13573 	case WM_T_82571:
   13574 	case WM_T_82572:
   13575 	case WM_T_82573:
   13576 	case WM_T_80003:
   13577 		rv = wm_check_mng_mode_generic(sc);
   13578 		break;
   13579 	default:
    13580 		/* nothing to do */
   13581 		rv = 0;
   13582 		break;
   13583 	}
   13584 
   13585 	return rv;
   13586 }
   13587 
   13588 static int
   13589 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13590 {
   13591 	uint32_t fwsm;
   13592 
   13593 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13594 
   13595 	if (((fwsm & FWSM_FW_VALID) != 0)
   13596 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13597 		return 1;
   13598 
   13599 	return 0;
   13600 }
   13601 
   13602 static int
   13603 wm_check_mng_mode_82574(struct wm_softc *sc)
   13604 {
   13605 	uint16_t data;
   13606 
   13607 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13608 
   13609 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13610 		return 1;
   13611 
   13612 	return 0;
   13613 }
   13614 
   13615 static int
   13616 wm_check_mng_mode_generic(struct wm_softc *sc)
   13617 {
   13618 	uint32_t fwsm;
   13619 
   13620 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13621 
   13622 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13623 		return 1;
   13624 
   13625 	return 0;
   13626 }
   13627 #endif /* WM_WOL */
   13628 
   13629 static int
   13630 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13631 {
   13632 	uint32_t manc, fwsm, factps;
   13633 
   13634 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13635 		return 0;
   13636 
   13637 	manc = CSR_READ(sc, WMREG_MANC);
   13638 
   13639 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13640 		device_xname(sc->sc_dev), manc));
   13641 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13642 		return 0;
   13643 
   13644 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13645 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13646 		factps = CSR_READ(sc, WMREG_FACTPS);
   13647 		if (((factps & FACTPS_MNGCG) == 0)
   13648 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13649 			return 1;
   13650 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13651 		uint16_t data;
   13652 
   13653 		factps = CSR_READ(sc, WMREG_FACTPS);
   13654 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13655 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13656 			device_xname(sc->sc_dev), factps, data));
   13657 		if (((factps & FACTPS_MNGCG) == 0)
   13658 		    && ((data & NVM_CFG2_MNGM_MASK)
   13659 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13660 			return 1;
   13661 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13662 	    && ((manc & MANC_ASF_EN) == 0))
   13663 		return 1;
   13664 
   13665 	return 0;
   13666 }
   13667 
   13668 static bool
   13669 wm_phy_resetisblocked(struct wm_softc *sc)
   13670 {
   13671 	bool blocked = false;
   13672 	uint32_t reg;
   13673 	int i = 0;
   13674 
   13675 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13676 		device_xname(sc->sc_dev), __func__));
   13677 
   13678 	switch (sc->sc_type) {
   13679 	case WM_T_ICH8:
   13680 	case WM_T_ICH9:
   13681 	case WM_T_ICH10:
   13682 	case WM_T_PCH:
   13683 	case WM_T_PCH2:
   13684 	case WM_T_PCH_LPT:
   13685 	case WM_T_PCH_SPT:
   13686 	case WM_T_PCH_CNP:
   13687 		do {
   13688 			reg = CSR_READ(sc, WMREG_FWSM);
   13689 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13690 				blocked = true;
   13691 				delay(10*1000);
   13692 				continue;
   13693 			}
   13694 			blocked = false;
   13695 		} while (blocked && (i++ < 30));
    13696 		return blocked;
   13698 	case WM_T_82571:
   13699 	case WM_T_82572:
   13700 	case WM_T_82573:
   13701 	case WM_T_82574:
   13702 	case WM_T_82583:
   13703 	case WM_T_80003:
   13704 		reg = CSR_READ(sc, WMREG_MANC);
    13705 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
    13706 			return true;
    13707 		else
    13708 			return false;
   13710 	default:
   13711 		/* no problem */
   13712 		break;
   13713 	}
   13714 
   13715 	return false;
   13716 }
   13717 
   13718 static void
   13719 wm_get_hw_control(struct wm_softc *sc)
   13720 {
   13721 	uint32_t reg;
   13722 
   13723 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13724 		device_xname(sc->sc_dev), __func__));
   13725 
   13726 	if (sc->sc_type == WM_T_82573) {
   13727 		reg = CSR_READ(sc, WMREG_SWSM);
   13728 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13729 	} else if (sc->sc_type >= WM_T_82571) {
   13730 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13731 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13732 	}
   13733 }
   13734 
   13735 static void
   13736 wm_release_hw_control(struct wm_softc *sc)
   13737 {
   13738 	uint32_t reg;
   13739 
   13740 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13741 		device_xname(sc->sc_dev), __func__));
   13742 
   13743 	if (sc->sc_type == WM_T_82573) {
   13744 		reg = CSR_READ(sc, WMREG_SWSM);
   13745 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13746 	} else if (sc->sc_type >= WM_T_82571) {
   13747 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13748 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13749 	}
   13750 }
   13751 
   13752 static void
   13753 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13754 {
   13755 	uint32_t reg;
   13756 
   13757 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13758 		device_xname(sc->sc_dev), __func__));
   13759 
   13760 	if (sc->sc_type < WM_T_PCH2)
   13761 		return;
   13762 
   13763 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13764 
   13765 	if (gate)
   13766 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13767 	else
   13768 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13769 
   13770 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13771 }
   13772 
   13773 static void
   13774 wm_smbustopci(struct wm_softc *sc)
   13775 {
   13776 	uint32_t fwsm, reg;
   13777 	int rv = 0;
   13778 
   13779 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13780 		device_xname(sc->sc_dev), __func__));
   13781 
   13782 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13783 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13784 
   13785 	/* Disable ULP */
   13786 	wm_ulp_disable(sc);
   13787 
   13788 	/* Acquire PHY semaphore */
   13789 	sc->phy.acquire(sc);
   13790 
   13791 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13792 	switch (sc->sc_type) {
   13793 	case WM_T_PCH_LPT:
   13794 	case WM_T_PCH_SPT:
   13795 	case WM_T_PCH_CNP:
   13796 		if (wm_phy_is_accessible_pchlan(sc))
   13797 			break;
   13798 
   13799 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13800 		reg |= CTRL_EXT_FORCE_SMBUS;
   13801 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13802 #if 0
   13803 		/* XXX Isn't this required??? */
   13804 		CSR_WRITE_FLUSH(sc);
   13805 #endif
   13806 		delay(50 * 1000);
   13807 		/* FALLTHROUGH */
   13808 	case WM_T_PCH2:
   13809 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13810 			break;
   13811 		/* FALLTHROUGH */
   13812 	case WM_T_PCH:
   13813 		if (sc->sc_type == WM_T_PCH)
   13814 			if ((fwsm & FWSM_FW_VALID) != 0)
   13815 				break;
   13816 
   13817 		if (wm_phy_resetisblocked(sc) == true) {
   13818 			printf("XXX reset is blocked(3)\n");
   13819 			break;
   13820 		}
   13821 
   13822 		wm_toggle_lanphypc_pch_lpt(sc);
   13823 
   13824 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13825 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13826 				break;
   13827 
   13828 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13829 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13830 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13831 
   13832 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13833 				break;
   13834 			rv = -1;
   13835 		}
   13836 		break;
   13837 	default:
   13838 		break;
   13839 	}
   13840 
   13841 	/* Release semaphore */
   13842 	sc->phy.release(sc);
   13843 
   13844 	if (rv == 0) {
   13845 		if (wm_phy_resetisblocked(sc)) {
   13846 			printf("XXX reset is blocked(4)\n");
   13847 			goto out;
   13848 		}
   13849 		wm_reset_phy(sc);
   13850 		if (wm_phy_resetisblocked(sc))
   13851 			printf("XXX reset is blocked(4)\n");
   13852 	}
   13853 
   13854 out:
   13855 	/*
   13856 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13857 	 */
   13858 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13859 		delay(10*1000);
   13860 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13861 	}
   13862 }
   13863 
   13864 static void
   13865 wm_init_manageability(struct wm_softc *sc)
   13866 {
   13867 
   13868 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13869 		device_xname(sc->sc_dev), __func__));
   13870 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13871 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13872 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13873 
   13874 		/* Disable hardware interception of ARP */
   13875 		manc &= ~MANC_ARP_EN;
   13876 
   13877 		/* Enable receiving management packets to the host */
   13878 		if (sc->sc_type >= WM_T_82571) {
   13879 			manc |= MANC_EN_MNG2HOST;
   13880 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13881 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13882 		}
   13883 
   13884 		CSR_WRITE(sc, WMREG_MANC, manc);
   13885 	}
   13886 }
   13887 
   13888 static void
   13889 wm_release_manageability(struct wm_softc *sc)
   13890 {
   13891 
   13892 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13893 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13894 
   13895 		manc |= MANC_ARP_EN;
   13896 		if (sc->sc_type >= WM_T_82571)
   13897 			manc &= ~MANC_EN_MNG2HOST;
   13898 
   13899 		CSR_WRITE(sc, WMREG_MANC, manc);
   13900 	}
   13901 }
   13902 
   13903 static void
   13904 wm_get_wakeup(struct wm_softc *sc)
   13905 {
   13906 
   13907 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13908 	switch (sc->sc_type) {
   13909 	case WM_T_82573:
   13910 	case WM_T_82583:
   13911 		sc->sc_flags |= WM_F_HAS_AMT;
   13912 		/* FALLTHROUGH */
   13913 	case WM_T_80003:
   13914 	case WM_T_82575:
   13915 	case WM_T_82576:
   13916 	case WM_T_82580:
   13917 	case WM_T_I350:
   13918 	case WM_T_I354:
   13919 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13920 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13921 		/* FALLTHROUGH */
   13922 	case WM_T_82541:
   13923 	case WM_T_82541_2:
   13924 	case WM_T_82547:
   13925 	case WM_T_82547_2:
   13926 	case WM_T_82571:
   13927 	case WM_T_82572:
   13928 	case WM_T_82574:
   13929 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13930 		break;
   13931 	case WM_T_ICH8:
   13932 	case WM_T_ICH9:
   13933 	case WM_T_ICH10:
   13934 	case WM_T_PCH:
   13935 	case WM_T_PCH2:
   13936 	case WM_T_PCH_LPT:
   13937 	case WM_T_PCH_SPT:
   13938 	case WM_T_PCH_CNP:
   13939 		sc->sc_flags |= WM_F_HAS_AMT;
   13940 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13941 		break;
   13942 	default:
   13943 		break;
   13944 	}
   13945 
   13946 	/* 1: HAS_MANAGE */
   13947 	if (wm_enable_mng_pass_thru(sc) != 0)
   13948 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13949 
   13950 	/*
    13951 	 * Note that the WOL flags are set after the EEPROM stuff is
    13952 	 * reset.
   13953 	 */
   13954 }
   13955 
   13956 /*
   13957  * Unconfigure Ultra Low Power mode.
   13958  * Only for I217 and newer (see below).
   13959  */
   13960 static int
   13961 wm_ulp_disable(struct wm_softc *sc)
   13962 {
   13963 	uint32_t reg;
   13964 	uint16_t phyreg;
   13965 	int i = 0, rv = 0;
   13966 
   13967 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13968 		device_xname(sc->sc_dev), __func__));
   13969 	/* Exclude old devices */
   13970 	if ((sc->sc_type < WM_T_PCH_LPT)
   13971 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13972 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13973 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13974 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13975 		return 0;
   13976 
   13977 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
    13978 		/* Request that the ME un-configure ULP mode in the PHY */
   13979 		reg = CSR_READ(sc, WMREG_H2ME);
   13980 		reg &= ~H2ME_ULP;
   13981 		reg |= H2ME_ENFORCE_SETTINGS;
   13982 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13983 
   13984 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13985 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13986 			if (i++ == 30) {
   13987 				printf("%s timed out\n", __func__);
   13988 				return -1;
   13989 			}
   13990 			delay(10 * 1000);
   13991 		}
   13992 		reg = CSR_READ(sc, WMREG_H2ME);
   13993 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13994 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13995 
   13996 		return 0;
   13997 	}
   13998 
   13999 	/* Acquire semaphore */
   14000 	sc->phy.acquire(sc);
   14001 
   14002 	/* Toggle LANPHYPC */
   14003 	wm_toggle_lanphypc_pch_lpt(sc);
   14004 
   14005 	/* Unforce SMBus mode in PHY */
   14006 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14007 	if (rv != 0) {
   14008 		uint32_t reg2;
   14009 
   14010 		printf("%s: Force SMBus first.\n", __func__);
   14011 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14012 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14013 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14014 		delay(50 * 1000);
   14015 
   14016 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14017 		    &phyreg);
   14018 		if (rv != 0)
   14019 			goto release;
   14020 	}
   14021 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14022 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14023 
   14024 	/* Unforce SMBus mode in MAC */
   14025 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14026 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14027 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14028 
   14029 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14030 	if (rv != 0)
   14031 		goto release;
   14032 	phyreg |= HV_PM_CTRL_K1_ENA;
   14033 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14034 
   14035 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14036 		&phyreg);
   14037 	if (rv != 0)
   14038 		goto release;
   14039 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14040 	    | I218_ULP_CONFIG1_STICKY_ULP
   14041 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14042 	    | I218_ULP_CONFIG1_WOL_HOST
   14043 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14044 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14045 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14046 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14047 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14048 	phyreg |= I218_ULP_CONFIG1_START;
   14049 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14050 
   14051 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14052 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14053 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14054 
   14055 release:
   14056 	/* Release semaphore */
   14057 	sc->phy.release(sc);
   14058 	wm_gmii_reset(sc);
   14059 	delay(50 * 1000);
   14060 
   14061 	return rv;
   14062 }
   14063 
   14064 /* WOL in the newer chipset interfaces (pchlan) */
   14065 static void
   14066 wm_enable_phy_wakeup(struct wm_softc *sc)
   14067 {
   14068 #if 0
   14069 	uint16_t preg;
   14070 
   14071 	/* Copy MAC RARs to PHY RARs */
   14072 
   14073 	/* Copy MAC MTA to PHY MTA */
   14074 
   14075 	/* Configure PHY Rx Control register */
   14076 
   14077 	/* Enable PHY wakeup in MAC register */
   14078 
   14079 	/* Configure and enable PHY wakeup in PHY registers */
   14080 
   14081 	/* Activate PHY wakeup */
   14082 
   14083 	/* XXX */
   14084 #endif
   14085 }
   14086 
   14087 /* Power down workaround on D3 */
   14088 static void
   14089 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14090 {
   14091 	uint32_t reg;
   14092 	int i;
   14093 
   14094 	for (i = 0; i < 2; i++) {
   14095 		/* Disable link */
   14096 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14097 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14098 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14099 
   14100 		/*
   14101 		 * Call gig speed drop workaround on Gig disable before
   14102 		 * accessing any PHY registers
   14103 		 */
   14104 		if (sc->sc_type == WM_T_ICH8)
   14105 			wm_gig_downshift_workaround_ich8lan(sc);
   14106 
   14107 		/* Write VR power-down enable */
   14108 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14109 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14110 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14111 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   14112 
   14113 		/* Read it back and test */
   14114 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14115 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14116 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14117 			break;
   14118 
   14119 		/* Issue PHY reset and repeat at most one more time */
   14120 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14121 	}
   14122 }
   14123 
   14124 static void
   14125 wm_enable_wakeup(struct wm_softc *sc)
   14126 {
   14127 	uint32_t reg, pmreg;
   14128 	pcireg_t pmode;
   14129 
   14130 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14131 		device_xname(sc->sc_dev), __func__));
   14132 
   14133 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14134 		&pmreg, NULL) == 0)
   14135 		return;
   14136 
   14137 	/* Advertise the wakeup capability */
   14138 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14139 	    | CTRL_SWDPIN(3));
   14140 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   14141 
   14142 	/* ICH workaround */
   14143 	switch (sc->sc_type) {
   14144 	case WM_T_ICH8:
   14145 	case WM_T_ICH9:
   14146 	case WM_T_ICH10:
   14147 	case WM_T_PCH:
   14148 	case WM_T_PCH2:
   14149 	case WM_T_PCH_LPT:
   14150 	case WM_T_PCH_SPT:
   14151 	case WM_T_PCH_CNP:
   14152 		/* Disable gig during WOL */
   14153 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14154 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   14155 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14156 		if (sc->sc_type == WM_T_PCH)
   14157 			wm_gmii_reset(sc);
   14158 
   14159 		/* Power down workaround */
   14160 		if (sc->sc_phytype == WMPHY_82577) {
   14161 			struct mii_softc *child;
   14162 
   14163 			/* Assume that the PHY is copper */
   14164 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14165 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   14166 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   14167 				    (768 << 5) | 25, 0x0444); /* magic num */
   14168 		}
   14169 		break;
   14170 	default:
   14171 		break;
   14172 	}
   14173 
   14174 	/* Keep the laser running on fiber adapters */
   14175 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14176 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14177 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14178 		reg |= CTRL_EXT_SWDPIN(3);
   14179 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14180 	}
   14181 
   14182 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14183 #if 0	/* for the multicast packet */
   14184 	reg |= WUFC_MC;
   14185 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14186 #endif
   14187 
   14188 	if (sc->sc_type >= WM_T_PCH)
   14189 		wm_enable_phy_wakeup(sc);
   14190 	else {
   14191 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14192 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14193 	}
   14194 
   14195 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14196 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14197 		|| (sc->sc_type == WM_T_PCH2))
   14198 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14199 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14200 
   14201 	/* Request PME */
   14202 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14203 #if 0
   14204 	/* Disable WOL */
   14205 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14206 #else
   14207 	/* For WOL */
   14208 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14209 #endif
   14210 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14211 }
   14212 
   14213 /* Disable ASPM L0s and/or L1 for workaround */
   14214 static void
   14215 wm_disable_aspm(struct wm_softc *sc)
   14216 {
   14217 	pcireg_t reg, mask = 0;
    14218 	const char *str = "";
   14219 
   14220 	/*
    14221 	 * Only for PCIe devices which have the PCIe capability in the
    14222 	 * PCI config space.
   14223 	 */
   14224 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14225 		return;
   14226 
   14227 	switch (sc->sc_type) {
   14228 	case WM_T_82571:
   14229 	case WM_T_82572:
   14230 		/*
   14231 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14232 		 * State Power management L1 State (ASPM L1).
   14233 		 */
   14234 		mask = PCIE_LCSR_ASPM_L1;
   14235 		str = "L1 is";
   14236 		break;
   14237 	case WM_T_82573:
   14238 	case WM_T_82574:
   14239 	case WM_T_82583:
   14240 		/*
   14241 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14242 		 *
    14243 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14244 		 * some chipsets.  The documents for the 82574 and 82583 say
    14245 		 * that disabling L0s with those specific chipsets is
    14246 		 * sufficient, but we follow what the Intel em driver does.
   14247 		 *
   14248 		 * References:
   14249 		 * Errata 8 of the Specification Update of i82573.
   14250 		 * Errata 20 of the Specification Update of i82574.
   14251 		 * Errata 9 of the Specification Update of i82583.
   14252 		 */
   14253 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14254 		str = "L0s and L1 are";
   14255 		break;
   14256 	default:
   14257 		return;
   14258 	}
   14259 
   14260 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14261 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14262 	reg &= ~mask;
   14263 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14264 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14265 
   14266 	/* Print only in wm_attach() */
   14267 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14268 		aprint_verbose_dev(sc->sc_dev,
   14269 		    "ASPM %s disabled to workaround the errata.\n", str);
   14270 }
   14271 
   14272 /* LPLU */
   14273 
   14274 static void
   14275 wm_lplu_d0_disable(struct wm_softc *sc)
   14276 {
   14277 	struct mii_data *mii = &sc->sc_mii;
   14278 	uint32_t reg;
   14279 
   14280 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14281 		device_xname(sc->sc_dev), __func__));
   14282 
   14283 	if (sc->sc_phytype == WMPHY_IFE)
   14284 		return;
   14285 
   14286 	switch (sc->sc_type) {
   14287 	case WM_T_82571:
   14288 	case WM_T_82572:
   14289 	case WM_T_82573:
   14290 	case WM_T_82575:
   14291 	case WM_T_82576:
   14292 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14293 		reg &= ~PMR_D0_LPLU;
   14294 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14295 		break;
   14296 	case WM_T_82580:
   14297 	case WM_T_I350:
   14298 	case WM_T_I210:
   14299 	case WM_T_I211:
   14300 		reg = CSR_READ(sc, WMREG_PHPM);
   14301 		reg &= ~PHPM_D0A_LPLU;
   14302 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14303 		break;
   14304 	case WM_T_82574:
   14305 	case WM_T_82583:
   14306 	case WM_T_ICH8:
   14307 	case WM_T_ICH9:
   14308 	case WM_T_ICH10:
   14309 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14310 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14311 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14312 		CSR_WRITE_FLUSH(sc);
   14313 		break;
   14314 	case WM_T_PCH:
   14315 	case WM_T_PCH2:
   14316 	case WM_T_PCH_LPT:
   14317 	case WM_T_PCH_SPT:
   14318 	case WM_T_PCH_CNP:
   14319 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14320 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14321 		if (wm_phy_resetisblocked(sc) == false)
   14322 			reg |= HV_OEM_BITS_ANEGNOW;
   14323 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14324 		break;
   14325 	default:
   14326 		break;
   14327 	}
   14328 }
   14329 
   14330 /* EEE */
   14331 
   14332 static void
   14333 wm_set_eee_i350(struct wm_softc *sc)
   14334 {
   14335 	uint32_t ipcnfg, eeer;
   14336 
   14337 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14338 	eeer = CSR_READ(sc, WMREG_EEER);
   14339 
   14340 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14341 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14342 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14343 		    | EEER_LPI_FC);
   14344 	} else {
   14345 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14346 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14347 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14348 		    | EEER_LPI_FC);
   14349 	}
   14350 
   14351 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14352 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14353 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14354 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14355 }
   14356 
   14357 /*
   14358  * Workarounds (mainly PHY related).
   14359  * Basically, PHY's workarounds are in the PHY drivers.
   14360  */
   14361 
   14362 /* Work-around for 82566 Kumeran PCS lock loss */
   14363 static void
   14364 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14365 {
   14366 	struct mii_data *mii = &sc->sc_mii;
   14367 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14368 	int i;
   14369 	int reg;
   14370 
   14371 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14372 		device_xname(sc->sc_dev), __func__));
   14373 
   14374 	/* If the link is not up, do nothing */
   14375 	if ((status & STATUS_LU) == 0)
   14376 		return;
   14377 
   14378 	/* Nothing to do if the link is other than 1Gbps */
   14379 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14380 		return;
   14381 
   14382 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14383 	for (i = 0; i < 10; i++) {
   14384 		/* read twice */
   14385 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14386 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14387 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14388 			goto out;	/* GOOD! */
   14389 
   14390 		/* Reset the PHY */
   14391 		wm_reset_phy(sc);
   14392 		delay(5*1000);
   14393 	}
   14394 
   14395 	/* Disable GigE link negotiation */
   14396 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14397 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14398 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14399 
   14400 	/*
   14401 	 * Call gig speed drop workaround on Gig disable before accessing
   14402 	 * any PHY registers.
   14403 	 */
   14404 	wm_gig_downshift_workaround_ich8lan(sc);
   14405 
   14406 out:
   14407 	return;
   14408 }
   14409 
   14410 /* WOL from S5 stops working */
   14411 static void
   14412 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14413 {
   14414 	uint16_t kmreg;
   14415 
   14416 	/* Only for igp3 */
   14417 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14418 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14419 			return;
   14420 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14421 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14422 			return;
   14423 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14424 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14425 	}
   14426 }
   14427 
   14428 /*
   14429  * Workaround for pch's PHYs
    14430  * XXX should this be moved to a new PHY driver?
   14431  */
   14432 static void
   14433 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14434 {
   14435 
   14436 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14437 		device_xname(sc->sc_dev), __func__));
   14438 	KASSERT(sc->sc_type == WM_T_PCH);
   14439 
   14440 	if (sc->sc_phytype == WMPHY_82577)
   14441 		wm_set_mdio_slow_mode_hv(sc);
   14442 
   14443 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14444 
   14445 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14446 
   14447 	/* 82578 */
   14448 	if (sc->sc_phytype == WMPHY_82578) {
   14449 		struct mii_softc *child;
   14450 
   14451 		/*
   14452 		 * Return registers to default by doing a soft reset then
   14453 		 * writing 0x3140 to the control register
   14454 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14455 		 */
   14456 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14457 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14458 			PHY_RESET(child);
   14459 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14460 			    0x3140);
   14461 		}
   14462 	}
   14463 
   14464 	/* Select page 0 */
   14465 	sc->phy.acquire(sc);
   14466 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14467 	sc->phy.release(sc);
   14468 
   14469 	/*
   14470 	 * Configure the K1 Si workaround during phy reset assuming there is
   14471 	 * link so that it disables K1 if link is in 1Gbps.
    14472 	 * link so that it disables K1 if the link is at 1Gbps.
   14473 	wm_k1_gig_workaround_hv(sc, 1);
   14474 }
   14475 
   14476 static void
   14477 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14478 {
   14479 
   14480 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14481 		device_xname(sc->sc_dev), __func__));
   14482 	KASSERT(sc->sc_type == WM_T_PCH2);
   14483 
   14484 	wm_set_mdio_slow_mode_hv(sc);
   14485 }
   14486 
   14487 /**
   14488  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   14489  *  @link: link up bool flag
   14490  *
   14491  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   14492  *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
   14494  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   14495  *  speeds in order to avoid Tx hangs.
   14496  **/
   14497 static int
   14498 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   14499 {
   14500 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   14501 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14502 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   14503 	uint16_t phyreg;
   14504 
   14505 	if (link && (speed == STATUS_SPEED_1000)) {
   14506 		sc->phy.acquire(sc);
   14507 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14508 		    &phyreg);
   14509 		if (rv != 0)
   14510 			goto release;
   14511 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14512 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   14513 		if (rv != 0)
   14514 			goto release;
   14515 		delay(20);
   14516 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   14517 
   14518 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14519 		    &phyreg);
   14520 release:
   14521 		sc->phy.release(sc);
   14522 		return rv;
   14523 	}
   14524 
   14525 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   14526 
   14527 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14528 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   14529 	    || !link
   14530 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   14531 		goto update_fextnvm6;
   14532 
   14533 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL);
   14534 
   14535 	/* Clear link status transmit timeout */
   14536 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   14537 	if (speed == STATUS_SPEED_100) {
   14538 		/* Set inband Tx timeout to 5x10us for 100Half */
   14539 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14540 
   14541 		/* Do not extend the K1 entry latency for 100Half */
   14542 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14543 	} else {
   14544 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   14545 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14546 
   14547 		/* Extend the K1 entry latency for 10 Mbps */
   14548 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14549 	}
   14550 
   14551 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   14552 
   14553 update_fextnvm6:
   14554 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   14555 	return 0;
   14556 }
   14557 
   14558 static int
   14559 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14560 {
   14561 	int k1_enable = sc->sc_nvm_k1_enabled;
   14562 
   14563 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14564 		device_xname(sc->sc_dev), __func__));
   14565 
   14566 	if (sc->phy.acquire(sc) != 0)
   14567 		return -1;
   14568 
   14569 	if (link) {
   14570 		k1_enable = 0;
   14571 
   14572 		/* Link stall fix for link up */
   14573 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14574 		    0x0100);
   14575 	} else {
   14576 		/* Link stall fix for link down */
   14577 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14578 		    0x4100);
   14579 	}
   14580 
   14581 	wm_configure_k1_ich8lan(sc, k1_enable);
   14582 	sc->phy.release(sc);
   14583 
   14584 	return 0;
   14585 }
   14586 
   14587 static void
   14588 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14589 {
   14590 	uint32_t reg;
   14591 
   14592 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14593 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14594 	    reg | HV_KMRN_MDIO_SLOW);
   14595 }
   14596 
   14597 static void
   14598 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14599 {
   14600 	uint32_t ctrl, ctrl_ext, tmp;
   14601 	uint16_t kmreg;
   14602 	int rv;
   14603 
   14604 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14605 
   14606 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14607 	if (rv != 0)
   14608 		return;
   14609 
   14610 	if (k1_enable)
   14611 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14612 	else
   14613 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14614 
   14615 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14616 	if (rv != 0)
   14617 		return;
   14618 
   14619 	delay(20);
   14620 
   14621 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14622 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14623 
   14624 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14625 	tmp |= CTRL_FRCSPD;
   14626 
   14627 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14628 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14629 	CSR_WRITE_FLUSH(sc);
   14630 	delay(20);
   14631 
   14632 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14633 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14634 	CSR_WRITE_FLUSH(sc);
   14635 	delay(20);
   14636 
   14637 	return;
   14638 }
   14639 
    14640 /* Special case - the 82575 needs to do manual init ... */
   14641 static void
   14642 wm_reset_init_script_82575(struct wm_softc *sc)
   14643 {
   14644 	/*
    14645 	 * Remark: this is untested code - we have no board without an EEPROM.
    14646 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   14647 	 */
   14648 
   14649 	/* SerDes configuration via SERDESCTRL */
   14650 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14651 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14652 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14653 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14654 
   14655 	/* CCM configuration via CCMCTL register */
   14656 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14657 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14658 
   14659 	/* PCIe lanes configuration */
   14660 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14661 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14662 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14663 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14664 
   14665 	/* PCIe PLL Configuration */
   14666 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14667 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14668 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14669 }
   14670 
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

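/*
 * Check whether the PHY answers on the MDIO bus by reading its ID
 * registers.  If the first attempts fail, retry in MDIO slow mode on
 * pre-LPT parts; on PCH_LPT and newer, unforce SMBus mode once the PHY
 * proves accessible, unless management firmware (ME) is active.
 */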
static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
		sc->phy.acquire(sc);
	}
	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		DPRINTF(WM_DEBUG_INIT, ("%s: %s: PHY ID is still invalid\n",
			device_xname(sc->sc_dev), __func__));
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			if (rv == 0) {
				phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
				wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
				    CV_SMB_CTRL, phyreg);
			}

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

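/*
 * Toggle the LANPHYPC pin: drive it low with the override bit, then
 * release it, to cycle the PHY's power state.  The PHY configuration
 * counter is first set to 50ms so the PHY has time to reload its
 * configuration afterwards.
 */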
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

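	/*
	 * Wait for the PHY: a fixed 50ms on pre-LPT parts; otherwise poll
	 * the LANPHYPC done bit (CTRL_EXT_LPCD) for up to 100ms, then
	 * allow another 30ms to settle.
	 */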
	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

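/*
 * Set Latency Tolerance Reporting (LTR) and Optimized Buffer Flush/Fill
 * (OBFF) values for platform power management on PCH_LPT and newer.
 * The reported latency is the time to drain the Rx packet buffer at the
 * current link speed, capped by the maximum latency the platform
 * advertises in its LTR capability.
 */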
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
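		/*
		 * Drain time in ns: buffer bytes (rxa is in KB) minus two
		 * maximum-sized frames, times 8 bits, times 1000, divided
		 * by the link speed in Mb/s.
		 */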
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

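		/*
		 * Convert the capped latency back into the buffer space
		 * (KB) consumed while waiting; the OBFF high water mark is
		 * whatever remains of the Rx packet buffer.
		 */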
		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

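	/* Use the internal PHY as the MDIO destination while we work */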
	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
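
	/*
	 * Check whether the PHY PLL came up configured; if not, reset the
	 * PHY, bounce the device through D3 with the workaround autoload
	 * value in EEARBC, and retry up to WM_MAX_PLL_TRIES times.
	 */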
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break; /* OK */

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}

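/*
 * Quirk applied when using legacy (INTx) interrupts on PCH_SPT/PCH_CNP:
 * keep the side clock ungated and disable IOSF sideband clock gating
 * and clock requests (FEXTNVM7/FEXTNVM9).
 */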
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}