/*	$NetBSD: if_wm.c,v 1.594 2018/11/02 08:16:49 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.594 2018/11/02 08:16:49 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
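
/*
 * Usage sketch (illustrative, not in the original file): the second
 * DPRINTF() argument carries its own parentheses so that an entire
 * printf() argument list passes through as a single macro parameter:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 *
 * With WM_DEBUG undefined the call compiles away to nothing.
 */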

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * This device driver's maximum number of interrupts.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA segments,
 * m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
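
/*
 * Worked example (illustrative): because WM_NTXDESC() and
 * WM_TXQUEUELEN() are powers of two, WM_NEXTTX()/WM_NEXTTXS() wrap the
 * ring index with a cheap mask instead of a modulo.  With 256
 * descriptors (mask 255):
 *
 *	WM_NEXTTX(txq, 254) == 255
 *	WM_NEXTTX(txq, 255) == (255 + 1) & 255 == 0
 *
 * which is why txq_num and txq_ndesc must be powers of two.
 */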

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
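
/*
 * Worked example (illustrative): a ~9018 byte jumbo frame split into
 * 2k (MCLBYTES) clusters needs ceil(9018 / 2048) = 5 Rx buffers, which
 * is where the "5 Rx buffers" figure above comes from.  With 256
 * descriptors, 256 / 5 = 51, i.e. room for roughly 50 full-sized
 * jumbo packets.
 */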

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
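
/*
 * Expansion sketch (illustrative): inside struct wm_txqueue,
 * WM_Q_EVCNT_DEFINE(txq, txdw) expands to roughly
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (## is not pasted inside a string literal, so every name buffer has
 * the same fixed size).  WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname)
 * then snprintf()s "txq00txdw" into the buffer and registers the
 * counter via evcnt_attach_dynamic().
 */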

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue intermediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
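
/*
 * Usage sketch (illustrative, with hypothetical locals): these op
 * tables are filled in at attach time with chip-specific functions,
 * so NVM consumers can stay generic:
 *
 *	if (sc->nvm.acquire(sc) == 0) {
 *		rv = sc->nvm.read(sc, offset, nwords, data);
 *		sc->nvm.release(sc);
 *	}
 *
 * The same pattern serializes PHY access through sc->phy.
 */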

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
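
/*
 * Illustrative sketch: rxq_tailp makes appending O(1).  After
 * WM_RXCHAIN_RESET(), rxq_tailp points at rxq_head; each
 * WM_RXCHAIN_LINK() stores the new mbuf through *rxq_tailp (i.e. into
 * the previous mbuf's m_next, or into rxq_head for the first buffer)
 * and then advances rxq_tailp:
 *
 *	WM_RXCHAIN_RESET(rxq);	  // rxq_head = NULL, tailp = &rxq_head
 *	WM_RXCHAIN_LINK(rxq, m0); // rxq_head = m0, tailp = &m0->m_next
 *	WM_RXCHAIN_LINK(rxq, m1); // m0->m_next = m1, tailp = &m1->m_next
 */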

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
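
/*
 * Worked example (illustrative): descriptor base addresses are
 * programmed into the hardware as two 32-bit halves.  If txq_desc_dma
 * were 0x123456780 on a system with a 64-bit bus_addr_t:
 *
 *	WM_CDTXADDR_LO(txq, 0) == 0x23456780	(low 32 bits)
 *	WM_CDTXADDR_HI(txq, 0) == 0x1		(bits 63:32)
 *
 * With a 32-bit bus_addr_t the HI half is a compile-time 0.
 */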

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1334 	  WM_T_82575,		WMP_F_SERDES },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1336 	  "82575GB quad-1000baseT Ethernet",
   1337 	  WM_T_82575,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1339 	  "82575GB quad-1000baseT Ethernet (PM)",
   1340 	  WM_T_82575,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1342 	  "82576 1000BaseT Ethernet",
   1343 	  WM_T_82576,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1345 	  "82576 1000BaseX Ethernet",
   1346 	  WM_T_82576,		WMP_F_FIBER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1349 	  "82576 gigabit Ethernet (SERDES)",
   1350 	  WM_T_82576,		WMP_F_SERDES },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1353 	  "82576 quad-1000BaseT Ethernet",
   1354 	  WM_T_82576,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1357 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1358 	  WM_T_82576,		WMP_F_COPPER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1361 	  "82576 gigabit Ethernet",
   1362 	  WM_T_82576,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1365 	  "82576 gigabit Ethernet (SERDES)",
   1366 	  WM_T_82576,		WMP_F_SERDES },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1368 	  "82576 quad-gigabit Ethernet (SERDES)",
   1369 	  WM_T_82576,		WMP_F_SERDES },
   1370 
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1372 	  "82580 1000BaseT Ethernet",
   1373 	  WM_T_82580,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1375 	  "82580 1000BaseX Ethernet",
   1376 	  WM_T_82580,		WMP_F_FIBER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1379 	  "82580 1000BaseT Ethernet (SERDES)",
   1380 	  WM_T_82580,		WMP_F_SERDES },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1383 	  "82580 gigabit Ethernet (SGMII)",
   1384 	  WM_T_82580,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1386 	  "82580 dual-1000BaseT Ethernet",
   1387 	  WM_T_82580,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1390 	  "82580 quad-1000BaseX Ethernet",
   1391 	  WM_T_82580,		WMP_F_FIBER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1394 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1395 	  WM_T_82580,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1398 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1399 	  WM_T_82580,		WMP_F_SERDES },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1402 	  "DH89XXCC 1000BASE-KX Ethernet",
   1403 	  WM_T_82580,		WMP_F_SERDES },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1406 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1407 	  WM_T_82580,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1410 	  "I350 Gigabit Network Connection",
   1411 	  WM_T_I350,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1414 	  "I350 Gigabit Fiber Network Connection",
   1415 	  WM_T_I350,		WMP_F_FIBER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1418 	  "I350 Gigabit Backplane Connection",
   1419 	  WM_T_I350,		WMP_F_SERDES },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1422 	  "I350 Quad Port Gigabit Ethernet",
   1423 	  WM_T_I350,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1426 	  "I350 Gigabit Connection",
   1427 	  WM_T_I350,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1430 	  "I354 Gigabit Ethernet (KX)",
   1431 	  WM_T_I354,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1434 	  "I354 Gigabit Ethernet (SGMII)",
   1435 	  WM_T_I354,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1438 	  "I354 Gigabit Ethernet (2.5G)",
   1439 	  WM_T_I354,		WMP_F_COPPER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1442 	  "I210-T1 Ethernet Server Adapter",
   1443 	  WM_T_I210,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1446 	  "I210 Ethernet (Copper OEM)",
   1447 	  WM_T_I210,		WMP_F_COPPER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1450 	  "I210 Ethernet (Copper IT)",
   1451 	  WM_T_I210,		WMP_F_COPPER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1454 	  "I210 Ethernet (FLASH less)",
   1455 	  WM_T_I210,		WMP_F_COPPER },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1458 	  "I210 Gigabit Ethernet (Fiber)",
   1459 	  WM_T_I210,		WMP_F_FIBER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1462 	  "I210 Gigabit Ethernet (SERDES)",
   1463 	  WM_T_I210,		WMP_F_SERDES },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1466 	  "I210 Gigabit Ethernet (FLASH less)",
   1467 	  WM_T_I210,		WMP_F_SERDES },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1470 	  "I210 Gigabit Ethernet (SGMII)",
   1471 	  WM_T_I210,		WMP_F_COPPER },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1474 	  "I211 Ethernet (COPPER)",
   1475 	  WM_T_I211,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1477 	  "I217 V Ethernet Connection",
   1478 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1480 	  "I217 LM Ethernet Connection",
   1481 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1483 	  "I218 V Ethernet Connection",
   1484 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1486 	  "I218 V Ethernet Connection",
   1487 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1489 	  "I218 V Ethernet Connection",
   1490 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1492 	  "I218 LM Ethernet Connection",
   1493 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1495 	  "I218 LM Ethernet Connection",
   1496 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1498 	  "I218 LM Ethernet Connection",
   1499 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1501 	  "I219 V Ethernet Connection",
   1502 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1504 	  "I219 V Ethernet Connection",
   1505 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1507 	  "I219 V Ethernet Connection",
   1508 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1510 	  "I219 V Ethernet Connection",
   1511 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1513 	  "I219 LM Ethernet Connection",
   1514 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1516 	  "I219 LM Ethernet Connection",
   1517 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1519 	  "I219 LM Ethernet Connection",
   1520 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1522 	  "I219 LM Ethernet Connection",
   1523 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1525 	  "I219 LM Ethernet Connection",
   1526 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1528 	  "I219 V Ethernet Connection",
   1529 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1531 	  "I219 V Ethernet Connection",
   1532 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1534 	  "I219 LM Ethernet Connection",
   1535 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1537 	  "I219 LM Ethernet Connection",
   1538 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1539 	{ 0,			0,
   1540 	  NULL,
   1541 	  0,			0 },
   1542 };
   1543 
   1544 /*
    1545  * Register read/write functions,
    1546  * other than CSR_{READ|WRITE}().
   1547  */
   1548 
   1549 #if 0 /* Not currently used */
   1550 static inline uint32_t
   1551 wm_io_read(struct wm_softc *sc, int reg)
   1552 {
   1553 
   1554 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1555 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1556 }
   1557 #endif
   1558 
   1559 static inline void
   1560 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1561 {
   1562 
   1563 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1564 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1565 }
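
#if 0 /* Illustrative sketch, not used by the driver */
/*
 * The I/O BAR exposes a two-register indirect window: the target register
 * offset goes to BAR offset 0 (IOADDR) and the data is then transferred
 * through BAR offset 4 (IODATA), which is what the bus_space access pairs
 * above implement.  A hypothetical indirect read-modify-write helper built
 * on top of them would look like this:
 */
static inline void
wm_io_setbits(struct wm_softc *sc, int reg, uint32_t bits)
{

	wm_io_write(sc, reg, wm_io_read(sc, reg) | bits);
}
#endif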
   1566 
   1567 static inline void
   1568 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1569     uint32_t data)
   1570 {
   1571 	uint32_t regval;
   1572 	int i;
   1573 
   1574 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1575 
   1576 	CSR_WRITE(sc, reg, regval);
   1577 
   1578 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1579 		delay(5);
   1580 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1581 			break;
   1582 	}
   1583 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1584 		aprint_error("%s: WARNING:"
   1585 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1586 		    device_xname(sc->sc_dev), reg);
   1587 	}
   1588 }
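
/*
 * Note on wm_82575_write_8bit_ctlr_reg() above: it polls SCTL_CTL_READY in
 * 5us steps, so a register that never becomes ready is reported after
 * roughly SCTL_CTL_POLL_TIMEOUT * 5 microseconds instead of spinning
 * forever.
 */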
   1589 
   1590 static inline void
   1591 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1592 {
   1593 	wa->wa_low = htole32(v & 0xffffffffU);
   1594 	if (sizeof(bus_addr_t) == 8)
   1595 		wa->wa_high = htole32((uint64_t) v >> 32);
   1596 	else
   1597 		wa->wa_high = 0;
   1598 }
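
/*
 * Illustrative example (not used by the driver): on a platform with a
 * 64-bit bus_addr_t, wm_set_dma_addr(&rxd->wrx_addr, 0x123456789abcULL)
 * stores wa_low = htole32(0x56789abc) and wa_high = htole32(0x1234);
 * with a 32-bit bus_addr_t, wa_high is simply cleared to zero.
 */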
   1599 
   1600 /*
   1601  * Descriptor sync/init functions.
   1602  */
   1603 static inline void
   1604 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1605 {
   1606 	struct wm_softc *sc = txq->txq_sc;
   1607 
   1608 	/* If it will wrap around, sync to the end of the ring. */
   1609 	if ((start + num) > WM_NTXDESC(txq)) {
   1610 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1611 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1612 		    (WM_NTXDESC(txq) - start), ops);
   1613 		num -= (WM_NTXDESC(txq) - start);
   1614 		start = 0;
   1615 	}
   1616 
   1617 	/* Now sync whatever is left. */
   1618 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1619 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1620 }
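
/*
 * Worked example for wm_cdtxsync() (illustrative only): with
 * WM_NTXDESC(txq) == 256, start == 250 and num == 10, the first
 * bus_dmamap_sync() covers descriptors 250..255, after which start
 * becomes 0 and num becomes 4, so the second sync covers 0..3.
 */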
   1621 
   1622 static inline void
   1623 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1624 {
   1625 	struct wm_softc *sc = rxq->rxq_sc;
   1626 
   1627 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1628 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1629 }
   1630 
   1631 static inline void
   1632 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1633 {
   1634 	struct wm_softc *sc = rxq->rxq_sc;
   1635 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1636 	struct mbuf *m = rxs->rxs_mbuf;
   1637 
   1638 	/*
   1639 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1640 	 * so that the payload after the Ethernet header is aligned
   1641 	 * to a 4-byte boundary.
    1642 	 *
   1643 	 * XXX BRAINDAMAGE ALERT!
   1644 	 * The stupid chip uses the same size for every buffer, which
   1645 	 * is set in the Receive Control register.  We are using the 2K
   1646 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1647 	 * reason, we can't "scoot" packets longer than the standard
   1648 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1649 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1650 	 * the upper layer copy the headers.
   1651 	 */
   1652 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1653 
   1654 	if (sc->sc_type == WM_T_82574) {
   1655 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1656 		rxd->erx_data.erxd_addr =
   1657 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1658 		rxd->erx_data.erxd_dd = 0;
   1659 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1660 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1661 
   1662 		rxd->nqrx_data.nrxd_paddr =
   1663 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1664 		/* Currently, split header is not supported. */
   1665 		rxd->nqrx_data.nrxd_haddr = 0;
   1666 	} else {
   1667 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1668 
   1669 		wm_set_dma_addr(&rxd->wrx_addr,
   1670 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1671 		rxd->wrx_len = 0;
   1672 		rxd->wrx_cksum = 0;
   1673 		rxd->wrx_status = 0;
   1674 		rxd->wrx_errors = 0;
   1675 		rxd->wrx_special = 0;
   1676 	}
   1677 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1678 
   1679 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1680 }
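
/*
 * Illustration of the align_tweak handling above (not driver code): with a
 * 2-byte tweak the 14-byte Ethernet header occupies offsets 2..15 of the
 * cluster, so the IP header that follows starts at offset 16, 4-byte
 * aligned; with a tweak of 0 nothing is scooted and strict-alignment
 * platforms rely on the upper layer to copy the headers instead.
 */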
   1681 
   1682 /*
    1683  * Device driver interface functions and commonly used functions:
    1684  * match, attach, detach, init, start, stop, ioctl, watchdog, and so on.
   1685  */
   1686 
   1687 /* Lookup supported device table */
   1688 static const struct wm_product *
   1689 wm_lookup(const struct pci_attach_args *pa)
   1690 {
   1691 	const struct wm_product *wmp;
   1692 
   1693 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1694 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1695 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1696 			return wmp;
   1697 	}
   1698 	return NULL;
   1699 }
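
/*
 * Note: the scan above relies on the all-zero sentinel entry at the end of
 * wm_products, whose NULL wmp_name terminates the loop.
 */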
   1700 
   1701 /* The match function (ca_match) */
   1702 static int
   1703 wm_match(device_t parent, cfdata_t cf, void *aux)
   1704 {
   1705 	struct pci_attach_args *pa = aux;
   1706 
   1707 	if (wm_lookup(pa) != NULL)
   1708 		return 1;
   1709 
   1710 	return 0;
   1711 }
   1712 
   1713 /* The attach function (ca_attach) */
   1714 static void
   1715 wm_attach(device_t parent, device_t self, void *aux)
   1716 {
   1717 	struct wm_softc *sc = device_private(self);
   1718 	struct pci_attach_args *pa = aux;
   1719 	prop_dictionary_t dict;
   1720 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1721 	pci_chipset_tag_t pc = pa->pa_pc;
   1722 	int counts[PCI_INTR_TYPE_SIZE];
   1723 	pci_intr_type_t max_type;
   1724 	const char *eetype, *xname;
   1725 	bus_space_tag_t memt;
   1726 	bus_space_handle_t memh;
   1727 	bus_size_t memsize;
   1728 	int memh_valid;
   1729 	int i, error;
   1730 	const struct wm_product *wmp;
   1731 	prop_data_t ea;
   1732 	prop_number_t pn;
   1733 	uint8_t enaddr[ETHER_ADDR_LEN];
   1734 	char buf[256];
   1735 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1736 	pcireg_t preg, memtype;
   1737 	uint16_t eeprom_data, apme_mask;
   1738 	bool force_clear_smbi;
   1739 	uint32_t link_mode;
   1740 	uint32_t reg;
   1741 
   1742 	sc->sc_dev = self;
   1743 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1744 	sc->sc_core_stopping = false;
   1745 
   1746 	wmp = wm_lookup(pa);
   1747 #ifdef DIAGNOSTIC
   1748 	if (wmp == NULL) {
   1749 		printf("\n");
   1750 		panic("wm_attach: impossible");
   1751 	}
   1752 #endif
   1753 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1754 
   1755 	sc->sc_pc = pa->pa_pc;
   1756 	sc->sc_pcitag = pa->pa_tag;
   1757 
   1758 	if (pci_dma64_available(pa))
   1759 		sc->sc_dmat = pa->pa_dmat64;
   1760 	else
   1761 		sc->sc_dmat = pa->pa_dmat;
   1762 
   1763 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1764 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1765 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1766 
   1767 	sc->sc_type = wmp->wmp_type;
   1768 
   1769 	/* Set default function pointers */
   1770 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1771 	sc->phy.release = sc->nvm.release = wm_put_null;
   1772 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1773 
   1774 	if (sc->sc_type < WM_T_82543) {
   1775 		if (sc->sc_rev < 2) {
   1776 			aprint_error_dev(sc->sc_dev,
   1777 			    "i82542 must be at least rev. 2\n");
   1778 			return;
   1779 		}
   1780 		if (sc->sc_rev < 3)
   1781 			sc->sc_type = WM_T_82542_2_0;
   1782 	}
   1783 
   1784 	/*
   1785 	 * Disable MSI for Errata:
   1786 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1787 	 *
   1788 	 *  82544: Errata 25
   1789 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1790 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1791 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1792 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1793 	 *
   1794 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1795 	 *
   1796 	 *  82571 & 82572: Errata 63
   1797 	 */
   1798 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1799 	    || (sc->sc_type == WM_T_82572))
   1800 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1801 
   1802 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1803 	    || (sc->sc_type == WM_T_82580)
   1804 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1805 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1806 		sc->sc_flags |= WM_F_NEWQUEUE;
   1807 
   1808 	/* Set device properties (mactype) */
   1809 	dict = device_properties(sc->sc_dev);
   1810 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1811 
   1812 	/*
    1813 	 * Map the device.  All devices support memory-mapped access,
   1814 	 * and it is really required for normal operation.
   1815 	 */
   1816 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1817 	switch (memtype) {
   1818 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1819 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1820 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1821 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1822 		break;
   1823 	default:
   1824 		memh_valid = 0;
   1825 		break;
   1826 	}
   1827 
   1828 	if (memh_valid) {
   1829 		sc->sc_st = memt;
   1830 		sc->sc_sh = memh;
   1831 		sc->sc_ss = memsize;
   1832 	} else {
   1833 		aprint_error_dev(sc->sc_dev,
   1834 		    "unable to map device registers\n");
   1835 		return;
   1836 	}
   1837 
   1838 	/*
   1839 	 * In addition, i82544 and later support I/O mapped indirect
   1840 	 * register access.  It is not desirable (nor supported in
   1841 	 * this driver) to use it for normal operation, though it is
   1842 	 * required to work around bugs in some chip versions.
   1843 	 */
   1844 	if (sc->sc_type >= WM_T_82544) {
   1845 		/* First we have to find the I/O BAR. */
   1846 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1847 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1848 			if (memtype == PCI_MAPREG_TYPE_IO)
   1849 				break;
   1850 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1851 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1852 				i += 4;	/* skip high bits, too */
   1853 		}
   1854 		if (i < PCI_MAPREG_END) {
   1855 			/*
    1856 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1857 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1858 			 * That is no problem, because the newer chips don't
    1859 			 * have this bug.
   1860 			 *
   1861 			 * The i8254x doesn't apparently respond when the
   1862 			 * I/O BAR is 0, which looks somewhat like it's not
   1863 			 * been configured.
   1864 			 */
   1865 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1866 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1867 				aprint_error_dev(sc->sc_dev,
   1868 				    "WARNING: I/O BAR at zero.\n");
   1869 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1870 					0, &sc->sc_iot, &sc->sc_ioh,
   1871 					NULL, &sc->sc_ios) == 0) {
   1872 				sc->sc_flags |= WM_F_IOH_VALID;
   1873 			} else {
   1874 				aprint_error_dev(sc->sc_dev,
   1875 				    "WARNING: unable to map I/O space\n");
   1876 			}
   1877 		}
   1878 
   1879 	}
   1880 
   1881 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1882 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1883 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1884 	if (sc->sc_type < WM_T_82542_2_1)
   1885 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1886 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1887 
   1888 	/* power up chip */
   1889 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1890 	    && error != EOPNOTSUPP) {
   1891 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1892 		return;
   1893 	}
   1894 
   1895 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1896 	/*
    1897 	 * Don't use MSI-X if we can use only one queue; this saves
    1898 	 * interrupt resources.
   1899 	 */
   1900 	if (sc->sc_nqueues > 1) {
   1901 		max_type = PCI_INTR_TYPE_MSIX;
   1902 		/*
    1903 		 * The 82583 has an MSI-X capability in the PCI configuration
    1904 		 * space, but it doesn't actually support it. At least the
    1905 		 * documentation says nothing about MSI-X.
   1906 		 */
   1907 		counts[PCI_INTR_TYPE_MSIX]
   1908 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1909 	} else {
   1910 		max_type = PCI_INTR_TYPE_MSI;
   1911 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1912 	}
   1913 
   1914 	/* Allocation settings */
   1915 	counts[PCI_INTR_TYPE_MSI] = 1;
   1916 	counts[PCI_INTR_TYPE_INTX] = 1;
   1917 	/* overridden by disable flags */
   1918 	if (wm_disable_msi != 0) {
   1919 		counts[PCI_INTR_TYPE_MSI] = 0;
   1920 		if (wm_disable_msix != 0) {
   1921 			max_type = PCI_INTR_TYPE_INTX;
   1922 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1923 		}
   1924 	} else if (wm_disable_msix != 0) {
   1925 		max_type = PCI_INTR_TYPE_MSI;
   1926 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1927 	}
   1928 
   1929 alloc_retry:
   1930 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1931 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1932 		return;
   1933 	}
   1934 
   1935 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1936 		error = wm_setup_msix(sc);
   1937 		if (error) {
   1938 			pci_intr_release(pc, sc->sc_intrs,
   1939 			    counts[PCI_INTR_TYPE_MSIX]);
   1940 
   1941 			/* Setup for MSI: Disable MSI-X */
   1942 			max_type = PCI_INTR_TYPE_MSI;
   1943 			counts[PCI_INTR_TYPE_MSI] = 1;
   1944 			counts[PCI_INTR_TYPE_INTX] = 1;
   1945 			goto alloc_retry;
   1946 		}
   1947 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1948 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1949 		error = wm_setup_legacy(sc);
   1950 		if (error) {
   1951 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1952 			    counts[PCI_INTR_TYPE_MSI]);
   1953 
   1954 			/* The next try is for INTx: Disable MSI */
   1955 			max_type = PCI_INTR_TYPE_INTX;
   1956 			counts[PCI_INTR_TYPE_INTX] = 1;
   1957 			goto alloc_retry;
   1958 		}
   1959 	} else {
   1960 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1961 		error = wm_setup_legacy(sc);
   1962 		if (error) {
   1963 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1964 			    counts[PCI_INTR_TYPE_INTX]);
   1965 			return;
   1966 		}
   1967 	}
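
	/*
	 * To summarize the retry logic above: interrupt allocation is
	 * attempted in the order MSI-X -> MSI -> INTx, dropping down to the
	 * next type whenever wm_setup_msix()/wm_setup_legacy() fails, and
	 * giving up only when even INTx cannot be set up.
	 */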
   1968 
   1969 	/*
   1970 	 * Check the function ID (unit number of the chip).
   1971 	 */
   1972 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1973 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1974 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1975 	    || (sc->sc_type == WM_T_82580)
   1976 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1977 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1978 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1979 	else
   1980 		sc->sc_funcid = 0;
   1981 
   1982 	/*
   1983 	 * Determine a few things about the bus we're connected to.
   1984 	 */
   1985 	if (sc->sc_type < WM_T_82543) {
   1986 		/* We don't really know the bus characteristics here. */
   1987 		sc->sc_bus_speed = 33;
   1988 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1989 		/*
   1990 		 * CSA (Communication Streaming Architecture) is about as fast
    1991 		 * as a 32-bit 66MHz PCI bus.
   1992 		 */
   1993 		sc->sc_flags |= WM_F_CSA;
   1994 		sc->sc_bus_speed = 66;
   1995 		aprint_verbose_dev(sc->sc_dev,
   1996 		    "Communication Streaming Architecture\n");
   1997 		if (sc->sc_type == WM_T_82547) {
   1998 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1999 			callout_setfunc(&sc->sc_txfifo_ch,
   2000 			    wm_82547_txfifo_stall, sc);
   2001 			aprint_verbose_dev(sc->sc_dev,
   2002 			    "using 82547 Tx FIFO stall work-around\n");
   2003 		}
   2004 	} else if (sc->sc_type >= WM_T_82571) {
   2005 		sc->sc_flags |= WM_F_PCIE;
   2006 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2007 		    && (sc->sc_type != WM_T_ICH10)
   2008 		    && (sc->sc_type != WM_T_PCH)
   2009 		    && (sc->sc_type != WM_T_PCH2)
   2010 		    && (sc->sc_type != WM_T_PCH_LPT)
   2011 		    && (sc->sc_type != WM_T_PCH_SPT)
   2012 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2013 			/* ICH* and PCH* have no PCIe capability registers */
   2014 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2015 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2016 				NULL) == 0)
   2017 				aprint_error_dev(sc->sc_dev,
   2018 				    "unable to find PCIe capability\n");
   2019 		}
   2020 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2021 	} else {
   2022 		reg = CSR_READ(sc, WMREG_STATUS);
   2023 		if (reg & STATUS_BUS64)
   2024 			sc->sc_flags |= WM_F_BUS64;
   2025 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2026 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2027 
   2028 			sc->sc_flags |= WM_F_PCIX;
   2029 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2030 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2031 				aprint_error_dev(sc->sc_dev,
   2032 				    "unable to find PCIX capability\n");
   2033 			else if (sc->sc_type != WM_T_82545_3 &&
   2034 				 sc->sc_type != WM_T_82546_3) {
   2035 				/*
   2036 				 * Work around a problem caused by the BIOS
   2037 				 * setting the max memory read byte count
   2038 				 * incorrectly.
   2039 				 */
   2040 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2041 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2042 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2043 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2044 
   2045 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2046 				    PCIX_CMD_BYTECNT_SHIFT;
   2047 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2048 				    PCIX_STATUS_MAXB_SHIFT;
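
				/*
				 * Both fields encode a byte count of
				 * 512 << n: for example, bytecnt == 3
				 * requests 4096-byte reads, and if maxb
				 * is only 2 (2048 bytes) the MMRBC is
				 * clamped down to 2048 below.
				 */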
   2049 				if (bytecnt > maxb) {
   2050 					aprint_verbose_dev(sc->sc_dev,
   2051 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2052 					    512 << bytecnt, 512 << maxb);
   2053 					pcix_cmd = (pcix_cmd &
   2054 					    ~PCIX_CMD_BYTECNT_MASK) |
   2055 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2056 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2057 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2058 					    pcix_cmd);
   2059 				}
   2060 			}
   2061 		}
   2062 		/*
   2063 		 * The quad port adapter is special; it has a PCIX-PCIX
   2064 		 * bridge on the board, and can run the secondary bus at
   2065 		 * a higher speed.
   2066 		 */
   2067 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2068 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2069 								      : 66;
   2070 		} else if (sc->sc_flags & WM_F_PCIX) {
   2071 			switch (reg & STATUS_PCIXSPD_MASK) {
   2072 			case STATUS_PCIXSPD_50_66:
   2073 				sc->sc_bus_speed = 66;
   2074 				break;
   2075 			case STATUS_PCIXSPD_66_100:
   2076 				sc->sc_bus_speed = 100;
   2077 				break;
   2078 			case STATUS_PCIXSPD_100_133:
   2079 				sc->sc_bus_speed = 133;
   2080 				break;
   2081 			default:
   2082 				aprint_error_dev(sc->sc_dev,
   2083 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2084 				    reg & STATUS_PCIXSPD_MASK);
   2085 				sc->sc_bus_speed = 66;
   2086 				break;
   2087 			}
   2088 		} else
   2089 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2090 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2091 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2092 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2093 	}
   2094 
    2095 	/* Disable ASPM L0s and/or L1 as a workaround */
   2096 	wm_disable_aspm(sc);
   2097 
   2098 	/* clear interesting stat counters */
   2099 	CSR_READ(sc, WMREG_COLC);
   2100 	CSR_READ(sc, WMREG_RXERRC);
   2101 
   2102 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2103 	    || (sc->sc_type >= WM_T_ICH8))
   2104 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2105 	if (sc->sc_type >= WM_T_ICH8)
   2106 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2107 
   2108 	/* Set PHY, NVM mutex related stuff */
   2109 	switch (sc->sc_type) {
   2110 	case WM_T_82542_2_0:
   2111 	case WM_T_82542_2_1:
   2112 	case WM_T_82543:
   2113 	case WM_T_82544:
   2114 		/* Microwire */
   2115 		sc->nvm.read = wm_nvm_read_uwire;
   2116 		sc->sc_nvm_wordsize = 64;
   2117 		sc->sc_nvm_addrbits = 6;
   2118 		break;
   2119 	case WM_T_82540:
   2120 	case WM_T_82545:
   2121 	case WM_T_82545_3:
   2122 	case WM_T_82546:
   2123 	case WM_T_82546_3:
   2124 		/* Microwire */
   2125 		sc->nvm.read = wm_nvm_read_uwire;
   2126 		reg = CSR_READ(sc, WMREG_EECD);
   2127 		if (reg & EECD_EE_SIZE) {
   2128 			sc->sc_nvm_wordsize = 256;
   2129 			sc->sc_nvm_addrbits = 8;
   2130 		} else {
   2131 			sc->sc_nvm_wordsize = 64;
   2132 			sc->sc_nvm_addrbits = 6;
   2133 		}
   2134 		sc->sc_flags |= WM_F_LOCK_EECD;
   2135 		sc->nvm.acquire = wm_get_eecd;
   2136 		sc->nvm.release = wm_put_eecd;
   2137 		break;
   2138 	case WM_T_82541:
   2139 	case WM_T_82541_2:
   2140 	case WM_T_82547:
   2141 	case WM_T_82547_2:
   2142 		reg = CSR_READ(sc, WMREG_EECD);
   2143 		/*
    2144 		 * wm_nvm_set_addrbits_size_eecd() itself accesses the SPI only
    2145 		 * on the 8254[17], so set the flags and functions before calling it.
   2146 		 */
   2147 		sc->sc_flags |= WM_F_LOCK_EECD;
   2148 		sc->nvm.acquire = wm_get_eecd;
   2149 		sc->nvm.release = wm_put_eecd;
   2150 		if (reg & EECD_EE_TYPE) {
   2151 			/* SPI */
   2152 			sc->nvm.read = wm_nvm_read_spi;
   2153 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2154 			wm_nvm_set_addrbits_size_eecd(sc);
   2155 		} else {
   2156 			/* Microwire */
   2157 			sc->nvm.read = wm_nvm_read_uwire;
   2158 			if ((reg & EECD_EE_ABITS) != 0) {
   2159 				sc->sc_nvm_wordsize = 256;
   2160 				sc->sc_nvm_addrbits = 8;
   2161 			} else {
   2162 				sc->sc_nvm_wordsize = 64;
   2163 				sc->sc_nvm_addrbits = 6;
   2164 			}
   2165 		}
   2166 		break;
   2167 	case WM_T_82571:
   2168 	case WM_T_82572:
   2169 		/* SPI */
   2170 		sc->nvm.read = wm_nvm_read_eerd;
    2171 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2172 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2173 		wm_nvm_set_addrbits_size_eecd(sc);
   2174 		sc->phy.acquire = wm_get_swsm_semaphore;
   2175 		sc->phy.release = wm_put_swsm_semaphore;
   2176 		sc->nvm.acquire = wm_get_nvm_82571;
   2177 		sc->nvm.release = wm_put_nvm_82571;
   2178 		break;
   2179 	case WM_T_82573:
   2180 	case WM_T_82574:
   2181 	case WM_T_82583:
   2182 		sc->nvm.read = wm_nvm_read_eerd;
    2183 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2184 		if (sc->sc_type == WM_T_82573) {
   2185 			sc->phy.acquire = wm_get_swsm_semaphore;
   2186 			sc->phy.release = wm_put_swsm_semaphore;
   2187 			sc->nvm.acquire = wm_get_nvm_82571;
   2188 			sc->nvm.release = wm_put_nvm_82571;
   2189 		} else {
   2190 			/* Both PHY and NVM use the same semaphore. */
   2191 			sc->phy.acquire = sc->nvm.acquire
   2192 			    = wm_get_swfwhw_semaphore;
   2193 			sc->phy.release = sc->nvm.release
   2194 			    = wm_put_swfwhw_semaphore;
   2195 		}
   2196 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2197 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2198 			sc->sc_nvm_wordsize = 2048;
   2199 		} else {
   2200 			/* SPI */
   2201 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2202 			wm_nvm_set_addrbits_size_eecd(sc);
   2203 		}
   2204 		break;
   2205 	case WM_T_82575:
   2206 	case WM_T_82576:
   2207 	case WM_T_82580:
   2208 	case WM_T_I350:
   2209 	case WM_T_I354:
   2210 	case WM_T_80003:
   2211 		/* SPI */
   2212 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2213 		wm_nvm_set_addrbits_size_eecd(sc);
   2214 		if ((sc->sc_type == WM_T_80003)
   2215 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2216 			sc->nvm.read = wm_nvm_read_eerd;
   2217 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2218 		} else {
   2219 			sc->nvm.read = wm_nvm_read_spi;
   2220 			sc->sc_flags |= WM_F_LOCK_EECD;
   2221 		}
   2222 		sc->phy.acquire = wm_get_phy_82575;
   2223 		sc->phy.release = wm_put_phy_82575;
   2224 		sc->nvm.acquire = wm_get_nvm_80003;
   2225 		sc->nvm.release = wm_put_nvm_80003;
   2226 		break;
   2227 	case WM_T_ICH8:
   2228 	case WM_T_ICH9:
   2229 	case WM_T_ICH10:
   2230 	case WM_T_PCH:
   2231 	case WM_T_PCH2:
   2232 	case WM_T_PCH_LPT:
   2233 		sc->nvm.read = wm_nvm_read_ich8;
   2234 		/* FLASH */
   2235 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2236 		sc->sc_nvm_wordsize = 2048;
   2237 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2238 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2239 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2240 			aprint_error_dev(sc->sc_dev,
   2241 			    "can't map FLASH registers\n");
   2242 			goto out;
   2243 		}
   2244 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2245 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2246 		    ICH_FLASH_SECTOR_SIZE;
   2247 		sc->sc_ich8_flash_bank_size =
   2248 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2249 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2250 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2251 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
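		/*
		 * Worked example (illustrative; assumes the usual 13-bit
		 * GFPREG base/limit fields and 4KB flash sectors): if GFPREG
		 * reads 0x00200001, the flash base is sector 1 (4KB) and the
		 * limit is sector 0x20, so the two NVM banks together span
		 * 0x20 sectors (128KB) and the per-bank size computed above
		 * is 32K 16-bit words.
		 */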
   2252 		sc->sc_flashreg_offset = 0;
   2253 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2254 		sc->phy.release = wm_put_swflag_ich8lan;
   2255 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2256 		sc->nvm.release = wm_put_nvm_ich8lan;
   2257 		break;
   2258 	case WM_T_PCH_SPT:
   2259 	case WM_T_PCH_CNP:
   2260 		sc->nvm.read = wm_nvm_read_spt;
   2261 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2262 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2263 		sc->sc_flasht = sc->sc_st;
   2264 		sc->sc_flashh = sc->sc_sh;
   2265 		sc->sc_ich8_flash_base = 0;
   2266 		sc->sc_nvm_wordsize =
   2267 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2268 		    * NVM_SIZE_MULTIPLIER;
    2269 		/* The size is in bytes; we want words */
   2270 		sc->sc_nvm_wordsize /= 2;
   2271 		/* assume 2 banks */
   2272 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
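		/*
		 * Worked example (illustrative; assumes the usual 4KB
		 * NVM_SIZE_MULTIPLIER): if bits [5:1] of WMREG_STRAP read
		 * 0x1f, the NVM is (0x1f + 1) * 4KB = 128KB, i.e. 64K 16-bit
		 * words, which the two assumed banks above split into 32K
		 * words each.
		 */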
   2273 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2274 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2275 		sc->phy.release = wm_put_swflag_ich8lan;
   2276 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2277 		sc->nvm.release = wm_put_nvm_ich8lan;
   2278 		break;
   2279 	case WM_T_I210:
   2280 	case WM_T_I211:
    2281 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2282 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2283 		if (wm_nvm_flash_presence_i210(sc)) {
   2284 			sc->nvm.read = wm_nvm_read_eerd;
   2285 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2286 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2287 			wm_nvm_set_addrbits_size_eecd(sc);
   2288 		} else {
   2289 			sc->nvm.read = wm_nvm_read_invm;
   2290 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2291 			sc->sc_nvm_wordsize = INVM_SIZE;
   2292 		}
   2293 		sc->phy.acquire = wm_get_phy_82575;
   2294 		sc->phy.release = wm_put_phy_82575;
   2295 		sc->nvm.acquire = wm_get_nvm_80003;
   2296 		sc->nvm.release = wm_put_nvm_80003;
   2297 		break;
   2298 	default:
   2299 		break;
   2300 	}
   2301 
   2302 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2303 	switch (sc->sc_type) {
   2304 	case WM_T_82571:
   2305 	case WM_T_82572:
   2306 		reg = CSR_READ(sc, WMREG_SWSM2);
   2307 		if ((reg & SWSM2_LOCK) == 0) {
   2308 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2309 			force_clear_smbi = true;
   2310 		} else
   2311 			force_clear_smbi = false;
   2312 		break;
   2313 	case WM_T_82573:
   2314 	case WM_T_82574:
   2315 	case WM_T_82583:
   2316 		force_clear_smbi = true;
   2317 		break;
   2318 	default:
   2319 		force_clear_smbi = false;
   2320 		break;
   2321 	}
   2322 	if (force_clear_smbi) {
   2323 		reg = CSR_READ(sc, WMREG_SWSM);
   2324 		if ((reg & SWSM_SMBI) != 0)
   2325 			aprint_error_dev(sc->sc_dev,
   2326 			    "Please update the Bootagent\n");
   2327 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2328 	}
   2329 
   2330 	/*
    2331 	 * Defer printing the EEPROM type until after verifying the checksum.
   2332 	 * This allows the EEPROM type to be printed correctly in the case
   2333 	 * that no EEPROM is attached.
   2334 	 */
   2335 	/*
   2336 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2337 	 * this for later, so we can fail future reads from the EEPROM.
   2338 	 */
   2339 	if (wm_nvm_validate_checksum(sc)) {
   2340 		/*
    2341 		 * Validate a second time, because some PCIe parts fail the
    2342 		 * first check due to the link being in a sleep state.
   2343 		 */
   2344 		if (wm_nvm_validate_checksum(sc))
   2345 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2346 	}
   2347 
   2348 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2349 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2350 	else {
   2351 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2352 		    sc->sc_nvm_wordsize);
   2353 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2354 			aprint_verbose("iNVM");
   2355 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2356 			aprint_verbose("FLASH(HW)");
   2357 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2358 			aprint_verbose("FLASH");
   2359 		else {
   2360 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2361 				eetype = "SPI";
   2362 			else
   2363 				eetype = "MicroWire";
   2364 			aprint_verbose("(%d address bits) %s EEPROM",
   2365 			    sc->sc_nvm_addrbits, eetype);
   2366 		}
   2367 	}
   2368 	wm_nvm_version(sc);
   2369 	aprint_verbose("\n");
   2370 
   2371 	/*
    2372 	 * XXX This is the first call to wm_gmii_setup_phytype; the result
    2373 	 * might be incorrect.
   2374 	 */
   2375 	wm_gmii_setup_phytype(sc, 0, 0);
   2376 
   2377 	/* Reset the chip to a known state. */
   2378 	wm_reset(sc);
   2379 
   2380 	/*
   2381 	 * Check for I21[01] PLL workaround.
   2382 	 *
   2383 	 * Three cases:
   2384 	 * a) Chip is I211.
   2385 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2386 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2387 	 */
   2388 	if (sc->sc_type == WM_T_I211)
   2389 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2390 	if (sc->sc_type == WM_T_I210) {
   2391 		if (!wm_nvm_flash_presence_i210(sc))
   2392 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2393 		else if ((sc->sc_nvm_ver_major < 3)
   2394 		    || ((sc->sc_nvm_ver_major == 3)
   2395 			&& (sc->sc_nvm_ver_minor < 25))) {
   2396 			aprint_verbose_dev(sc->sc_dev,
   2397 			    "ROM image version %d.%d is older than 3.25\n",
   2398 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2399 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2400 		}
   2401 	}
   2402 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2403 		wm_pll_workaround_i210(sc);
   2404 
   2405 	wm_get_wakeup(sc);
   2406 
   2407 	/* Non-AMT based hardware can now take control from firmware */
   2408 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2409 		wm_get_hw_control(sc);
   2410 
   2411 	/*
    2412 	 * Read the Ethernet address from the EEPROM, unless it was
    2413 	 * already found in the device properties.
   2414 	 */
   2415 	ea = prop_dictionary_get(dict, "mac-address");
   2416 	if (ea != NULL) {
   2417 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2418 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2419 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2420 	} else {
   2421 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2422 			aprint_error_dev(sc->sc_dev,
   2423 			    "unable to read Ethernet address\n");
   2424 			goto out;
   2425 		}
   2426 	}
   2427 
   2428 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2429 	    ether_sprintf(enaddr));
   2430 
   2431 	/*
   2432 	 * Read the config info from the EEPROM, and set up various
   2433 	 * bits in the control registers based on their contents.
   2434 	 */
   2435 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2436 	if (pn != NULL) {
   2437 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2438 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2439 	} else {
   2440 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2441 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2442 			goto out;
   2443 		}
   2444 	}
   2445 
   2446 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2447 	if (pn != NULL) {
   2448 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2449 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2450 	} else {
   2451 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2452 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2453 			goto out;
   2454 		}
   2455 	}
   2456 
   2457 	/* check for WM_F_WOL */
   2458 	switch (sc->sc_type) {
   2459 	case WM_T_82542_2_0:
   2460 	case WM_T_82542_2_1:
   2461 	case WM_T_82543:
   2462 		/* dummy? */
   2463 		eeprom_data = 0;
   2464 		apme_mask = NVM_CFG3_APME;
   2465 		break;
   2466 	case WM_T_82544:
   2467 		apme_mask = NVM_CFG2_82544_APM_EN;
   2468 		eeprom_data = cfg2;
   2469 		break;
   2470 	case WM_T_82546:
   2471 	case WM_T_82546_3:
   2472 	case WM_T_82571:
   2473 	case WM_T_82572:
   2474 	case WM_T_82573:
   2475 	case WM_T_82574:
   2476 	case WM_T_82583:
   2477 	case WM_T_80003:
   2478 	default:
   2479 		apme_mask = NVM_CFG3_APME;
   2480 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2481 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2482 		break;
   2483 	case WM_T_82575:
   2484 	case WM_T_82576:
   2485 	case WM_T_82580:
   2486 	case WM_T_I350:
   2487 	case WM_T_I354: /* XXX ok? */
   2488 	case WM_T_ICH8:
   2489 	case WM_T_ICH9:
   2490 	case WM_T_ICH10:
   2491 	case WM_T_PCH:
   2492 	case WM_T_PCH2:
   2493 	case WM_T_PCH_LPT:
   2494 	case WM_T_PCH_SPT:
   2495 	case WM_T_PCH_CNP:
   2496 		/* XXX The funcid should be checked on some devices */
   2497 		apme_mask = WUC_APME;
   2498 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2499 		break;
   2500 	}
   2501 
    2502 	/* Check for the WM_F_WOL flag after the EEPROM settings are read */
   2503 	if ((eeprom_data & apme_mask) != 0)
   2504 		sc->sc_flags |= WM_F_WOL;
   2505 
   2506 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2507 		/* Check NVM for autonegotiation */
   2508 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2509 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2510 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2511 		}
   2512 	}
   2513 
   2514 	/*
    2515 	 * XXX Some multi-port cards need special handling to disable
    2516 	 * a particular port.
   2517 	 */
   2518 
   2519 	if (sc->sc_type >= WM_T_82544) {
   2520 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2521 		if (pn != NULL) {
   2522 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2523 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2524 		} else {
   2525 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2526 				aprint_error_dev(sc->sc_dev,
   2527 				    "unable to read SWDPIN\n");
   2528 				goto out;
   2529 			}
   2530 		}
   2531 	}
   2532 
   2533 	if (cfg1 & NVM_CFG1_ILOS)
   2534 		sc->sc_ctrl |= CTRL_ILOS;
   2535 
   2536 	/*
   2537 	 * XXX
    2538 	 * This code isn't correct, because pins 2 and 3 are located at
    2539 	 * different positions on newer chips. Check all the datasheets.
    2540 	 *
    2541 	 * Until this problem is resolved, restrict this to chips <= 82580.
   2542 	 */
   2543 	if (sc->sc_type <= WM_T_82580) {
   2544 		if (sc->sc_type >= WM_T_82544) {
   2545 			sc->sc_ctrl |=
   2546 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2547 			    CTRL_SWDPIO_SHIFT;
   2548 			sc->sc_ctrl |=
   2549 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2550 			    CTRL_SWDPINS_SHIFT;
   2551 		} else {
   2552 			sc->sc_ctrl |=
   2553 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2554 			    CTRL_SWDPIO_SHIFT;
   2555 		}
   2556 	}
   2557 
    2558 	/* XXX Is this needed for chips other than the 82580? */
   2559 	if (sc->sc_type == WM_T_82580) {
   2560 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2561 		if (nvmword & __BIT(13))
   2562 			sc->sc_ctrl |= CTRL_ILOS;
   2563 	}
   2564 
   2565 #if 0
   2566 	if (sc->sc_type >= WM_T_82544) {
   2567 		if (cfg1 & NVM_CFG1_IPS0)
   2568 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2569 		if (cfg1 & NVM_CFG1_IPS1)
   2570 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2571 		sc->sc_ctrl_ext |=
   2572 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2573 		    CTRL_EXT_SWDPIO_SHIFT;
   2574 		sc->sc_ctrl_ext |=
   2575 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2576 		    CTRL_EXT_SWDPINS_SHIFT;
   2577 	} else {
   2578 		sc->sc_ctrl_ext |=
   2579 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2580 		    CTRL_EXT_SWDPIO_SHIFT;
   2581 	}
   2582 #endif
   2583 
   2584 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2585 #if 0
   2586 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2587 #endif
   2588 
   2589 	if (sc->sc_type == WM_T_PCH) {
   2590 		uint16_t val;
   2591 
   2592 		/* Save the NVM K1 bit setting */
   2593 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2594 
   2595 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2596 			sc->sc_nvm_k1_enabled = 1;
   2597 		else
   2598 			sc->sc_nvm_k1_enabled = 0;
   2599 	}
   2600 
   2601 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2602 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2603 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2604 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2605 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2606 	    || sc->sc_type == WM_T_82573
   2607 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2608 		/* Copper only */
   2609 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2610 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2611 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2612 	    || (sc->sc_type == WM_T_I211)) {
   2613 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2614 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2615 		switch (link_mode) {
   2616 		case CTRL_EXT_LINK_MODE_1000KX:
   2617 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2618 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2619 			break;
   2620 		case CTRL_EXT_LINK_MODE_SGMII:
   2621 			if (wm_sgmii_uses_mdio(sc)) {
   2622 				aprint_verbose_dev(sc->sc_dev,
   2623 				    "SGMII(MDIO)\n");
   2624 				sc->sc_flags |= WM_F_SGMII;
   2625 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2626 				break;
   2627 			}
   2628 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2629 			/*FALLTHROUGH*/
   2630 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2631 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2632 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2633 				if (link_mode
   2634 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2635 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2636 					sc->sc_flags |= WM_F_SGMII;
   2637 				} else {
   2638 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2639 					aprint_verbose_dev(sc->sc_dev,
   2640 					    "SERDES\n");
   2641 				}
   2642 				break;
   2643 			}
   2644 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2645 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2646 
   2647 			/* Change current link mode setting */
   2648 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2649 			switch (sc->sc_mediatype) {
   2650 			case WM_MEDIATYPE_COPPER:
   2651 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2652 				break;
   2653 			case WM_MEDIATYPE_SERDES:
   2654 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2655 				break;
   2656 			default:
   2657 				break;
   2658 			}
   2659 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2660 			break;
   2661 		case CTRL_EXT_LINK_MODE_GMII:
   2662 		default:
   2663 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2664 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2665 			break;
   2666 		}
   2667 
    2669 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2670 			reg |= CTRL_EXT_I2C_ENA;
    2671 		else
    2672 			reg &= ~CTRL_EXT_I2C_ENA;
   2673 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2674 	} else if (sc->sc_type < WM_T_82543 ||
   2675 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2676 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2677 			aprint_error_dev(sc->sc_dev,
   2678 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2679 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2680 		}
   2681 	} else {
   2682 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2683 			aprint_error_dev(sc->sc_dev,
   2684 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2685 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2686 		}
   2687 	}
   2688 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2689 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2690 
   2691 	/* Set device properties (macflags) */
   2692 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2693 
   2694 	/* Initialize the media structures accordingly. */
   2695 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2696 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2697 	else
   2698 		wm_tbi_mediainit(sc); /* All others */
   2699 
   2700 	ifp = &sc->sc_ethercom.ec_if;
   2701 	xname = device_xname(sc->sc_dev);
   2702 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2703 	ifp->if_softc = sc;
   2704 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2705 #ifdef WM_MPSAFE
   2706 	ifp->if_extflags = IFEF_MPSAFE;
   2707 #endif
   2708 	ifp->if_ioctl = wm_ioctl;
   2709 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2710 		ifp->if_start = wm_nq_start;
   2711 		/*
   2712 		 * When the number of CPUs is one and the controller can use
    2713 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2714 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2715 		 * the other for link status changes.
   2716 		 * In this situation, wm_nq_transmit() is disadvantageous
   2717 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2718 		 */
   2719 		if (wm_is_using_multiqueue(sc))
   2720 			ifp->if_transmit = wm_nq_transmit;
   2721 	} else {
   2722 		ifp->if_start = wm_start;
   2723 		/*
    2724 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2725 		 */
   2726 		if (wm_is_using_multiqueue(sc))
   2727 			ifp->if_transmit = wm_transmit;
   2728 	}
    2729 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
   2730 	ifp->if_init = wm_init;
   2731 	ifp->if_stop = wm_stop;
   2732 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2733 	IFQ_SET_READY(&ifp->if_snd);
   2734 
   2735 	/* Check for jumbo frame */
   2736 	switch (sc->sc_type) {
   2737 	case WM_T_82573:
   2738 		/* XXX limited to 9234 if ASPM is disabled */
   2739 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2740 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2741 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2742 		break;
   2743 	case WM_T_82571:
   2744 	case WM_T_82572:
   2745 	case WM_T_82574:
   2746 	case WM_T_82583:
   2747 	case WM_T_82575:
   2748 	case WM_T_82576:
   2749 	case WM_T_82580:
   2750 	case WM_T_I350:
   2751 	case WM_T_I354:
   2752 	case WM_T_I210:
   2753 	case WM_T_I211:
   2754 	case WM_T_80003:
   2755 	case WM_T_ICH9:
   2756 	case WM_T_ICH10:
   2757 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2758 	case WM_T_PCH_LPT:
   2759 	case WM_T_PCH_SPT:
   2760 	case WM_T_PCH_CNP:
   2761 		/* XXX limited to 9234 */
   2762 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2763 		break;
   2764 	case WM_T_PCH:
   2765 		/* XXX limited to 4096 */
   2766 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2767 		break;
   2768 	case WM_T_82542_2_0:
   2769 	case WM_T_82542_2_1:
   2770 	case WM_T_ICH8:
   2771 		/* No support for jumbo frame */
   2772 		break;
   2773 	default:
   2774 		/* ETHER_MAX_LEN_JUMBO */
   2775 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2776 		break;
   2777 	}
   2778 
   2779 	/* If we're a i82543 or greater, we can support VLANs. */
   2780 	if (sc->sc_type >= WM_T_82543)
   2781 		sc->sc_ethercom.ec_capabilities |=
   2782 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2783 
   2784 	/*
    2785 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2786 	 * on i82543 and later.
   2787 	 */
   2788 	if (sc->sc_type >= WM_T_82543) {
   2789 		ifp->if_capabilities |=
   2790 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2791 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2792 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2793 		    IFCAP_CSUM_TCPv6_Tx |
   2794 		    IFCAP_CSUM_UDPv6_Tx;
   2795 	}
   2796 
   2797 	/*
    2798 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2799 	 *
   2800 	 *	82541GI (8086:1076) ... no
   2801 	 *	82572EI (8086:10b9) ... yes
   2802 	 */
   2803 	if (sc->sc_type >= WM_T_82571) {
   2804 		ifp->if_capabilities |=
   2805 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2806 	}
   2807 
   2808 	/*
   2809 	 * If we're a i82544 or greater (except i82547), we can do
   2810 	 * TCP segmentation offload.
   2811 	 */
   2812 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2813 		ifp->if_capabilities |= IFCAP_TSOv4;
   2814 	}
   2815 
   2816 	if (sc->sc_type >= WM_T_82571) {
   2817 		ifp->if_capabilities |= IFCAP_TSOv6;
   2818 	}
   2819 
   2820 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2821 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2822 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2823 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2824 
   2825 #ifdef WM_MPSAFE
   2826 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2827 #else
   2828 	sc->sc_core_lock = NULL;
   2829 #endif
   2830 
   2831 	/* Attach the interface. */
   2832 	error = if_initialize(ifp);
   2833 	if (error != 0) {
   2834 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2835 		    error);
   2836 		return; /* Error */
   2837 	}
   2838 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2839 	ether_ifattach(ifp, enaddr);
   2840 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2841 	if_register(ifp);
   2842 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2843 	    RND_FLAG_DEFAULT);
   2844 
   2845 #ifdef WM_EVENT_COUNTERS
   2846 	/* Attach event counters. */
   2847 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2848 	    NULL, xname, "linkintr");
   2849 
   2850 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2851 	    NULL, xname, "tx_xoff");
   2852 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2853 	    NULL, xname, "tx_xon");
   2854 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2855 	    NULL, xname, "rx_xoff");
   2856 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2857 	    NULL, xname, "rx_xon");
   2858 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2859 	    NULL, xname, "rx_macctl");
   2860 #endif /* WM_EVENT_COUNTERS */
   2861 
   2862 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2863 		pmf_class_network_register(self, ifp);
   2864 	else
   2865 		aprint_error_dev(self, "couldn't establish power handler\n");
   2866 
   2867 	sc->sc_flags |= WM_F_ATTACHED;
   2868  out:
   2869 	return;
   2870 }
   2871 
   2872 /* The detach function (ca_detach) */
   2873 static int
   2874 wm_detach(device_t self, int flags __unused)
   2875 {
   2876 	struct wm_softc *sc = device_private(self);
   2877 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2878 	int i;
   2879 
   2880 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2881 		return 0;
   2882 
    2883 	/* Stop the interface. Callouts are stopped inside it. */
   2884 	wm_stop(ifp, 1);
   2885 
   2886 	pmf_device_deregister(self);
   2887 
   2888 #ifdef WM_EVENT_COUNTERS
   2889 	evcnt_detach(&sc->sc_ev_linkintr);
   2890 
   2891 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2892 	evcnt_detach(&sc->sc_ev_tx_xon);
   2893 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2894 	evcnt_detach(&sc->sc_ev_rx_xon);
   2895 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2896 #endif /* WM_EVENT_COUNTERS */
   2897 
   2898 	/* Tell the firmware about the release */
   2899 	WM_CORE_LOCK(sc);
   2900 	wm_release_manageability(sc);
   2901 	wm_release_hw_control(sc);
   2902 	wm_enable_wakeup(sc);
   2903 	WM_CORE_UNLOCK(sc);
   2904 
   2905 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2906 
   2907 	/* Delete all remaining media. */
   2908 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2909 
   2910 	ether_ifdetach(ifp);
   2911 	if_detach(ifp);
   2912 	if_percpuq_destroy(sc->sc_ipq);
   2913 
   2914 	/* Unload RX dmamaps and free mbufs */
   2915 	for (i = 0; i < sc->sc_nqueues; i++) {
   2916 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2917 		mutex_enter(rxq->rxq_lock);
   2918 		wm_rxdrain(rxq);
   2919 		mutex_exit(rxq->rxq_lock);
   2920 	}
   2921 	/* Must unlock here */
   2922 
   2923 	/* Disestablish the interrupt handler */
   2924 	for (i = 0; i < sc->sc_nintrs; i++) {
   2925 		if (sc->sc_ihs[i] != NULL) {
   2926 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2927 			sc->sc_ihs[i] = NULL;
   2928 		}
   2929 	}
   2930 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2931 
   2932 	wm_free_txrx_queues(sc);
   2933 
   2934 	/* Unmap the registers */
   2935 	if (sc->sc_ss) {
   2936 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2937 		sc->sc_ss = 0;
   2938 	}
   2939 	if (sc->sc_ios) {
   2940 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2941 		sc->sc_ios = 0;
   2942 	}
   2943 	if (sc->sc_flashs) {
   2944 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2945 		sc->sc_flashs = 0;
   2946 	}
   2947 
   2948 	if (sc->sc_core_lock)
   2949 		mutex_obj_free(sc->sc_core_lock);
   2950 	if (sc->sc_ich_phymtx)
   2951 		mutex_obj_free(sc->sc_ich_phymtx);
   2952 	if (sc->sc_ich_nvmmtx)
   2953 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2954 
   2955 	return 0;
   2956 }
   2957 
   2958 static bool
   2959 wm_suspend(device_t self, const pmf_qual_t *qual)
   2960 {
   2961 	struct wm_softc *sc = device_private(self);
   2962 
   2963 	wm_release_manageability(sc);
   2964 	wm_release_hw_control(sc);
   2965 	wm_enable_wakeup(sc);
   2966 
   2967 	return true;
   2968 }
   2969 
   2970 static bool
   2971 wm_resume(device_t self, const pmf_qual_t *qual)
   2972 {
   2973 	struct wm_softc *sc = device_private(self);
   2974 
   2975 	/* Disable ASPM L0s and/or L1 for workaround */
   2976 	wm_disable_aspm(sc);
   2977 	wm_init_manageability(sc);
   2978 
   2979 	return true;
   2980 }
   2981 
   2982 /*
   2983  * wm_watchdog:		[ifnet interface function]
   2984  *
   2985  *	Watchdog timer handler.
   2986  */
   2987 static void
   2988 wm_watchdog(struct ifnet *ifp)
   2989 {
   2990 	int qid;
   2991 	struct wm_softc *sc = ifp->if_softc;
    2992 	uint16_t hang_queue = 0; /* The max number of queues is 16 (82576). */
   2993 
   2994 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2995 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2996 
   2997 		wm_watchdog_txq(ifp, txq, &hang_queue);
   2998 	}
   2999 
   3000 	/*
    3001 	 * If any of the queues has hung up, reset the interface.
   3002 	 */
   3003 	if (hang_queue != 0) {
   3004 		(void) wm_init(ifp);
   3005 
   3006 		/*
    3007 		 * Some upper layer processing, e.g. ALTQ or a single-CPU
    3008 		 * system, still calls ifp->if_start() directly.
   3009 		 */
   3010 		/* Try to get more packets going. */
   3011 		ifp->if_start(ifp);
   3012 	}
   3013 }
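
/*
 * Worked example for hang_queue above (just bit arithmetic): each queue
 * that timed out sets __BIT(wmq_id) in the mask, so if queues 0 and 3
 * both hang, hang_queue == 0x0009.  A uint16_t is wide enough because
 * the largest queue count among supported chips is the 82576's 16.
 */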
   3014 
   3015 
   3016 static void
   3017 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3018 {
   3019 
   3020 	mutex_enter(txq->txq_lock);
   3021 	if (txq->txq_sending &&
   3022 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3023 		wm_watchdog_txq_locked(ifp, txq, hang);
   3024 	}
   3025 	mutex_exit(txq->txq_lock);
   3026 }
   3027 
   3028 static void
   3029 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3030     uint16_t *hang)
   3031 {
   3032 	struct wm_softc *sc = ifp->if_softc;
   3033 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3034 
   3035 	KASSERT(mutex_owned(txq->txq_lock));
   3036 
   3037 	/*
   3038 	 * Since we're using delayed interrupts, sweep up
   3039 	 * before we report an error.
   3040 	 */
   3041 	wm_txeof(txq, UINT_MAX);
   3042 
   3043 	if (txq->txq_sending)
   3044 		*hang |= __BIT(wmq->wmq_id);
   3045 
   3046 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3047 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3048 		    device_xname(sc->sc_dev));
   3049 	} else {
   3050 #ifdef WM_DEBUG
   3051 		int i, j;
   3052 		struct wm_txsoft *txs;
   3053 #endif
   3054 		log(LOG_ERR,
   3055 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3056 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3057 		    txq->txq_next);
   3058 		ifp->if_oerrors++;
   3059 #ifdef WM_DEBUG
   3060 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3061 		    i = WM_NEXTTXS(txq, i)) {
   3062 		    txs = &txq->txq_soft[i];
   3063 		    printf("txs %d tx %d -> %d\n",
   3064 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3065 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3066 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3067 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3068 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3069 				    printf("\t %#08x%08x\n",
   3070 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3071 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3072 			    } else {
   3073 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3074 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3075 					txq->txq_descs[j].wtx_addr.wa_low);
   3076 				    printf("\t %#04x%02x%02x%08x\n",
   3077 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3078 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3079 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3080 					txq->txq_descs[j].wtx_cmdlen);
   3081 			    }
   3082 			if (j == txs->txs_lastdesc)
   3083 				break;
   3084 			}
   3085 		}
   3086 #endif
   3087 	}
   3088 }
   3089 
   3090 /*
   3091  * wm_tick:
   3092  *
   3093  *	One second timer, used to check link status, sweep up
   3094  *	completed transmit jobs, etc.
   3095  */
   3096 static void
   3097 wm_tick(void *arg)
   3098 {
   3099 	struct wm_softc *sc = arg;
   3100 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3101 #ifndef WM_MPSAFE
   3102 	int s = splnet();
   3103 #endif
   3104 
   3105 	WM_CORE_LOCK(sc);
   3106 
   3107 	if (sc->sc_core_stopping) {
   3108 		WM_CORE_UNLOCK(sc);
   3109 #ifndef WM_MPSAFE
   3110 		splx(s);
   3111 #endif
   3112 		return;
   3113 	}
   3114 
   3115 	if (sc->sc_type >= WM_T_82542_2_1) {
   3116 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3117 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3118 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3119 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3120 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3121 	}
   3122 
   3123 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3124 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3125 	    + CSR_READ(sc, WMREG_CRCERRS)
   3126 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3127 	    + CSR_READ(sc, WMREG_SYMERRC)
   3128 	    + CSR_READ(sc, WMREG_RXERRC)
   3129 	    + CSR_READ(sc, WMREG_SEC)
   3130 	    + CSR_READ(sc, WMREG_CEXTERR)
   3131 	    + CSR_READ(sc, WMREG_RLEC);
   3132 	/*
    3133 	 * WMREG_RNBC is incremented when no receive buffers are available
    3134 	 * in host memory. It does not count dropped packets, because the
    3135 	 * ethernet controller can still receive packets in that case as
    3136 	 * long as there is space in the PHY's FIFO.
    3137 	 *
    3138 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT of your
    3139 	 * own instead of if_iqdrops.
   3140 	 */
   3141 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3142 
   3143 	if (sc->sc_flags & WM_F_HAS_MII)
   3144 		mii_tick(&sc->sc_mii);
   3145 	else if ((sc->sc_type >= WM_T_82575)
   3146 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3147 		wm_serdes_tick(sc);
   3148 	else
   3149 		wm_tbi_tick(sc);
   3150 
   3151 	WM_CORE_UNLOCK(sc);
   3152 
   3153 	wm_watchdog(ifp);
   3154 
   3155 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3156 }
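
/*
 * Note on the "0ULL" term in the if_ierrors sum above: each CSR_READ()
 * returns a 32-bit value, so starting the sum with 0ULL promotes the
 * whole chain of additions to 64-bit arithmetic, e.g.
 *
 *	uint64_t sum = 0ULL + a + b;	// no 32-bit wraparound
 *
 * which is what the "ensure quad_t" comment is about.
 */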
   3157 
   3158 static int
   3159 wm_ifflags_cb(struct ethercom *ec)
   3160 {
   3161 	struct ifnet *ifp = &ec->ec_if;
   3162 	struct wm_softc *sc = ifp->if_softc;
   3163 	int rc = 0;
   3164 
   3165 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3166 		device_xname(sc->sc_dev), __func__));
   3167 
   3168 	WM_CORE_LOCK(sc);
   3169 
   3170 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3171 	sc->sc_if_flags = ifp->if_flags;
   3172 
   3173 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3174 		rc = ENETRESET;
   3175 		goto out;
   3176 	}
   3177 
   3178 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3179 		wm_set_filter(sc);
   3180 
   3181 	wm_set_vlan(sc);
   3182 
   3183 out:
   3184 	WM_CORE_UNLOCK(sc);
   3185 
   3186 	return rc;
   3187 }
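
/*
 * Worked example for the change mask in wm_ifflags_cb() (illustrative,
 * assuming NetBSD's IFF_CANTCHANGE includes IFF_PROMISC): toggling
 * only IFF_PROMISC leaves (change & ~(IFF_CANTCHANGE | IFF_DEBUG))
 * zero, so no ENETRESET is returned and only wm_set_filter() runs.
 * Toggling any flag outside that mask returns ENETRESET, which makes
 * the caller reinitialize the interface.
 */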
   3188 
   3189 /*
   3190  * wm_ioctl:		[ifnet interface function]
   3191  *
   3192  *	Handle control requests from the operator.
   3193  */
   3194 static int
   3195 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3196 {
   3197 	struct wm_softc *sc = ifp->if_softc;
   3198 	struct ifreq *ifr = (struct ifreq *) data;
   3199 	struct ifaddr *ifa = (struct ifaddr *)data;
   3200 	struct sockaddr_dl *sdl;
   3201 	int s, error;
   3202 
   3203 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3204 		device_xname(sc->sc_dev), __func__));
   3205 
   3206 #ifndef WM_MPSAFE
   3207 	s = splnet();
   3208 #endif
   3209 	switch (cmd) {
   3210 	case SIOCSIFMEDIA:
   3211 	case SIOCGIFMEDIA:
   3212 		WM_CORE_LOCK(sc);
   3213 		/* Flow control requires full-duplex mode. */
   3214 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3215 		    (ifr->ifr_media & IFM_FDX) == 0)
   3216 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3217 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3218 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3219 				/* We can do both TXPAUSE and RXPAUSE. */
   3220 				ifr->ifr_media |=
   3221 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3222 			}
   3223 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3224 		}
   3225 		WM_CORE_UNLOCK(sc);
   3226 #ifdef WM_MPSAFE
   3227 		s = splnet();
   3228 #endif
   3229 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3230 #ifdef WM_MPSAFE
   3231 		splx(s);
   3232 #endif
   3233 		break;
   3234 	case SIOCINITIFADDR:
   3235 		WM_CORE_LOCK(sc);
   3236 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3237 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3238 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3239 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3240 			/* unicast address is first multicast entry */
   3241 			wm_set_filter(sc);
   3242 			error = 0;
   3243 			WM_CORE_UNLOCK(sc);
   3244 			break;
   3245 		}
   3246 		WM_CORE_UNLOCK(sc);
   3247 		/*FALLTHROUGH*/
   3248 	default:
   3249 #ifdef WM_MPSAFE
   3250 		s = splnet();
   3251 #endif
   3252 		/* It may call wm_start, so unlock here */
   3253 		error = ether_ioctl(ifp, cmd, data);
   3254 #ifdef WM_MPSAFE
   3255 		splx(s);
   3256 #endif
   3257 		if (error != ENETRESET)
   3258 			break;
   3259 
   3260 		error = 0;
   3261 
   3262 		if (cmd == SIOCSIFCAP) {
   3263 			error = (*ifp->if_init)(ifp);
   3264 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3265 			;
   3266 		else if (ifp->if_flags & IFF_RUNNING) {
   3267 			/*
   3268 			 * Multicast list has changed; set the hardware filter
   3269 			 * accordingly.
   3270 			 */
   3271 			WM_CORE_LOCK(sc);
   3272 			wm_set_filter(sc);
   3273 			WM_CORE_UNLOCK(sc);
   3274 		}
   3275 		break;
   3276 	}
   3277 
   3278 #ifndef WM_MPSAFE
   3279 	splx(s);
   3280 #endif
   3281 	return error;
   3282 }
   3283 
   3284 /* MAC address related */
   3285 
   3286 /*
    3287  * Get the offset of the MAC address and return it.
    3288  * If an error occurred, use offset 0.
   3289  */
   3290 static uint16_t
   3291 wm_check_alt_mac_addr(struct wm_softc *sc)
   3292 {
   3293 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3294 	uint16_t offset = NVM_OFF_MACADDR;
   3295 
   3296 	/* Try to read alternative MAC address pointer */
   3297 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3298 		return 0;
   3299 
    3300 	/* Check whether the pointer is valid. */
   3301 	if ((offset == 0x0000) || (offset == 0xffff))
   3302 		return 0;
   3303 
   3304 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3305 	/*
    3306 	 * Check whether the alternative MAC address is valid.
    3307 	 * Some cards have a non-0xffff pointer but don't actually use
    3308 	 * an alternative MAC address.
    3309 	 *
    3310 	 * A valid unicast address must not have the multicast bit set.
   3311 	 */
   3312 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3313 		if (((myea[0] & 0xff) & 0x01) == 0)
   3314 			return offset; /* Found */
   3315 
   3316 	/* Not found */
   3317 	return 0;
   3318 }
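
/*
 * Worked example for wm_check_alt_mac_addr() (illustrative; assuming
 * NVM_OFF_MACADDR_82571(n) expands to n * 3, i.e. three NVM words per
 * function): if the alternative MAC address pointer word is 0x0040 and
 * this is PCI function 1, the candidate address starts at NVM word
 * 0x0043.  If the first byte read from there has the multicast bit
 * (0x01) set, the entry is treated as invalid and offset 0 is used
 * instead.
 */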
   3319 
   3320 static int
   3321 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3322 {
   3323 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3324 	uint16_t offset = NVM_OFF_MACADDR;
   3325 	int do_invert = 0;
   3326 
   3327 	switch (sc->sc_type) {
   3328 	case WM_T_82580:
   3329 	case WM_T_I350:
   3330 	case WM_T_I354:
   3331 		/* EEPROM Top Level Partitioning */
   3332 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3333 		break;
   3334 	case WM_T_82571:
   3335 	case WM_T_82575:
   3336 	case WM_T_82576:
   3337 	case WM_T_80003:
   3338 	case WM_T_I210:
   3339 	case WM_T_I211:
   3340 		offset = wm_check_alt_mac_addr(sc);
   3341 		if (offset == 0)
   3342 			if ((sc->sc_funcid & 0x01) == 1)
   3343 				do_invert = 1;
   3344 		break;
   3345 	default:
   3346 		if ((sc->sc_funcid & 0x01) == 1)
   3347 			do_invert = 1;
   3348 		break;
   3349 	}
   3350 
   3351 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3352 		goto bad;
   3353 
   3354 	enaddr[0] = myea[0] & 0xff;
   3355 	enaddr[1] = myea[0] >> 8;
   3356 	enaddr[2] = myea[1] & 0xff;
   3357 	enaddr[3] = myea[1] >> 8;
   3358 	enaddr[4] = myea[2] & 0xff;
   3359 	enaddr[5] = myea[2] >> 8;
   3360 
   3361 	/*
   3362 	 * Toggle the LSB of the MAC address on the second port
   3363 	 * of some dual port cards.
   3364 	 */
   3365 	if (do_invert != 0)
   3366 		enaddr[5] ^= 1;
   3367 
   3368 	return 0;
   3369 
   3370  bad:
   3371 	return -1;
   3372 }
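
/*
 * Worked example for wm_read_mac_addr() (just byte unpacking): the NVM
 * stores the MAC address as three little-endian words, so
 * myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to 00:11:22:33:44:55.
 * On the second port of an affected dual port card (do_invert != 0)
 * the LSB of the last byte is toggled, yielding 00:11:22:33:44:54.
 */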
   3373 
   3374 /*
   3375  * wm_set_ral:
   3376  *
    3377  *	Set an entry in the receive address list.
   3378  */
   3379 static void
   3380 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3381 {
   3382 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3383 	uint32_t wlock_mac;
   3384 	int rv;
   3385 
   3386 	if (enaddr != NULL) {
   3387 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3388 		    (enaddr[3] << 24);
   3389 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3390 		ral_hi |= RAL_AV;
   3391 	} else {
   3392 		ral_lo = 0;
   3393 		ral_hi = 0;
   3394 	}
   3395 
   3396 	switch (sc->sc_type) {
   3397 	case WM_T_82542_2_0:
   3398 	case WM_T_82542_2_1:
   3399 	case WM_T_82543:
   3400 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3401 		CSR_WRITE_FLUSH(sc);
   3402 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3403 		CSR_WRITE_FLUSH(sc);
   3404 		break;
   3405 	case WM_T_PCH2:
   3406 	case WM_T_PCH_LPT:
   3407 	case WM_T_PCH_SPT:
   3408 	case WM_T_PCH_CNP:
   3409 		if (idx == 0) {
   3410 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3411 			CSR_WRITE_FLUSH(sc);
   3412 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3413 			CSR_WRITE_FLUSH(sc);
   3414 			return;
   3415 		}
   3416 		if (sc->sc_type != WM_T_PCH2) {
   3417 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3418 			    FWSM_WLOCK_MAC);
   3419 			addrl = WMREG_SHRAL(idx - 1);
   3420 			addrh = WMREG_SHRAH(idx - 1);
   3421 		} else {
   3422 			wlock_mac = 0;
   3423 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3424 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3425 		}
   3426 
   3427 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3428 			rv = wm_get_swflag_ich8lan(sc);
   3429 			if (rv != 0)
   3430 				return;
   3431 			CSR_WRITE(sc, addrl, ral_lo);
   3432 			CSR_WRITE_FLUSH(sc);
   3433 			CSR_WRITE(sc, addrh, ral_hi);
   3434 			CSR_WRITE_FLUSH(sc);
   3435 			wm_put_swflag_ich8lan(sc);
   3436 		}
   3437 
   3438 		break;
   3439 	default:
   3440 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3441 		CSR_WRITE_FLUSH(sc);
   3442 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3443 		CSR_WRITE_FLUSH(sc);
   3444 		break;
   3445 	}
   3446 }
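
/*
 * Worked example for the RAL/RAH packing in wm_set_ral() (just bit
 * arithmetic): for the address 00:11:22:33:44:55,
 *
 *	ral_lo = 0x33221100;		// enaddr[3..0]
 *	ral_hi = 0x00005544 | RAL_AV;	// enaddr[5..4] + Address Valid
 *
 * so the low register holds the first four bytes and the high register
 * the last two.
 */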
   3447 
   3448 /*
   3449  * wm_mchash:
   3450  *
   3451  *	Compute the hash of the multicast address for the 4096-bit
   3452  *	multicast filter.
   3453  */
   3454 static uint32_t
   3455 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3456 {
   3457 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3458 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3459 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3460 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3461 	uint32_t hash;
   3462 
   3463 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3464 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3465 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3466 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3467 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3468 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3469 		return (hash & 0x3ff);
   3470 	}
   3471 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3472 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3473 
   3474 	return (hash & 0xfff);
   3475 }
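
/*
 * Worked example for wm_mchash() (illustrative, with sc_mchash_type
 * == 0 on a non-ICH/PCH chip): for the multicast address
 * 01:00:5e:00:00:01, enaddr[4] == 0x00 and enaddr[5] == 0x01, so
 *
 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010;
 *
 * wm_set_filter() then uses reg = hash >> 5 = 0 and
 * bit = hash & 0x1f = 16, i.e. bit 16 of MTA[0] is set.
 */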
   3476 
   3477 /*
   3478  * wm_set_filter:
   3479  *
   3480  *	Set up the receive filter.
   3481  */
   3482 static void
   3483 wm_set_filter(struct wm_softc *sc)
   3484 {
   3485 	struct ethercom *ec = &sc->sc_ethercom;
   3486 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3487 	struct ether_multi *enm;
   3488 	struct ether_multistep step;
   3489 	bus_addr_t mta_reg;
   3490 	uint32_t hash, reg, bit;
   3491 	int i, size, ralmax;
   3492 
   3493 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3494 		device_xname(sc->sc_dev), __func__));
   3495 
   3496 	if (sc->sc_type >= WM_T_82544)
   3497 		mta_reg = WMREG_CORDOVA_MTA;
   3498 	else
   3499 		mta_reg = WMREG_MTA;
   3500 
   3501 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3502 
   3503 	if (ifp->if_flags & IFF_BROADCAST)
   3504 		sc->sc_rctl |= RCTL_BAM;
   3505 	if (ifp->if_flags & IFF_PROMISC) {
   3506 		sc->sc_rctl |= RCTL_UPE;
   3507 		goto allmulti;
   3508 	}
   3509 
   3510 	/*
   3511 	 * Set the station address in the first RAL slot, and
   3512 	 * clear the remaining slots.
   3513 	 */
   3514 	if (sc->sc_type == WM_T_ICH8)
    3515 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3516 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3517 	    || (sc->sc_type == WM_T_PCH))
   3518 		size = WM_RAL_TABSIZE_ICH8;
   3519 	else if (sc->sc_type == WM_T_PCH2)
   3520 		size = WM_RAL_TABSIZE_PCH2;
   3521 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3522 	    || (sc->sc_type == WM_T_PCH_CNP))
   3523 		size = WM_RAL_TABSIZE_PCH_LPT;
   3524 	else if (sc->sc_type == WM_T_82575)
   3525 		size = WM_RAL_TABSIZE_82575;
   3526 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3527 		size = WM_RAL_TABSIZE_82576;
   3528 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3529 		size = WM_RAL_TABSIZE_I350;
   3530 	else
   3531 		size = WM_RAL_TABSIZE;
   3532 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3533 
   3534 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3535 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3536 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3537 		switch (i) {
   3538 		case 0:
   3539 			/* We can use all entries */
   3540 			ralmax = size;
   3541 			break;
   3542 		case 1:
   3543 			/* Only RAR[0] */
   3544 			ralmax = 1;
   3545 			break;
   3546 		default:
    3547 			/* Available SHRA entries + RAR[0] */
   3548 			ralmax = i + 1;
   3549 		}
   3550 	} else
   3551 		ralmax = size;
   3552 	for (i = 1; i < size; i++) {
   3553 		if (i < ralmax)
   3554 			wm_set_ral(sc, NULL, i);
   3555 	}
   3556 
   3557 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3558 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3559 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3560 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3561 		size = WM_ICH8_MC_TABSIZE;
   3562 	else
   3563 		size = WM_MC_TABSIZE;
   3564 	/* Clear out the multicast table. */
   3565 	for (i = 0; i < size; i++) {
   3566 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3567 		CSR_WRITE_FLUSH(sc);
   3568 	}
   3569 
   3570 	ETHER_LOCK(ec);
   3571 	ETHER_FIRST_MULTI(step, ec, enm);
   3572 	while (enm != NULL) {
   3573 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3574 			ETHER_UNLOCK(ec);
   3575 			/*
   3576 			 * We must listen to a range of multicast addresses.
   3577 			 * For now, just accept all multicasts, rather than
   3578 			 * trying to set only those filter bits needed to match
   3579 			 * the range.  (At this time, the only use of address
   3580 			 * ranges is for IP multicast routing, for which the
   3581 			 * range is big enough to require all bits set.)
   3582 			 */
   3583 			goto allmulti;
   3584 		}
   3585 
   3586 		hash = wm_mchash(sc, enm->enm_addrlo);
   3587 
   3588 		reg = (hash >> 5);
   3589 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3590 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3591 		    || (sc->sc_type == WM_T_PCH2)
   3592 		    || (sc->sc_type == WM_T_PCH_LPT)
   3593 		    || (sc->sc_type == WM_T_PCH_SPT)
   3594 		    || (sc->sc_type == WM_T_PCH_CNP))
   3595 			reg &= 0x1f;
   3596 		else
   3597 			reg &= 0x7f;
   3598 		bit = hash & 0x1f;
   3599 
   3600 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3601 		hash |= 1U << bit;
   3602 
   3603 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3604 			/*
    3605 			 * 82544 Errata 9: Certain registers cannot be written
   3606 			 * with particular alignments in PCI-X bus operation
   3607 			 * (FCAH, MTA and VFTA).
   3608 			 */
   3609 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3610 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3611 			CSR_WRITE_FLUSH(sc);
   3612 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3613 			CSR_WRITE_FLUSH(sc);
   3614 		} else {
   3615 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3616 			CSR_WRITE_FLUSH(sc);
   3617 		}
   3618 
   3619 		ETHER_NEXT_MULTI(step, enm);
   3620 	}
   3621 	ETHER_UNLOCK(ec);
   3622 
   3623 	ifp->if_flags &= ~IFF_ALLMULTI;
   3624 	goto setit;
   3625 
   3626  allmulti:
   3627 	ifp->if_flags |= IFF_ALLMULTI;
   3628 	sc->sc_rctl |= RCTL_MPE;
   3629 
   3630  setit:
   3631 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3632 }
   3633 
   3634 /* Reset and init related */
   3635 
   3636 static void
   3637 wm_set_vlan(struct wm_softc *sc)
   3638 {
   3639 
   3640 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3641 		device_xname(sc->sc_dev), __func__));
   3642 
   3643 	/* Deal with VLAN enables. */
   3644 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3645 		sc->sc_ctrl |= CTRL_VME;
   3646 	else
   3647 		sc->sc_ctrl &= ~CTRL_VME;
   3648 
   3649 	/* Write the control registers. */
   3650 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3651 }
   3652 
   3653 static void
   3654 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3655 {
   3656 	uint32_t gcr;
   3657 	pcireg_t ctrl2;
   3658 
   3659 	gcr = CSR_READ(sc, WMREG_GCR);
   3660 
   3661 	/* Only take action if timeout value is defaulted to 0 */
   3662 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3663 		goto out;
   3664 
   3665 	if ((gcr & GCR_CAP_VER2) == 0) {
   3666 		gcr |= GCR_CMPL_TMOUT_10MS;
   3667 		goto out;
   3668 	}
   3669 
   3670 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3671 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3672 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3673 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3674 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3675 
   3676 out:
   3677 	/* Disable completion timeout resend */
   3678 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3679 
   3680 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3681 }
   3682 
   3683 void
   3684 wm_get_auto_rd_done(struct wm_softc *sc)
   3685 {
   3686 	int i;
   3687 
    3688 	/* Wait for eeprom to reload */
   3689 	switch (sc->sc_type) {
   3690 	case WM_T_82571:
   3691 	case WM_T_82572:
   3692 	case WM_T_82573:
   3693 	case WM_T_82574:
   3694 	case WM_T_82583:
   3695 	case WM_T_82575:
   3696 	case WM_T_82576:
   3697 	case WM_T_82580:
   3698 	case WM_T_I350:
   3699 	case WM_T_I354:
   3700 	case WM_T_I210:
   3701 	case WM_T_I211:
   3702 	case WM_T_80003:
   3703 	case WM_T_ICH8:
   3704 	case WM_T_ICH9:
   3705 		for (i = 0; i < 10; i++) {
   3706 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3707 				break;
   3708 			delay(1000);
   3709 		}
   3710 		if (i == 10) {
   3711 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3712 			    "complete\n", device_xname(sc->sc_dev));
   3713 		}
   3714 		break;
   3715 	default:
   3716 		break;
   3717 	}
   3718 }
   3719 
   3720 void
   3721 wm_lan_init_done(struct wm_softc *sc)
   3722 {
   3723 	uint32_t reg = 0;
   3724 	int i;
   3725 
   3726 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3727 		device_xname(sc->sc_dev), __func__));
   3728 
   3729 	/* Wait for eeprom to reload */
   3730 	switch (sc->sc_type) {
   3731 	case WM_T_ICH10:
   3732 	case WM_T_PCH:
   3733 	case WM_T_PCH2:
   3734 	case WM_T_PCH_LPT:
   3735 	case WM_T_PCH_SPT:
   3736 	case WM_T_PCH_CNP:
   3737 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3738 			reg = CSR_READ(sc, WMREG_STATUS);
   3739 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3740 				break;
   3741 			delay(100);
   3742 		}
   3743 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3744 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3745 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3746 		}
   3747 		break;
   3748 	default:
   3749 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3750 		    __func__);
   3751 		break;
   3752 	}
   3753 
   3754 	reg &= ~STATUS_LAN_INIT_DONE;
   3755 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3756 }
   3757 
   3758 void
   3759 wm_get_cfg_done(struct wm_softc *sc)
   3760 {
   3761 	int mask;
   3762 	uint32_t reg;
   3763 	int i;
   3764 
   3765 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3766 		device_xname(sc->sc_dev), __func__));
   3767 
   3768 	/* Wait for eeprom to reload */
   3769 	switch (sc->sc_type) {
   3770 	case WM_T_82542_2_0:
   3771 	case WM_T_82542_2_1:
   3772 		/* null */
   3773 		break;
   3774 	case WM_T_82543:
   3775 	case WM_T_82544:
   3776 	case WM_T_82540:
   3777 	case WM_T_82545:
   3778 	case WM_T_82545_3:
   3779 	case WM_T_82546:
   3780 	case WM_T_82546_3:
   3781 	case WM_T_82541:
   3782 	case WM_T_82541_2:
   3783 	case WM_T_82547:
   3784 	case WM_T_82547_2:
   3785 	case WM_T_82573:
   3786 	case WM_T_82574:
   3787 	case WM_T_82583:
   3788 		/* generic */
   3789 		delay(10*1000);
   3790 		break;
   3791 	case WM_T_80003:
   3792 	case WM_T_82571:
   3793 	case WM_T_82572:
   3794 	case WM_T_82575:
   3795 	case WM_T_82576:
   3796 	case WM_T_82580:
   3797 	case WM_T_I350:
   3798 	case WM_T_I354:
   3799 	case WM_T_I210:
   3800 	case WM_T_I211:
   3801 		if (sc->sc_type == WM_T_82571) {
   3802 			/* Only 82571 shares port 0 */
   3803 			mask = EEMNGCTL_CFGDONE_0;
   3804 		} else
   3805 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3806 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3807 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3808 				break;
   3809 			delay(1000);
   3810 		}
   3811 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3812 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3813 				device_xname(sc->sc_dev), __func__));
   3814 		}
   3815 		break;
   3816 	case WM_T_ICH8:
   3817 	case WM_T_ICH9:
   3818 	case WM_T_ICH10:
   3819 	case WM_T_PCH:
   3820 	case WM_T_PCH2:
   3821 	case WM_T_PCH_LPT:
   3822 	case WM_T_PCH_SPT:
   3823 	case WM_T_PCH_CNP:
   3824 		delay(10*1000);
   3825 		if (sc->sc_type >= WM_T_ICH10)
   3826 			wm_lan_init_done(sc);
   3827 		else
   3828 			wm_get_auto_rd_done(sc);
   3829 
   3830 		reg = CSR_READ(sc, WMREG_STATUS);
   3831 		if ((reg & STATUS_PHYRA) != 0)
   3832 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3833 		break;
   3834 	default:
   3835 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3836 		    __func__);
   3837 		break;
   3838 	}
   3839 }
   3840 
   3841 void
   3842 wm_phy_post_reset(struct wm_softc *sc)
   3843 {
   3844 	uint32_t reg;
   3845 
   3846 	/* This function is only for ICH8 and newer. */
   3847 	if (sc->sc_type < WM_T_ICH8)
   3848 		return;
   3849 
   3850 	if (wm_phy_resetisblocked(sc)) {
   3851 		/* XXX */
   3852 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3853 		return;
   3854 	}
   3855 
   3856 	/* Allow time for h/w to get to quiescent state after reset */
   3857 	delay(10*1000);
   3858 
   3859 	/* Perform any necessary post-reset workarounds */
   3860 	if (sc->sc_type == WM_T_PCH)
   3861 		wm_hv_phy_workaround_ich8lan(sc);
   3862 	if (sc->sc_type == WM_T_PCH2)
   3863 		wm_lv_phy_workaround_ich8lan(sc);
   3864 
   3865 	/* Clear the host wakeup bit after lcd reset */
   3866 	if (sc->sc_type >= WM_T_PCH) {
   3867 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3868 		    BM_PORT_GEN_CFG);
   3869 		reg &= ~BM_WUC_HOST_WU_BIT;
   3870 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3871 		    BM_PORT_GEN_CFG, reg);
   3872 	}
   3873 
   3874 	/* Configure the LCD with the extended configuration region in NVM */
   3875 	wm_init_lcd_from_nvm(sc);
   3876 
   3877 	/* XXX Configure the LCD with the OEM bits in NVM */
   3878 
   3879 	if (sc->sc_type == WM_T_PCH2) {
   3880 		/* Ungate automatic PHY configuration on non-managed 82579 */
   3881 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   3882 			delay(10 * 1000);
   3883 			wm_gate_hw_phy_config_ich8lan(sc, false);
   3884 		}
   3885 		/* XXX Set EEE LPI Update Timer to 200usec */
   3886 	}
   3887 }
   3888 
   3889 /* Only for PCH and newer */
   3890 static void
   3891 wm_write_smbus_addr(struct wm_softc *sc)
   3892 {
   3893 	uint32_t strap, freq;
   3894 	uint32_t phy_data;
   3895 
   3896 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3897 		device_xname(sc->sc_dev), __func__));
   3898 
   3899 	strap = CSR_READ(sc, WMREG_STRAP);
   3900 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3901 
   3902 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3903 
   3904 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3905 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3906 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3907 
   3908 	if (sc->sc_phytype == WMPHY_I217) {
   3909 		/* Restore SMBus frequency */
    3910 		if (freq--) {
   3911 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3912 			    | HV_SMB_ADDR_FREQ_HIGH);
   3913 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3914 			    HV_SMB_ADDR_FREQ_LOW);
   3915 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3916 			    HV_SMB_ADDR_FREQ_HIGH);
   3917 		} else {
   3918 			DPRINTF(WM_DEBUG_INIT,
   3919 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3920 				device_xname(sc->sc_dev), __func__));
   3921 		}
   3922 	}
   3923 
   3924 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3925 }
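
/*
 * Worked example for the SMBus frequency restore above (this traces
 * the code, not the datasheet): a STRAP_FREQ field of 2 passes the
 * "if (freq--)" test and leaves freq == 1, so HV_SMB_ADDR_FREQ_LOW is
 * set and HV_SMB_ADDR_FREQ_HIGH stays clear.  A field of 0 is treated
 * as an unsupported frequency and only the debug message is printed.
 */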
   3926 
   3927 void
   3928 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3929 {
   3930 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3931 	uint16_t phy_page = 0;
   3932 
   3933 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3934 		device_xname(sc->sc_dev), __func__));
   3935 
   3936 	switch (sc->sc_type) {
   3937 	case WM_T_ICH8:
   3938 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3939 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3940 			return;
   3941 
   3942 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3943 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3944 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3945 			break;
   3946 		}
   3947 		/* FALLTHROUGH */
   3948 	case WM_T_PCH:
   3949 	case WM_T_PCH2:
   3950 	case WM_T_PCH_LPT:
   3951 	case WM_T_PCH_SPT:
   3952 	case WM_T_PCH_CNP:
   3953 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3954 		break;
   3955 	default:
   3956 		return;
   3957 	}
   3958 
   3959 	sc->phy.acquire(sc);
   3960 
   3961 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3962 	if ((reg & sw_cfg_mask) == 0)
   3963 		goto release;
   3964 
   3965 	/*
   3966 	 * Make sure HW does not configure LCD from PHY extended configuration
   3967 	 * before SW configuration
   3968 	 */
   3969 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3970 	if ((sc->sc_type < WM_T_PCH2)
   3971 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3972 		goto release;
   3973 
   3974 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3975 		device_xname(sc->sc_dev), __func__));
   3976 	/* word_addr is in DWORD */
   3977 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3978 
   3979 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3980 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3981 	if (cnf_size == 0)
   3982 		goto release;
   3983 
   3984 	if (((sc->sc_type == WM_T_PCH)
   3985 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3986 	    || (sc->sc_type > WM_T_PCH)) {
   3987 		/*
   3988 		 * HW configures the SMBus address and LEDs when the OEM and
   3989 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3990 		 * are cleared, SW will configure them instead.
   3991 		 */
   3992 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3993 			device_xname(sc->sc_dev), __func__));
   3994 		wm_write_smbus_addr(sc);
   3995 
   3996 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3997 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3998 	}
   3999 
   4000 	/* Configure LCD from extended configuration region. */
   4001 	for (i = 0; i < cnf_size; i++) {
   4002 		uint16_t reg_data, reg_addr;
   4003 
   4004 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4005 			goto release;
   4006 
   4007 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4008 			goto release;
   4009 
   4010 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4011 			phy_page = reg_data;
   4012 
   4013 		reg_addr &= IGPHY_MAXREGADDR;
   4014 		reg_addr |= phy_page;
   4015 
   4016 		sc->phy.release(sc); /* XXX */
   4017 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   4018 		sc->phy.acquire(sc); /* XXX */
   4019 	}
   4020 
   4021 release:
   4022 	sc->phy.release(sc);
   4023 	return;
   4024 }
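
/*
 * Layout of the extended configuration region walked above (as the
 * code reads it): the EXTCNFCTR_EXT_CNF_POINTER field is in DWORDs,
 * so it is shifted left by one to get an NVM word address, and each
 * of the cnf_size records is a (data, address) pair of words:
 *
 *	word_addr + i * 2	PHY register data
 *	word_addr + i * 2 + 1	PHY register address
 *
 * A MII_IGPHY_PAGE_SELECT record updates phy_page, which is OR'ed
 * into the masked register address of the records that follow.
 */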
   4025 
   4026 
   4027 /* Init hardware bits */
   4028 void
   4029 wm_initialize_hardware_bits(struct wm_softc *sc)
   4030 {
   4031 	uint32_t tarc0, tarc1, reg;
   4032 
   4033 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4034 		device_xname(sc->sc_dev), __func__));
   4035 
   4036 	/* For 82571 variant, 80003 and ICHs */
   4037 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4038 	    || (sc->sc_type >= WM_T_80003)) {
   4039 
   4040 		/* Transmit Descriptor Control 0 */
   4041 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4042 		reg |= TXDCTL_COUNT_DESC;
   4043 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4044 
   4045 		/* Transmit Descriptor Control 1 */
   4046 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4047 		reg |= TXDCTL_COUNT_DESC;
   4048 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4049 
   4050 		/* TARC0 */
   4051 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4052 		switch (sc->sc_type) {
   4053 		case WM_T_82571:
   4054 		case WM_T_82572:
   4055 		case WM_T_82573:
   4056 		case WM_T_82574:
   4057 		case WM_T_82583:
   4058 		case WM_T_80003:
   4059 			/* Clear bits 30..27 */
   4060 			tarc0 &= ~__BITS(30, 27);
   4061 			break;
   4062 		default:
   4063 			break;
   4064 		}
   4065 
   4066 		switch (sc->sc_type) {
   4067 		case WM_T_82571:
   4068 		case WM_T_82572:
   4069 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4070 
   4071 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4072 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4073 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4074 			/* 8257[12] Errata No.7 */
    4075 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4076 
   4077 			/* TARC1 bit 28 */
   4078 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4079 				tarc1 &= ~__BIT(28);
   4080 			else
   4081 				tarc1 |= __BIT(28);
   4082 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4083 
   4084 			/*
   4085 			 * 8257[12] Errata No.13
    4086 			 * Disable Dynamic Clock Gating.
   4087 			 */
   4088 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4089 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4090 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4091 			break;
   4092 		case WM_T_82573:
   4093 		case WM_T_82574:
   4094 		case WM_T_82583:
   4095 			if ((sc->sc_type == WM_T_82574)
   4096 			    || (sc->sc_type == WM_T_82583))
   4097 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4098 
   4099 			/* Extended Device Control */
   4100 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4101 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4102 			reg |= __BIT(22);	/* Set bit 22 */
   4103 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4104 
   4105 			/* Device Control */
   4106 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4107 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4108 
   4109 			/* PCIe Control Register */
   4110 			/*
   4111 			 * 82573 Errata (unknown).
   4112 			 *
   4113 			 * 82574 Errata 25 and 82583 Errata 12
   4114 			 * "Dropped Rx Packets":
    4115 			 *   NVM image versions 2.1.4 and newer do not have this bug.
   4116 			 */
   4117 			reg = CSR_READ(sc, WMREG_GCR);
   4118 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4119 			CSR_WRITE(sc, WMREG_GCR, reg);
   4120 
   4121 			if ((sc->sc_type == WM_T_82574)
   4122 			    || (sc->sc_type == WM_T_82583)) {
   4123 				/*
   4124 				 * Document says this bit must be set for
   4125 				 * proper operation.
   4126 				 */
   4127 				reg = CSR_READ(sc, WMREG_GCR);
   4128 				reg |= __BIT(22);
   4129 				CSR_WRITE(sc, WMREG_GCR, reg);
   4130 
   4131 				/*
    4132 				 * Apply the workaround for the hardware
    4133 				 * errata documented in the errata docs. It
    4134 				 * fixes an issue where error-prone or
    4135 				 * unreliable PCIe completions occur,
    4136 				 * particularly with ASPM enabled. Without
    4137 				 * the fix, the issue can cause Tx timeouts.
   4138 				 */
   4139 				reg = CSR_READ(sc, WMREG_GCR2);
   4140 				reg |= __BIT(0);
   4141 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4142 			}
   4143 			break;
   4144 		case WM_T_80003:
   4145 			/* TARC0 */
   4146 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4147 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4148 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4149 
   4150 			/* TARC1 bit 28 */
   4151 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4152 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4153 				tarc1 &= ~__BIT(28);
   4154 			else
   4155 				tarc1 |= __BIT(28);
   4156 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4157 			break;
   4158 		case WM_T_ICH8:
   4159 		case WM_T_ICH9:
   4160 		case WM_T_ICH10:
   4161 		case WM_T_PCH:
   4162 		case WM_T_PCH2:
   4163 		case WM_T_PCH_LPT:
   4164 		case WM_T_PCH_SPT:
   4165 		case WM_T_PCH_CNP:
   4166 			/* TARC0 */
   4167 			if (sc->sc_type == WM_T_ICH8) {
   4168 				/* Set TARC0 bits 29 and 28 */
   4169 				tarc0 |= __BITS(29, 28);
   4170 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4171 				tarc0 |= __BIT(29);
   4172 				/*
    4173 				 * Drop bit 28. From Linux.
    4174 				 * See the I218/I219 spec update,
   4175 				 * "5. Buffer Overrun While the I219 is
   4176 				 * Processing DMA Transactions"
   4177 				 */
   4178 				tarc0 &= ~__BIT(28);
   4179 			}
   4180 			/* Set TARC0 bits 23,24,26,27 */
   4181 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4182 
   4183 			/* CTRL_EXT */
   4184 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4185 			reg |= __BIT(22);	/* Set bit 22 */
   4186 			/*
   4187 			 * Enable PHY low-power state when MAC is at D3
   4188 			 * w/o WoL
   4189 			 */
   4190 			if (sc->sc_type >= WM_T_PCH)
   4191 				reg |= CTRL_EXT_PHYPDEN;
   4192 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4193 
   4194 			/* TARC1 */
   4195 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4196 			/* bit 28 */
   4197 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4198 				tarc1 &= ~__BIT(28);
   4199 			else
   4200 				tarc1 |= __BIT(28);
   4201 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4202 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4203 
   4204 			/* Device Status */
   4205 			if (sc->sc_type == WM_T_ICH8) {
   4206 				reg = CSR_READ(sc, WMREG_STATUS);
   4207 				reg &= ~__BIT(31);
   4208 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4209 
   4210 			}
   4211 
   4212 			/* IOSFPC */
   4213 			if (sc->sc_type == WM_T_PCH_SPT) {
   4214 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4215 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4216 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4217 			}
   4218 			/*
    4219 			 * To work around a descriptor data corruption issue
    4220 			 * seen during NFS v2 UDP traffic, just disable the
    4221 			 * NFS filtering capability.
   4222 			 */
   4223 			reg = CSR_READ(sc, WMREG_RFCTL);
   4224 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4225 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4226 			break;
   4227 		default:
   4228 			break;
   4229 		}
   4230 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4231 
   4232 		switch (sc->sc_type) {
   4233 		/*
   4234 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4235 		 * Avoid RSS Hash Value bug.
   4236 		 */
   4237 		case WM_T_82571:
   4238 		case WM_T_82572:
   4239 		case WM_T_82573:
   4240 		case WM_T_80003:
   4241 		case WM_T_ICH8:
   4242 			reg = CSR_READ(sc, WMREG_RFCTL);
    4243 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4244 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4245 			break;
   4246 		case WM_T_82574:
    4247 			/* Use extended Rx descriptors. */
   4248 			reg = CSR_READ(sc, WMREG_RFCTL);
   4249 			reg |= WMREG_RFCTL_EXSTEN;
   4250 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4251 			break;
   4252 		default:
   4253 			break;
   4254 		}
   4255 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4256 		/*
   4257 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4258 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4259 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4260 		 * Correctly by the Device"
   4261 		 *
   4262 		 * I354(C2000) Errata AVR53:
   4263 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4264 		 * Hang"
   4265 		 */
   4266 		reg = CSR_READ(sc, WMREG_RFCTL);
   4267 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4268 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4269 	}
   4270 }
   4271 
   4272 static uint32_t
   4273 wm_rxpbs_adjust_82580(uint32_t val)
   4274 {
   4275 	uint32_t rv = 0;
   4276 
   4277 	if (val < __arraycount(wm_82580_rxpbs_table))
   4278 		rv = wm_82580_rxpbs_table[val];
   4279 
   4280 	return rv;
   4281 }
   4282 
   4283 /*
   4284  * wm_reset_phy:
   4285  *
   4286  *	generic PHY reset function.
   4287  *	Same as e1000_phy_hw_reset_generic()
   4288  */
   4289 static void
   4290 wm_reset_phy(struct wm_softc *sc)
   4291 {
   4292 	uint32_t reg;
   4293 
   4294 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4295 		device_xname(sc->sc_dev), __func__));
   4296 	if (wm_phy_resetisblocked(sc))
   4297 		return;
   4298 
   4299 	sc->phy.acquire(sc);
   4300 
   4301 	reg = CSR_READ(sc, WMREG_CTRL);
   4302 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4303 	CSR_WRITE_FLUSH(sc);
   4304 
   4305 	delay(sc->phy.reset_delay_us);
   4306 
   4307 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4308 	CSR_WRITE_FLUSH(sc);
   4309 
   4310 	delay(150);
   4311 
   4312 	sc->phy.release(sc);
   4313 
   4314 	wm_get_cfg_done(sc);
   4315 	wm_phy_post_reset(sc);
   4316 }
   4317 
   4318 /*
   4319  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4320  * so it is enough to check sc->sc_queue[0] only.
   4321  */
   4322 static void
   4323 wm_flush_desc_rings(struct wm_softc *sc)
   4324 {
   4325 	pcireg_t preg;
   4326 	uint32_t reg;
   4327 	struct wm_txqueue *txq;
   4328 	wiseman_txdesc_t *txd;
   4329 	int nexttx;
   4330 	uint32_t rctl;
   4331 
   4332 	/* First, disable MULR fix in FEXTNVM11 */
   4333 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4334 	reg |= FEXTNVM11_DIS_MULRFIX;
   4335 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4336 
   4337 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4338 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4339 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4340 		return;
   4341 
   4342 	/* TX */
   4343 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4344 	    device_xname(sc->sc_dev), preg, reg);
   4345 	reg = CSR_READ(sc, WMREG_TCTL);
   4346 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4347 
   4348 	txq = &sc->sc_queue[0].wmq_txq;
   4349 	nexttx = txq->txq_next;
   4350 	txd = &txq->txq_descs[nexttx];
   4351 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4352 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4353 	txd->wtx_fields.wtxu_status = 0;
   4354 	txd->wtx_fields.wtxu_options = 0;
   4355 	txd->wtx_fields.wtxu_vlan = 0;
   4356 
   4357 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4358 	    BUS_SPACE_BARRIER_WRITE);
   4359 
   4360 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4361 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4362 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4363 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4364 	delay(250);
   4365 
   4366 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4367 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4368 		return;
   4369 
   4370 	/* RX */
   4371 	printf("%s: Need RX flush (reg = %08x)\n",
   4372 	    device_xname(sc->sc_dev), preg);
   4373 	rctl = CSR_READ(sc, WMREG_RCTL);
   4374 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4375 	CSR_WRITE_FLUSH(sc);
   4376 	delay(150);
   4377 
   4378 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4379 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4380 	reg &= 0xffffc000;
   4381 	/*
   4382 	 * update thresholds: prefetch threshold to 31, host threshold
   4383 	 * to 1 and make sure the granularity is "descriptors" and not
   4384 	 * "cache lines"
   4385 	 */
   4386 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4387 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4388 
   4389 	/*
   4390 	 * momentarily enable the RX ring for the changes to take
   4391 	 * effect
   4392 	 */
   4393 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4394 	CSR_WRITE_FLUSH(sc);
   4395 	delay(150);
   4396 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4397 }
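
/*
 * Bit layout of the RXDCTL value written above (assuming the usual
 * e1000 RXDCTL layout with PTHRESH in bits 5:0 and HTHRESH in bits
 * 13:8):
 *
 *	reg |= 0x1f;		// PTHRESH = 31, prefetch threshold
 *	reg |= 1 << 8;		// HTHRESH = 1, host threshold
 *	reg |= RXDCTL_GRAN;	// granularity is descriptors
 */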
   4398 
   4399 /*
   4400  * wm_reset:
   4401  *
   4402  *	Reset the i82542 chip.
   4403  */
   4404 static void
   4405 wm_reset(struct wm_softc *sc)
   4406 {
   4407 	int phy_reset = 0;
   4408 	int i, error = 0;
   4409 	uint32_t reg;
   4410 	uint16_t kmreg;
   4411 	int rv;
   4412 
   4413 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4414 		device_xname(sc->sc_dev), __func__));
   4415 	KASSERT(sc->sc_type != 0);
   4416 
   4417 	/*
   4418 	 * Allocate on-chip memory according to the MTU size.
   4419 	 * The Packet Buffer Allocation register must be written
   4420 	 * before the chip is reset.
   4421 	 */
   4422 	switch (sc->sc_type) {
   4423 	case WM_T_82547:
   4424 	case WM_T_82547_2:
   4425 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4426 		    PBA_22K : PBA_30K;
   4427 		for (i = 0; i < sc->sc_nqueues; i++) {
   4428 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4429 			txq->txq_fifo_head = 0;
   4430 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4431 			txq->txq_fifo_size =
   4432 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4433 			txq->txq_fifo_stall = 0;
   4434 		}
   4435 		break;
   4436 	case WM_T_82571:
   4437 	case WM_T_82572:
    4438 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4439 	case WM_T_80003:
   4440 		sc->sc_pba = PBA_32K;
   4441 		break;
   4442 	case WM_T_82573:
   4443 		sc->sc_pba = PBA_12K;
   4444 		break;
   4445 	case WM_T_82574:
   4446 	case WM_T_82583:
   4447 		sc->sc_pba = PBA_20K;
   4448 		break;
   4449 	case WM_T_82576:
   4450 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4451 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4452 		break;
   4453 	case WM_T_82580:
   4454 	case WM_T_I350:
   4455 	case WM_T_I354:
   4456 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4457 		break;
   4458 	case WM_T_I210:
   4459 	case WM_T_I211:
   4460 		sc->sc_pba = PBA_34K;
   4461 		break;
   4462 	case WM_T_ICH8:
   4463 		/* Workaround for a bit corruption issue in FIFO memory */
   4464 		sc->sc_pba = PBA_8K;
   4465 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4466 		break;
   4467 	case WM_T_ICH9:
   4468 	case WM_T_ICH10:
   4469 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4470 		    PBA_14K : PBA_10K;
   4471 		break;
   4472 	case WM_T_PCH:
   4473 	case WM_T_PCH2:	/* XXX 14K? */
   4474 	case WM_T_PCH_LPT:
   4475 	case WM_T_PCH_SPT:
   4476 	case WM_T_PCH_CNP:
   4477 		sc->sc_pba = PBA_26K;
   4478 		break;
   4479 	default:
   4480 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4481 		    PBA_40K : PBA_48K;
   4482 		break;
   4483 	}
   4484 	/*
    4485 	 * Only old or non-multiqueue devices have the PBA register.
   4486 	 * XXX Need special handling for 82575.
   4487 	 */
   4488 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4489 	    || (sc->sc_type == WM_T_82575))
   4490 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4491 
   4492 	/* Prevent the PCI-E bus from sticking */
   4493 	if (sc->sc_flags & WM_F_PCIE) {
   4494 		int timeout = 800;
   4495 
   4496 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4497 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4498 
   4499 		while (timeout--) {
   4500 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4501 			    == 0)
   4502 				break;
   4503 			delay(100);
   4504 		}
   4505 		if (timeout == 0)
   4506 			device_printf(sc->sc_dev,
   4507 			    "failed to disable busmastering\n");
   4508 	}
   4509 
   4510 	/* Set the completion timeout for interface */
   4511 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4512 	    || (sc->sc_type == WM_T_82580)
   4513 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4514 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4515 		wm_set_pcie_completion_timeout(sc);
   4516 
   4517 	/* Clear interrupt */
   4518 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4519 	if (wm_is_using_msix(sc)) {
   4520 		if (sc->sc_type != WM_T_82574) {
   4521 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4522 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4523 		} else {
   4524 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4525 		}
   4526 	}
   4527 
   4528 	/* Stop the transmit and receive processes. */
   4529 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4530 	sc->sc_rctl &= ~RCTL_EN;
   4531 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4532 	CSR_WRITE_FLUSH(sc);
   4533 
   4534 	/* XXX set_tbi_sbp_82543() */
   4535 
   4536 	delay(10*1000);
   4537 
   4538 	/* Must acquire the MDIO ownership before MAC reset */
   4539 	switch (sc->sc_type) {
   4540 	case WM_T_82573:
   4541 	case WM_T_82574:
   4542 	case WM_T_82583:
   4543 		error = wm_get_hw_semaphore_82573(sc);
   4544 		break;
   4545 	default:
   4546 		break;
   4547 	}
   4548 
   4549 	/*
   4550 	 * 82541 Errata 29? & 82547 Errata 28?
   4551 	 * See also the description about PHY_RST bit in CTRL register
   4552 	 * in 8254x_GBe_SDM.pdf.
   4553 	 */
   4554 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4555 		CSR_WRITE(sc, WMREG_CTRL,
   4556 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4557 		CSR_WRITE_FLUSH(sc);
   4558 		delay(5000);
   4559 	}
   4560 
   4561 	switch (sc->sc_type) {
   4562 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4563 	case WM_T_82541:
   4564 	case WM_T_82541_2:
   4565 	case WM_T_82547:
   4566 	case WM_T_82547_2:
   4567 		/*
   4568 		 * On some chipsets, a reset through a memory-mapped write
   4569 		 * cycle can cause the chip to reset before completing the
   4570 		 * write cycle. This causes major headache that can be avoided
   4571 		 * by issuing the reset via indirect register writes through
   4572 		 * I/O space.
   4573 		 *
   4574 		 * So, if we successfully mapped the I/O BAR at attach time,
   4575 		 * use that. Otherwise, try our luck with a memory-mapped
   4576 		 * reset.
   4577 		 */
   4578 		if (sc->sc_flags & WM_F_IOH_VALID)
   4579 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4580 		else
   4581 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4582 		break;
   4583 	case WM_T_82545_3:
   4584 	case WM_T_82546_3:
   4585 		/* Use the shadow control register on these chips. */
   4586 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4587 		break;
   4588 	case WM_T_80003:
   4589 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4590 		sc->phy.acquire(sc);
   4591 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4592 		sc->phy.release(sc);
   4593 		break;
   4594 	case WM_T_ICH8:
   4595 	case WM_T_ICH9:
   4596 	case WM_T_ICH10:
   4597 	case WM_T_PCH:
   4598 	case WM_T_PCH2:
   4599 	case WM_T_PCH_LPT:
   4600 	case WM_T_PCH_SPT:
   4601 	case WM_T_PCH_CNP:
   4602 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4603 		if (wm_phy_resetisblocked(sc) == false) {
   4604 			/*
   4605 			 * Gate automatic PHY configuration by hardware on
   4606 			 * non-managed 82579
   4607 			 */
   4608 			if ((sc->sc_type == WM_T_PCH2)
   4609 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4610 				== 0))
   4611 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4612 
   4613 			reg |= CTRL_PHY_RESET;
   4614 			phy_reset = 1;
   4615 		} else
   4616 			printf("XXX reset is blocked!!!\n");
   4617 		sc->phy.acquire(sc);
   4618 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4619 		/* Don't insert a completion barrier during reset */
   4620 		delay(20*1000);
   4621 		mutex_exit(sc->sc_ich_phymtx);
   4622 		break;
   4623 	case WM_T_82580:
   4624 	case WM_T_I350:
   4625 	case WM_T_I354:
   4626 	case WM_T_I210:
   4627 	case WM_T_I211:
   4628 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4629 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4630 			CSR_WRITE_FLUSH(sc);
   4631 		delay(5000);
   4632 		break;
   4633 	case WM_T_82542_2_0:
   4634 	case WM_T_82542_2_1:
   4635 	case WM_T_82543:
   4636 	case WM_T_82540:
   4637 	case WM_T_82545:
   4638 	case WM_T_82546:
   4639 	case WM_T_82571:
   4640 	case WM_T_82572:
   4641 	case WM_T_82573:
   4642 	case WM_T_82574:
   4643 	case WM_T_82575:
   4644 	case WM_T_82576:
   4645 	case WM_T_82583:
   4646 	default:
   4647 		/* Everything else can safely use the documented method. */
   4648 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4649 		break;
   4650 	}
   4651 
   4652 	/* Must release the MDIO ownership after MAC reset */
   4653 	switch (sc->sc_type) {
   4654 	case WM_T_82573:
   4655 	case WM_T_82574:
   4656 	case WM_T_82583:
   4657 		if (error == 0)
   4658 			wm_put_hw_semaphore_82573(sc);
   4659 		break;
   4660 	default:
   4661 		break;
   4662 	}
   4663 
   4664 	/* Set Phy Config Counter to 50msec */
   4665 	if (sc->sc_type == WM_T_PCH2) {
   4666 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4667 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4668 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4669 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4670 	}
   4671 
   4672 	if (phy_reset != 0)
   4673 		wm_get_cfg_done(sc);
   4674 
   4675 	/* reload EEPROM */
   4676 	switch (sc->sc_type) {
   4677 	case WM_T_82542_2_0:
   4678 	case WM_T_82542_2_1:
   4679 	case WM_T_82543:
   4680 	case WM_T_82544:
   4681 		delay(10);
   4682 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4683 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4684 		CSR_WRITE_FLUSH(sc);
   4685 		delay(2000);
   4686 		break;
   4687 	case WM_T_82540:
   4688 	case WM_T_82545:
   4689 	case WM_T_82545_3:
   4690 	case WM_T_82546:
   4691 	case WM_T_82546_3:
   4692 		delay(5*1000);
   4693 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4694 		break;
   4695 	case WM_T_82541:
   4696 	case WM_T_82541_2:
   4697 	case WM_T_82547:
   4698 	case WM_T_82547_2:
   4699 		delay(20000);
   4700 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4701 		break;
   4702 	case WM_T_82571:
   4703 	case WM_T_82572:
   4704 	case WM_T_82573:
   4705 	case WM_T_82574:
   4706 	case WM_T_82583:
   4707 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4708 			delay(10);
   4709 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4710 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4711 			CSR_WRITE_FLUSH(sc);
   4712 		}
   4713 		/* check EECD_EE_AUTORD */
   4714 		wm_get_auto_rd_done(sc);
   4715 		/*
    4716 		 * PHY configuration from NVM starts only after EECD_AUTO_RD
   4717 		 * is set.
   4718 		 */
   4719 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4720 		    || (sc->sc_type == WM_T_82583))
   4721 			delay(25*1000);
   4722 		break;
   4723 	case WM_T_82575:
   4724 	case WM_T_82576:
   4725 	case WM_T_82580:
   4726 	case WM_T_I350:
   4727 	case WM_T_I354:
   4728 	case WM_T_I210:
   4729 	case WM_T_I211:
   4730 	case WM_T_80003:
   4731 		/* check EECD_EE_AUTORD */
   4732 		wm_get_auto_rd_done(sc);
   4733 		break;
   4734 	case WM_T_ICH8:
   4735 	case WM_T_ICH9:
   4736 	case WM_T_ICH10:
   4737 	case WM_T_PCH:
   4738 	case WM_T_PCH2:
   4739 	case WM_T_PCH_LPT:
   4740 	case WM_T_PCH_SPT:
   4741 	case WM_T_PCH_CNP:
   4742 		break;
   4743 	default:
   4744 		panic("%s: unknown type\n", __func__);
   4745 	}
   4746 
   4747 	/* Check whether EEPROM is present or not */
   4748 	switch (sc->sc_type) {
   4749 	case WM_T_82575:
   4750 	case WM_T_82576:
   4751 	case WM_T_82580:
   4752 	case WM_T_I350:
   4753 	case WM_T_I354:
   4754 	case WM_T_ICH8:
   4755 	case WM_T_ICH9:
   4756 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4757 			/* Not found */
   4758 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4759 			if (sc->sc_type == WM_T_82575)
   4760 				wm_reset_init_script_82575(sc);
   4761 		}
   4762 		break;
   4763 	default:
   4764 		break;
   4765 	}
   4766 
   4767 	if (phy_reset != 0)
   4768 		wm_phy_post_reset(sc);
   4769 
   4770 	if ((sc->sc_type == WM_T_82580)
   4771 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4772 		/* clear global device reset status bit */
   4773 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4774 	}
   4775 
   4776 	/* Clear any pending interrupt events. */
   4777 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4778 	reg = CSR_READ(sc, WMREG_ICR);
   4779 	if (wm_is_using_msix(sc)) {
   4780 		if (sc->sc_type != WM_T_82574) {
   4781 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4782 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4783 		} else
   4784 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4785 	}
   4786 
   4787 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4788 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4789 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4790 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4791 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4792 		reg |= KABGTXD_BGSQLBIAS;
   4793 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4794 	}
   4795 
   4796 	/* reload sc_ctrl */
   4797 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4798 
   4799 	if (sc->sc_type == WM_T_I354) {
   4800 #if 0
   4801 		/* I354 uses an external PHY */
   4802 		wm_set_eee_i354(sc);
   4803 #endif
   4804 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4805 		wm_set_eee_i350(sc);
   4806 
   4807 	/*
    4808 	 * For PCH, this write ensures that any noise is detected as a CRC
    4809 	 * error and dropped rather than showing up as a bad packet to the
    4810 	 * DMA engine.
   4811 	 */
   4812 	if (sc->sc_type == WM_T_PCH)
   4813 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4814 
   4815 	if (sc->sc_type >= WM_T_82544)
   4816 		CSR_WRITE(sc, WMREG_WUC, 0);
   4817 
   4818 	wm_reset_mdicnfg_82580(sc);
   4819 
   4820 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4821 		wm_pll_workaround_i210(sc);
   4822 
   4823 	if (sc->sc_type == WM_T_80003) {
   4824 		/* default to TRUE to enable the MDIC W/A */
   4825 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4826 
   4827 		rv = wm_kmrn_readreg(sc,
   4828 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4829 		if (rv == 0) {
   4830 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4831 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4832 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4833 			else
   4834 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4835 		}
   4836 	}
   4837 }
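
         /*
          * Summary of the reset sequence above: size the packet buffer,
          * quiesce PCIe bus mastering, mask interrupts, stop the TX and
          * RX units, take the MDIO semaphore where required, assert
          * CTRL_RST through the chip-appropriate register, wait for the
          * EEPROM auto-read to finish, and run the post-reset PHY work.
          */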
   4838 
   4839 /*
   4840  * wm_add_rxbuf:
   4841  *
    4842  *	Add a receive buffer to the indicated descriptor.
   4843  */
   4844 static int
   4845 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4846 {
   4847 	struct wm_softc *sc = rxq->rxq_sc;
   4848 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4849 	struct mbuf *m;
   4850 	int error;
   4851 
   4852 	KASSERT(mutex_owned(rxq->rxq_lock));
   4853 
   4854 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4855 	if (m == NULL)
   4856 		return ENOBUFS;
   4857 
   4858 	MCLGET(m, M_DONTWAIT);
   4859 	if ((m->m_flags & M_EXT) == 0) {
   4860 		m_freem(m);
   4861 		return ENOBUFS;
   4862 	}
   4863 
   4864 	if (rxs->rxs_mbuf != NULL)
   4865 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4866 
   4867 	rxs->rxs_mbuf = m;
   4868 
   4869 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4870 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4871 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4872 	if (error) {
   4873 		/* XXX XXX XXX */
   4874 		aprint_error_dev(sc->sc_dev,
   4875 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4876 		panic("wm_add_rxbuf");
   4877 	}
   4878 
   4879 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4880 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4881 
   4882 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4883 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4884 			wm_init_rxdesc(rxq, idx);
   4885 	} else
   4886 		wm_init_rxdesc(rxq, idx);
   4887 
   4888 	return 0;
   4889 }
   4890 
   4891 /*
   4892  * wm_rxdrain:
   4893  *
   4894  *	Drain the receive queue.
   4895  */
   4896 static void
   4897 wm_rxdrain(struct wm_rxqueue *rxq)
   4898 {
   4899 	struct wm_softc *sc = rxq->rxq_sc;
   4900 	struct wm_rxsoft *rxs;
   4901 	int i;
   4902 
   4903 	KASSERT(mutex_owned(rxq->rxq_lock));
   4904 
   4905 	for (i = 0; i < WM_NRXDESC; i++) {
   4906 		rxs = &rxq->rxq_soft[i];
   4907 		if (rxs->rxs_mbuf != NULL) {
   4908 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4909 			m_freem(rxs->rxs_mbuf);
   4910 			rxs->rxs_mbuf = NULL;
   4911 		}
   4912 	}
   4913 }
   4914 
   4915 /*
   4916  * Setup registers for RSS.
   4917  *
   4918  * XXX not yet VMDq support
   4919  */
   4920 static void
   4921 wm_init_rss(struct wm_softc *sc)
   4922 {
   4923 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4924 	int i;
   4925 
   4926 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   4927 
   4928 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4929 		int qid, reta_ent;
   4930 
   4931 		qid  = i % sc->sc_nqueues;
   4932 		switch (sc->sc_type) {
   4933 		case WM_T_82574:
   4934 			reta_ent = __SHIFTIN(qid,
   4935 			    RETA_ENT_QINDEX_MASK_82574);
   4936 			break;
   4937 		case WM_T_82575:
   4938 			reta_ent = __SHIFTIN(qid,
   4939 			    RETA_ENT_QINDEX1_MASK_82575);
   4940 			break;
   4941 		default:
   4942 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4943 			break;
   4944 		}
   4945 
   4946 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4947 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4948 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4949 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4950 	}
   4951 
   4952 	rss_getkey((uint8_t *)rss_key);
   4953 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4954 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4955 
   4956 	if (sc->sc_type == WM_T_82574)
   4957 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4958 	else
   4959 		mrqc = MRQC_ENABLE_RSS_MQ;
   4960 
   4961 	/*
   4962 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4963 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4964 	 */
   4965 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4966 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4967 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4968 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4969 
   4970 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4971 }
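
         /*
          * Illustrative example (hypothetical numbers): with sc_nqueues
          * == 4, the loop above cycles queue indices 0,1,2,3,0,1,...
          * through the redirection-table entries, so hashed RX flows are
          * spread evenly across the four queues.
          */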
   4972 
   4973 /*
    4974  * Adjust the TX and RX queue numbers which the system actually uses.
    4975  *
    4976  * The numbers are affected by the following parameters:
    4977  *     - The number of hardware queues
   4978  *     - The number of MSI-X vectors (= "nvectors" argument)
   4979  *     - ncpu
   4980  */
   4981 static void
   4982 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4983 {
   4984 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4985 
   4986 	if (nvectors < 2) {
   4987 		sc->sc_nqueues = 1;
   4988 		return;
   4989 	}
   4990 
   4991 	switch (sc->sc_type) {
   4992 	case WM_T_82572:
   4993 		hw_ntxqueues = 2;
   4994 		hw_nrxqueues = 2;
   4995 		break;
   4996 	case WM_T_82574:
   4997 		hw_ntxqueues = 2;
   4998 		hw_nrxqueues = 2;
   4999 		break;
   5000 	case WM_T_82575:
   5001 		hw_ntxqueues = 4;
   5002 		hw_nrxqueues = 4;
   5003 		break;
   5004 	case WM_T_82576:
   5005 		hw_ntxqueues = 16;
   5006 		hw_nrxqueues = 16;
   5007 		break;
   5008 	case WM_T_82580:
   5009 	case WM_T_I350:
   5010 	case WM_T_I354:
   5011 		hw_ntxqueues = 8;
   5012 		hw_nrxqueues = 8;
   5013 		break;
   5014 	case WM_T_I210:
   5015 		hw_ntxqueues = 4;
   5016 		hw_nrxqueues = 4;
   5017 		break;
   5018 	case WM_T_I211:
   5019 		hw_ntxqueues = 2;
   5020 		hw_nrxqueues = 2;
   5021 		break;
   5022 		/*
    5023 		 * As the following Ethernet controllers do not support
    5024 		 * MSI-X, this driver does not use multiqueue on them:
   5025 		 *     - WM_T_80003
   5026 		 *     - WM_T_ICH8
   5027 		 *     - WM_T_ICH9
   5028 		 *     - WM_T_ICH10
   5029 		 *     - WM_T_PCH
   5030 		 *     - WM_T_PCH2
   5031 		 *     - WM_T_PCH_LPT
   5032 		 */
   5033 	default:
   5034 		hw_ntxqueues = 1;
   5035 		hw_nrxqueues = 1;
   5036 		break;
   5037 	}
   5038 
   5039 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5040 
   5041 	/*
    5042 	 * As more queues than MSI-X vectors cannot improve scaling, we
    5043 	 * limit the number of queues actually used.
   5044 	 */
   5045 	if (nvectors < hw_nqueues + 1)
   5046 		sc->sc_nqueues = nvectors - 1;
   5047 	else
   5048 		sc->sc_nqueues = hw_nqueues;
   5049 
   5050 	/*
    5051 	 * As more queues than CPUs cannot improve scaling, we limit
    5052 	 * the number of queues actually used.
   5053 	 */
   5054 	if (ncpu < sc->sc_nqueues)
   5055 		sc->sc_nqueues = ncpu;
   5056 }
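
         /*
          * Worked example (hypothetical numbers): an 82576 has 16
          * hardware queue pairs.  With nvectors == 5 and ncpu == 8,
          * hw_nqueues is 16, the vector limit gives sc_nqueues =
          * nvectors - 1 = 4, and the CPU count (8) does not reduce it
          * further, so 4 queue pairs are used.
          */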
   5057 
   5058 static inline bool
   5059 wm_is_using_msix(struct wm_softc *sc)
   5060 {
   5061 
   5062 	return (sc->sc_nintrs > 1);
   5063 }
   5064 
   5065 static inline bool
   5066 wm_is_using_multiqueue(struct wm_softc *sc)
   5067 {
   5068 
   5069 	return (sc->sc_nqueues > 1);
   5070 }
   5071 
   5072 static int
   5073 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5074 {
   5075 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5076 	wmq->wmq_id = qidx;
   5077 	wmq->wmq_intr_idx = intr_idx;
   5078 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5079 #ifdef WM_MPSAFE
   5080 	    | SOFTINT_MPSAFE
   5081 #endif
   5082 	    , wm_handle_queue, wmq);
   5083 	if (wmq->wmq_si != NULL)
   5084 		return 0;
   5085 
   5086 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5087 	    wmq->wmq_id);
   5088 
   5089 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5090 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5091 	return ENOMEM;
   5092 }
   5093 
   5094 /*
    5095  * Both single-interrupt MSI and INTx can use this function.
   5096  */
   5097 static int
   5098 wm_setup_legacy(struct wm_softc *sc)
   5099 {
   5100 	pci_chipset_tag_t pc = sc->sc_pc;
   5101 	const char *intrstr = NULL;
   5102 	char intrbuf[PCI_INTRSTR_LEN];
   5103 	int error;
   5104 
   5105 	error = wm_alloc_txrx_queues(sc);
   5106 	if (error) {
   5107 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5108 		    error);
   5109 		return ENOMEM;
   5110 	}
   5111 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5112 	    sizeof(intrbuf));
   5113 #ifdef WM_MPSAFE
   5114 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5115 #endif
   5116 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5117 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5118 	if (sc->sc_ihs[0] == NULL) {
   5119 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5120 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5121 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5122 		return ENOMEM;
   5123 	}
   5124 
   5125 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5126 	sc->sc_nintrs = 1;
   5127 
   5128 	return wm_softint_establish(sc, 0, 0);
   5129 }
   5130 
   5131 static int
   5132 wm_setup_msix(struct wm_softc *sc)
   5133 {
   5134 	void *vih;
   5135 	kcpuset_t *affinity;
   5136 	int qidx, error, intr_idx, txrx_established;
   5137 	pci_chipset_tag_t pc = sc->sc_pc;
   5138 	const char *intrstr = NULL;
   5139 	char intrbuf[PCI_INTRSTR_LEN];
   5140 	char intr_xname[INTRDEVNAMEBUF];
   5141 
   5142 	if (sc->sc_nqueues < ncpu) {
   5143 		/*
    5144 		 * To avoid other devices' interrupts (which typically target
    5145 		 * CPU#0), Tx/Rx interrupt affinity starts at CPU#1.
   5146 		 */
   5147 		sc->sc_affinity_offset = 1;
   5148 	} else {
   5149 		/*
    5150 		 * In this case, this device uses all CPUs, so we match the
    5151 		 * affinity cpu_index to the MSI-X vector number for readability.
   5152 		 */
   5153 		sc->sc_affinity_offset = 0;
   5154 	}
   5155 
   5156 	error = wm_alloc_txrx_queues(sc);
   5157 	if (error) {
   5158 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5159 		    error);
   5160 		return ENOMEM;
   5161 	}
   5162 
   5163 	kcpuset_create(&affinity, false);
   5164 	intr_idx = 0;
   5165 
   5166 	/*
   5167 	 * TX and RX
   5168 	 */
   5169 	txrx_established = 0;
   5170 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5171 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5172 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5173 
   5174 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5175 		    sizeof(intrbuf));
   5176 #ifdef WM_MPSAFE
   5177 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5178 		    PCI_INTR_MPSAFE, true);
   5179 #endif
   5180 		memset(intr_xname, 0, sizeof(intr_xname));
   5181 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5182 		    device_xname(sc->sc_dev), qidx);
   5183 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5184 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5185 		if (vih == NULL) {
   5186 			aprint_error_dev(sc->sc_dev,
   5187 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5188 			    intrstr ? " at " : "",
   5189 			    intrstr ? intrstr : "");
   5190 
   5191 			goto fail;
   5192 		}
   5193 		kcpuset_zero(affinity);
   5194 		/* Round-robin affinity */
   5195 		kcpuset_set(affinity, affinity_to);
   5196 		error = interrupt_distribute(vih, affinity, NULL);
   5197 		if (error == 0) {
   5198 			aprint_normal_dev(sc->sc_dev,
   5199 			    "for TX and RX interrupting at %s affinity to %u\n",
   5200 			    intrstr, affinity_to);
   5201 		} else {
   5202 			aprint_normal_dev(sc->sc_dev,
   5203 			    "for TX and RX interrupting at %s\n", intrstr);
   5204 		}
   5205 		sc->sc_ihs[intr_idx] = vih;
   5206 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5207 			goto fail;
   5208 		txrx_established++;
   5209 		intr_idx++;
   5210 	}
   5211 
   5212 	/*
   5213 	 * LINK
   5214 	 */
   5215 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5216 	    sizeof(intrbuf));
   5217 #ifdef WM_MPSAFE
   5218 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5219 #endif
   5220 	memset(intr_xname, 0, sizeof(intr_xname));
   5221 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5222 	    device_xname(sc->sc_dev));
   5223 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5224 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5225 	if (vih == NULL) {
   5226 		aprint_error_dev(sc->sc_dev,
   5227 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5228 		    intrstr ? " at " : "",
   5229 		    intrstr ? intrstr : "");
   5230 
   5231 		goto fail;
   5232 	}
    5233 	/* Keep the default affinity for the LINK interrupt */
   5234 	aprint_normal_dev(sc->sc_dev,
   5235 	    "for LINK interrupting at %s\n", intrstr);
   5236 	sc->sc_ihs[intr_idx] = vih;
   5237 	sc->sc_link_intr_idx = intr_idx;
   5238 
   5239 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5240 	kcpuset_destroy(affinity);
   5241 	return 0;
   5242 
   5243  fail:
   5244 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5245 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5246 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5247 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5248 	}
   5249 
   5250 	kcpuset_destroy(affinity);
   5251 	return ENOMEM;
   5252 }
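
         /*
          * Illustrative vector layout (hypothetical numbers): with 4
          * queues and 5 MSI-X vectors, vectors 0-3 carry TXRX for queues
          * 0-3 and, when sc_nqueues < ncpu, are pinned round-robin
          * starting at CPU#1 (sc_affinity_offset == 1); vector 4 is the
          * LINK interrupt and keeps the system default affinity.
          */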
   5253 
   5254 static void
   5255 wm_unset_stopping_flags(struct wm_softc *sc)
   5256 {
   5257 	int i;
   5258 
   5259 	KASSERT(WM_CORE_LOCKED(sc));
   5260 
   5261 	/*
    5262 	 * Must unset stopping flags in ascending order.
   5263 	 */
   5264 	for (i = 0; i < sc->sc_nqueues; i++) {
   5265 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5266 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5267 
   5268 		mutex_enter(txq->txq_lock);
   5269 		txq->txq_stopping = false;
   5270 		mutex_exit(txq->txq_lock);
   5271 
   5272 		mutex_enter(rxq->rxq_lock);
   5273 		rxq->rxq_stopping = false;
   5274 		mutex_exit(rxq->rxq_lock);
   5275 	}
   5276 
   5277 	sc->sc_core_stopping = false;
   5278 }
   5279 
   5280 static void
   5281 wm_set_stopping_flags(struct wm_softc *sc)
   5282 {
   5283 	int i;
   5284 
   5285 	KASSERT(WM_CORE_LOCKED(sc));
   5286 
   5287 	sc->sc_core_stopping = true;
   5288 
   5289 	/*
    5290 	 * Must set stopping flags in ascending order.
   5291 	 */
   5292 	for (i = 0; i < sc->sc_nqueues; i++) {
   5293 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5294 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5295 
   5296 		mutex_enter(rxq->rxq_lock);
   5297 		rxq->rxq_stopping = true;
   5298 		mutex_exit(rxq->rxq_lock);
   5299 
   5300 		mutex_enter(txq->txq_lock);
   5301 		txq->txq_stopping = true;
   5302 		mutex_exit(txq->txq_lock);
   5303 	}
   5304 }
   5305 
   5306 /*
   5307  * write interrupt interval value to ITR or EITR
   5308  */
   5309 static void
   5310 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5311 {
   5312 
   5313 	if (!wmq->wmq_set_itr)
   5314 		return;
   5315 
   5316 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5317 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5318 
   5319 		/*
    5320 		 * The 82575 doesn't have the CNT_INGR field, so
    5321 		 * overwrite the counter field in software.
   5322 		 */
   5323 		if (sc->sc_type == WM_T_82575)
   5324 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5325 		else
   5326 			eitr |= EITR_CNT_INGR;
   5327 
   5328 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5329 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5330 		/*
    5331 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5332 		 * the multiqueue function with MSI-X.
   5333 		 */
   5334 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5335 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5336 	} else {
   5337 		KASSERT(wmq->wmq_id == 0);
   5338 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5339 	}
   5340 
   5341 	wmq->wmq_set_itr = false;
   5342 }
   5343 
   5344 /*
   5345  * TODO
    5346  * The dynamic ITR calculation below is almost the same as Linux
    5347  * igb's; however, it does not fit wm(4), so AIM is disabled until
    5348  * we find an appropriate ITR calculation.
   5349  */
   5350 /*
    5351  * Calculate the interrupt interval value to be written to the register
    5352  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5353  */
   5354 static void
   5355 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5356 {
   5357 #ifdef NOTYET
   5358 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5359 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5360 	uint32_t avg_size = 0;
   5361 	uint32_t new_itr;
   5362 
   5363 	if (rxq->rxq_packets)
   5364 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5365 	if (txq->txq_packets)
   5366 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5367 
   5368 	if (avg_size == 0) {
   5369 		new_itr = 450; /* restore default value */
   5370 		goto out;
   5371 	}
   5372 
   5373 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5374 	avg_size += 24;
   5375 
   5376 	/* Don't starve jumbo frames */
   5377 	avg_size = uimin(avg_size, 3000);
   5378 
   5379 	/* Give a little boost to mid-size frames */
   5380 	if ((avg_size > 300) && (avg_size < 1200))
   5381 		new_itr = avg_size / 3;
   5382 	else
   5383 		new_itr = avg_size / 2;
   5384 
   5385 out:
   5386 	/*
    5387 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
   5388 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5389 	 */
   5390 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5391 		new_itr *= 4;
   5392 
   5393 	if (new_itr != wmq->wmq_itr) {
   5394 		wmq->wmq_itr = new_itr;
   5395 		wmq->wmq_set_itr = true;
   5396 	} else
   5397 		wmq->wmq_set_itr = false;
   5398 
   5399 	rxq->rxq_packets = 0;
   5400 	rxq->rxq_bytes = 0;
   5401 	txq->txq_packets = 0;
   5402 	txq->txq_bytes = 0;
   5403 #endif
   5404 }
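
         /*
          * Worked example of the disabled AIM math above (hypothetical
          * traffic): an average frame of 576 bytes becomes 600 after the
          * 24-byte CRC/preamble/gap adjustment; since 300 < 600 < 1200,
          * new_itr = 600 / 3 = 200, scaled by 4 to 800 on everything but
          * the 82575.
          */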
   5405 
   5406 /*
   5407  * wm_init:		[ifnet interface function]
   5408  *
   5409  *	Initialize the interface.
   5410  */
   5411 static int
   5412 wm_init(struct ifnet *ifp)
   5413 {
   5414 	struct wm_softc *sc = ifp->if_softc;
   5415 	int ret;
   5416 
   5417 	WM_CORE_LOCK(sc);
   5418 	ret = wm_init_locked(ifp);
   5419 	WM_CORE_UNLOCK(sc);
   5420 
   5421 	return ret;
   5422 }
   5423 
   5424 static int
   5425 wm_init_locked(struct ifnet *ifp)
   5426 {
   5427 	struct wm_softc *sc = ifp->if_softc;
   5428 	int i, j, trynum, error = 0;
   5429 	uint32_t reg;
   5430 
   5431 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5432 		device_xname(sc->sc_dev), __func__));
   5433 	KASSERT(WM_CORE_LOCKED(sc));
   5434 
   5435 	/*
    5436 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5437 	 * There is a small but measurable benefit to avoiding the adjustment
   5438 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5439 	 * on such platforms.  One possibility is that the DMA itself is
   5440 	 * slightly more efficient if the front of the entire packet (instead
   5441 	 * of the front of the headers) is aligned.
   5442 	 *
   5443 	 * Note we must always set align_tweak to 0 if we are using
   5444 	 * jumbo frames.
   5445 	 */
   5446 #ifdef __NO_STRICT_ALIGNMENT
   5447 	sc->sc_align_tweak = 0;
   5448 #else
   5449 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5450 		sc->sc_align_tweak = 0;
   5451 	else
   5452 		sc->sc_align_tweak = 2;
   5453 #endif /* __NO_STRICT_ALIGNMENT */
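
         	/*
         	 * Illustrative note: with align_tweak == 2, the 14-byte
         	 * Ethernet header places the IP header on a 4-byte boundary
         	 * (2 + 14 = 16), which is what strict-alignment platforms
         	 * need.
         	 */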
   5454 
   5455 	/* Cancel any pending I/O. */
   5456 	wm_stop_locked(ifp, 0);
   5457 
   5458 	/* update statistics before reset */
   5459 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5460 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5461 
   5462 	/* PCH_SPT hardware workaround */
   5463 	if (sc->sc_type == WM_T_PCH_SPT)
   5464 		wm_flush_desc_rings(sc);
   5465 
   5466 	/* Reset the chip to a known state. */
   5467 	wm_reset(sc);
   5468 
   5469 	/*
   5470 	 * AMT based hardware can now take control from firmware
   5471 	 * Do this after reset.
   5472 	 */
   5473 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5474 		wm_get_hw_control(sc);
   5475 
   5476 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5477 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5478 		wm_legacy_irq_quirk_spt(sc);
   5479 
   5480 	/* Init hardware bits */
   5481 	wm_initialize_hardware_bits(sc);
   5482 
   5483 	/* Reset the PHY. */
   5484 	if (sc->sc_flags & WM_F_HAS_MII)
   5485 		wm_gmii_reset(sc);
   5486 
   5487 	/* Calculate (E)ITR value */
   5488 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5489 		/*
   5490 		 * For NEWQUEUE's EITR (except for 82575).
    5491 		 * The 82575's EITR should be set to the same throttling
    5492 		 * value as other old controllers' ITR because the
    5493 		 * interrupt/sec calculation is the same: 1,000,000,000 / (N * 256).
    5494 		 *
    5495 		 * The 82574's EITR should be set to the same throttling value as ITR.
    5496 		 *
    5497 		 * For N interrupts/sec, set this value to 1,000,000 / N,
    5498 		 * in contrast to the ITR throttling value.
   5499 		 */
   5500 		sc->sc_itr_init = 450;
   5501 	} else if (sc->sc_type >= WM_T_82543) {
   5502 		/*
   5503 		 * Set up the interrupt throttling register (units of 256ns)
   5504 		 * Note that a footnote in Intel's documentation says this
   5505 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5506 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5507 		 * that that is also true for the 1024ns units of the other
   5508 		 * interrupt-related timer registers -- so, really, we ought
   5509 		 * to divide this value by 4 when the link speed is low.
   5510 		 *
   5511 		 * XXX implement this division at link speed change!
   5512 		 */
   5513 
   5514 		/*
   5515 		 * For N interrupts/sec, set this value to:
   5516 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5517 		 * absolute and packet timer values to this value
   5518 		 * divided by 4 to get "simple timer" behavior.
   5519 		 */
   5520 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5521 	}
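
         	/*
         	 * Worked example of the formulas above: sc_itr_init = 1500
         	 * in 256ns units gives 1,000,000,000 / (1500 * 256) ~= 2604
         	 * interrupts/sec, while the NEWQUEUE EITR value of 450 gives
         	 * 1,000,000 / 450 ~= 2222 interrupts/sec.
         	 */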
   5522 
   5523 	error = wm_init_txrx_queues(sc);
   5524 	if (error)
   5525 		goto out;
   5526 
   5527 	/*
   5528 	 * Clear out the VLAN table -- we don't use it (yet).
   5529 	 */
   5530 	CSR_WRITE(sc, WMREG_VET, 0);
   5531 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5532 		trynum = 10; /* Due to hw errata */
   5533 	else
   5534 		trynum = 1;
   5535 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5536 		for (j = 0; j < trynum; j++)
   5537 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5538 
   5539 	/*
   5540 	 * Set up flow-control parameters.
   5541 	 *
   5542 	 * XXX Values could probably stand some tuning.
   5543 	 */
   5544 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5545 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5546 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5547 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5548 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5549 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5550 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5551 	}
   5552 
   5553 	sc->sc_fcrtl = FCRTL_DFLT;
   5554 	if (sc->sc_type < WM_T_82543) {
   5555 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5556 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5557 	} else {
   5558 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5559 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5560 	}
   5561 
   5562 	if (sc->sc_type == WM_T_80003)
   5563 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5564 	else
   5565 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5566 
   5567 	/* Writes the control register. */
   5568 	wm_set_vlan(sc);
   5569 
   5570 	if (sc->sc_flags & WM_F_HAS_MII) {
   5571 		uint16_t kmreg;
   5572 
   5573 		switch (sc->sc_type) {
   5574 		case WM_T_80003:
   5575 		case WM_T_ICH8:
   5576 		case WM_T_ICH9:
   5577 		case WM_T_ICH10:
   5578 		case WM_T_PCH:
   5579 		case WM_T_PCH2:
   5580 		case WM_T_PCH_LPT:
   5581 		case WM_T_PCH_SPT:
   5582 		case WM_T_PCH_CNP:
   5583 			/*
   5584 			 * Set the mac to wait the maximum time between each
   5585 			 * iteration and increase the max iterations when
   5586 			 * polling the phy; this fixes erroneous timeouts at
   5587 			 * 10Mbps.
   5588 			 */
   5589 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5590 			    0xFFFF);
   5591 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5592 			    &kmreg);
   5593 			kmreg |= 0x3F;
   5594 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5595 			    kmreg);
   5596 			break;
   5597 		default:
   5598 			break;
   5599 		}
   5600 
   5601 		if (sc->sc_type == WM_T_80003) {
   5602 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5603 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5604 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5605 
   5606 			/* Bypass RX and TX FIFO's */
   5607 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5608 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5609 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5610 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5611 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5612 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5613 		}
   5614 	}
   5615 #if 0
   5616 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5617 #endif
   5618 
   5619 	/* Set up checksum offload parameters. */
   5620 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5621 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5622 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5623 		reg |= RXCSUM_IPOFL;
   5624 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5625 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5626 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5627 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5628 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5629 
   5630 	/* Set registers about MSI-X */
   5631 	if (wm_is_using_msix(sc)) {
   5632 		uint32_t ivar;
   5633 		struct wm_queue *wmq;
   5634 		int qid, qintr_idx;
   5635 
   5636 		if (sc->sc_type == WM_T_82575) {
   5637 			/* Interrupt control */
   5638 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5639 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5640 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5641 
   5642 			/* TX and RX */
   5643 			for (i = 0; i < sc->sc_nqueues; i++) {
   5644 				wmq = &sc->sc_queue[i];
   5645 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5646 				    EITR_TX_QUEUE(wmq->wmq_id)
   5647 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5648 			}
   5649 			/* Link status */
   5650 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5651 			    EITR_OTHER);
   5652 		} else if (sc->sc_type == WM_T_82574) {
   5653 			/* Interrupt control */
   5654 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5655 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5656 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5657 
   5658 			/*
    5659 			 * Work around an issue with spurious interrupts
    5660 			 * in MSI-X mode.
    5661 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5662 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5663 			 */
   5664 			reg = CSR_READ(sc, WMREG_RFCTL);
   5665 			reg |= WMREG_RFCTL_ACKDIS;
   5666 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5667 
   5668 			ivar = 0;
   5669 			/* TX and RX */
   5670 			for (i = 0; i < sc->sc_nqueues; i++) {
   5671 				wmq = &sc->sc_queue[i];
   5672 				qid = wmq->wmq_id;
   5673 				qintr_idx = wmq->wmq_intr_idx;
   5674 
   5675 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5676 				    IVAR_TX_MASK_Q_82574(qid));
   5677 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5678 				    IVAR_RX_MASK_Q_82574(qid));
   5679 			}
   5680 			/* Link status */
   5681 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5682 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5683 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5684 		} else {
   5685 			/* Interrupt control */
   5686 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5687 			    | GPIE_EIAME | GPIE_PBA);
   5688 
   5689 			switch (sc->sc_type) {
   5690 			case WM_T_82580:
   5691 			case WM_T_I350:
   5692 			case WM_T_I354:
   5693 			case WM_T_I210:
   5694 			case WM_T_I211:
   5695 				/* TX and RX */
   5696 				for (i = 0; i < sc->sc_nqueues; i++) {
   5697 					wmq = &sc->sc_queue[i];
   5698 					qid = wmq->wmq_id;
   5699 					qintr_idx = wmq->wmq_intr_idx;
   5700 
   5701 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5702 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5703 					ivar |= __SHIFTIN((qintr_idx
   5704 						| IVAR_VALID),
   5705 					    IVAR_TX_MASK_Q(qid));
   5706 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5707 					ivar |= __SHIFTIN((qintr_idx
   5708 						| IVAR_VALID),
   5709 					    IVAR_RX_MASK_Q(qid));
   5710 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5711 				}
   5712 				break;
   5713 			case WM_T_82576:
   5714 				/* TX and RX */
   5715 				for (i = 0; i < sc->sc_nqueues; i++) {
   5716 					wmq = &sc->sc_queue[i];
   5717 					qid = wmq->wmq_id;
   5718 					qintr_idx = wmq->wmq_intr_idx;
   5719 
   5720 					ivar = CSR_READ(sc,
   5721 					    WMREG_IVAR_Q_82576(qid));
   5722 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5723 					ivar |= __SHIFTIN((qintr_idx
   5724 						| IVAR_VALID),
   5725 					    IVAR_TX_MASK_Q_82576(qid));
   5726 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5727 					ivar |= __SHIFTIN((qintr_idx
   5728 						| IVAR_VALID),
   5729 					    IVAR_RX_MASK_Q_82576(qid));
   5730 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5731 					    ivar);
   5732 				}
   5733 				break;
   5734 			default:
   5735 				break;
   5736 			}
   5737 
   5738 			/* Link status */
   5739 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5740 			    IVAR_MISC_OTHER);
   5741 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5742 		}
   5743 
   5744 		if (wm_is_using_multiqueue(sc)) {
   5745 			wm_init_rss(sc);
   5746 
   5747 			/*
    5748 			 * NOTE: Receive Full-Packet Checksum Offload
    5749 			 * is mutually exclusive with Multiqueue; however,
    5750 			 * this is not the same as TCP/IP checksum offload,
    5751 			 * which still works.
    5752 			 */
   5753 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5754 			reg |= RXCSUM_PCSD;
   5755 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5756 		}
   5757 	}
   5758 
   5759 	/* Set up the interrupt registers. */
   5760 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5761 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5762 	    ICR_RXO | ICR_RXT0;
   5763 	if (wm_is_using_msix(sc)) {
   5764 		uint32_t mask;
   5765 		struct wm_queue *wmq;
   5766 
   5767 		switch (sc->sc_type) {
   5768 		case WM_T_82574:
   5769 			mask = 0;
   5770 			for (i = 0; i < sc->sc_nqueues; i++) {
   5771 				wmq = &sc->sc_queue[i];
   5772 				mask |= ICR_TXQ(wmq->wmq_id);
   5773 				mask |= ICR_RXQ(wmq->wmq_id);
   5774 			}
   5775 			mask |= ICR_OTHER;
   5776 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5777 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5778 			break;
   5779 		default:
   5780 			if (sc->sc_type == WM_T_82575) {
   5781 				mask = 0;
   5782 				for (i = 0; i < sc->sc_nqueues; i++) {
   5783 					wmq = &sc->sc_queue[i];
   5784 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5785 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5786 				}
   5787 				mask |= EITR_OTHER;
   5788 			} else {
   5789 				mask = 0;
   5790 				for (i = 0; i < sc->sc_nqueues; i++) {
   5791 					wmq = &sc->sc_queue[i];
   5792 					mask |= 1 << wmq->wmq_intr_idx;
   5793 				}
   5794 				mask |= 1 << sc->sc_link_intr_idx;
   5795 			}
   5796 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5797 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5798 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5799 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5800 			break;
   5801 		}
   5802 	} else
   5803 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5804 
   5805 	/* Set up the inter-packet gap. */
   5806 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5807 
   5808 	if (sc->sc_type >= WM_T_82543) {
   5809 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5810 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5811 			wm_itrs_writereg(sc, wmq);
   5812 		}
   5813 		/*
    5814 		 * Link interrupts occur much less often than TX
    5815 		 * and RX interrupts, so we don't tune the
    5816 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5817 		 * FreeBSD's if_igb does.
   5818 		 */
   5819 	}
   5820 
   5821 	/* Set the VLAN ethernetype. */
   5822 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5823 
   5824 	/*
   5825 	 * Set up the transmit control register; we start out with
    5826 	 * a collision distance suitable for FDX, but update it when
   5827 	 * we resolve the media type.
   5828 	 */
   5829 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5830 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5831 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5832 	if (sc->sc_type >= WM_T_82571)
   5833 		sc->sc_tctl |= TCTL_MULR;
   5834 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5835 
   5836 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5837 		/* Write TDT after TCTL.EN is set. See the document. */
   5838 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5839 	}
   5840 
   5841 	if (sc->sc_type == WM_T_80003) {
   5842 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5843 		reg &= ~TCTL_EXT_GCEX_MASK;
   5844 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5845 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5846 	}
   5847 
   5848 	/* Set the media. */
   5849 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5850 		goto out;
   5851 
   5852 	/* Configure for OS presence */
   5853 	wm_init_manageability(sc);
   5854 
   5855 	/*
   5856 	 * Set up the receive control register; we actually program the
   5857 	 * register when we set the receive filter. Use multicast address
   5858 	 * offset type 0.
   5859 	 *
   5860 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   5861 	 * don't enable that feature.
   5862 	 */
   5863 	sc->sc_mchash_type = 0;
   5864 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5865 	    | RCTL_MO(sc->sc_mchash_type);
   5866 
   5867 	/*
    5868 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5869 	 */
   5870 	if (sc->sc_type == WM_T_82574)
   5871 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5872 
   5873 	/*
   5874 	 * The I350 has a bug where it always strips the CRC whether
    5875 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5876 	 */
   5877 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5878 	    || (sc->sc_type == WM_T_I210))
   5879 		sc->sc_rctl |= RCTL_SECRC;
   5880 
   5881 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5882 	    && (ifp->if_mtu > ETHERMTU)) {
   5883 		sc->sc_rctl |= RCTL_LPE;
   5884 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5885 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5886 	}
   5887 
   5888 	if (MCLBYTES == 2048) {
   5889 		sc->sc_rctl |= RCTL_2k;
   5890 	} else {
   5891 		if (sc->sc_type >= WM_T_82543) {
   5892 			switch (MCLBYTES) {
   5893 			case 4096:
   5894 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5895 				break;
   5896 			case 8192:
   5897 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5898 				break;
   5899 			case 16384:
   5900 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5901 				break;
   5902 			default:
   5903 				panic("wm_init: MCLBYTES %d unsupported",
   5904 				    MCLBYTES);
   5905 				break;
   5906 			}
   5907 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5908 	}
   5909 
   5910 	/* Enable ECC */
   5911 	switch (sc->sc_type) {
   5912 	case WM_T_82571:
   5913 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5914 		reg |= PBA_ECC_CORR_EN;
   5915 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5916 		break;
   5917 	case WM_T_PCH_LPT:
   5918 	case WM_T_PCH_SPT:
   5919 	case WM_T_PCH_CNP:
   5920 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5921 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5922 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5923 
   5924 		sc->sc_ctrl |= CTRL_MEHE;
   5925 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5926 		break;
   5927 	default:
   5928 		break;
   5929 	}
   5930 
   5931 	/*
   5932 	 * Set the receive filter.
   5933 	 *
   5934 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5935 	 * the setting of RCTL.EN in wm_set_filter()
   5936 	 */
   5937 	wm_set_filter(sc);
   5938 
    5939 	/* On 82575 and later, set RDT only if RX is enabled */
   5940 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5941 		int qidx;
   5942 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5943 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5944 			for (i = 0; i < WM_NRXDESC; i++) {
   5945 				mutex_enter(rxq->rxq_lock);
   5946 				wm_init_rxdesc(rxq, i);
   5947 				mutex_exit(rxq->rxq_lock);
   5948 
   5949 			}
   5950 		}
   5951 	}
   5952 
   5953 	wm_unset_stopping_flags(sc);
   5954 
   5955 	/* Start the one second link check clock. */
   5956 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5957 
   5958 	/* ...all done! */
   5959 	ifp->if_flags |= IFF_RUNNING;
   5960 	ifp->if_flags &= ~IFF_OACTIVE;
   5961 
   5962  out:
   5963 	sc->sc_if_flags = ifp->if_flags;
   5964 	if (error)
   5965 		log(LOG_ERR, "%s: interface not running\n",
   5966 		    device_xname(sc->sc_dev));
   5967 	return error;
   5968 }
   5969 
   5970 /*
   5971  * wm_stop:		[ifnet interface function]
   5972  *
   5973  *	Stop transmission on the interface.
   5974  */
   5975 static void
   5976 wm_stop(struct ifnet *ifp, int disable)
   5977 {
   5978 	struct wm_softc *sc = ifp->if_softc;
   5979 
   5980 	WM_CORE_LOCK(sc);
   5981 	wm_stop_locked(ifp, disable);
   5982 	WM_CORE_UNLOCK(sc);
   5983 }
   5984 
   5985 static void
   5986 wm_stop_locked(struct ifnet *ifp, int disable)
   5987 {
   5988 	struct wm_softc *sc = ifp->if_softc;
   5989 	struct wm_txsoft *txs;
   5990 	int i, qidx;
   5991 
   5992 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5993 		device_xname(sc->sc_dev), __func__));
   5994 	KASSERT(WM_CORE_LOCKED(sc));
   5995 
   5996 	wm_set_stopping_flags(sc);
   5997 
   5998 	/* Stop the one second clock. */
   5999 	callout_stop(&sc->sc_tick_ch);
   6000 
   6001 	/* Stop the 82547 Tx FIFO stall check timer. */
   6002 	if (sc->sc_type == WM_T_82547)
   6003 		callout_stop(&sc->sc_txfifo_ch);
   6004 
   6005 	if (sc->sc_flags & WM_F_HAS_MII) {
   6006 		/* Down the MII. */
   6007 		mii_down(&sc->sc_mii);
   6008 	} else {
   6009 #if 0
   6010 		/* Should we clear PHY's status properly? */
   6011 		wm_reset(sc);
   6012 #endif
   6013 	}
   6014 
   6015 	/* Stop the transmit and receive processes. */
   6016 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6017 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6018 	sc->sc_rctl &= ~RCTL_EN;
   6019 
   6020 	/*
   6021 	 * Clear the interrupt mask to ensure the device cannot assert its
   6022 	 * interrupt line.
   6023 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6024 	 * service any currently pending or shared interrupt.
   6025 	 */
   6026 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6027 	sc->sc_icr = 0;
   6028 	if (wm_is_using_msix(sc)) {
   6029 		if (sc->sc_type != WM_T_82574) {
   6030 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6031 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6032 		} else
   6033 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6034 	}
   6035 
   6036 	/* Release any queued transmit buffers. */
   6037 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6038 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6039 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6040 		mutex_enter(txq->txq_lock);
   6041 		txq->txq_sending = false; /* ensure watchdog disabled */
   6042 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6043 			txs = &txq->txq_soft[i];
   6044 			if (txs->txs_mbuf != NULL) {
   6045 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6046 				m_freem(txs->txs_mbuf);
   6047 				txs->txs_mbuf = NULL;
   6048 			}
   6049 		}
   6050 		mutex_exit(txq->txq_lock);
   6051 	}
   6052 
   6053 	/* Mark the interface as down and cancel the watchdog timer. */
   6054 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6055 
   6056 	if (disable) {
   6057 		for (i = 0; i < sc->sc_nqueues; i++) {
   6058 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6059 			mutex_enter(rxq->rxq_lock);
   6060 			wm_rxdrain(rxq);
   6061 			mutex_exit(rxq->rxq_lock);
   6062 		}
   6063 	}
   6064 
   6065 #if 0 /* notyet */
   6066 	if (sc->sc_type >= WM_T_82544)
   6067 		CSR_WRITE(sc, WMREG_WUC, 0);
   6068 #endif
   6069 }
   6070 
   6071 static void
   6072 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6073 {
   6074 	struct mbuf *m;
   6075 	int i;
   6076 
   6077 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6078 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6079 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6080 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6081 		    m->m_data, m->m_len, m->m_flags);
   6082 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6083 	    i, i == 1 ? "" : "s");
   6084 }
   6085 
   6086 /*
   6087  * wm_82547_txfifo_stall:
   6088  *
   6089  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6090  *	reset the FIFO pointers, and restart packet transmission.
   6091  */
   6092 static void
   6093 wm_82547_txfifo_stall(void *arg)
   6094 {
   6095 	struct wm_softc *sc = arg;
   6096 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6097 
   6098 	mutex_enter(txq->txq_lock);
   6099 
   6100 	if (txq->txq_stopping)
   6101 		goto out;
   6102 
   6103 	if (txq->txq_fifo_stall) {
   6104 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6105 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6106 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6107 			/*
   6108 			 * Packets have drained.  Stop transmitter, reset
   6109 			 * FIFO pointers, restart transmitter, and kick
   6110 			 * the packet queue.
   6111 			 */
   6112 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6113 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6114 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6115 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6116 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6117 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6118 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6119 			CSR_WRITE_FLUSH(sc);
   6120 
   6121 			txq->txq_fifo_head = 0;
   6122 			txq->txq_fifo_stall = 0;
   6123 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6124 		} else {
   6125 			/*
   6126 			 * Still waiting for packets to drain; try again in
   6127 			 * another tick.
   6128 			 */
   6129 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6130 		}
   6131 	}
   6132 
   6133 out:
   6134 	mutex_exit(txq->txq_lock);
   6135 }
   6136 
   6137 /*
   6138  * wm_82547_txfifo_bugchk:
   6139  *
   6140  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6141  *	prevent enqueueing a packet that would wrap around the end
    6142  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6143  *
   6144  *	We do this by checking the amount of space before the end
   6145  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6146  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6147  *	the internal FIFO pointers to the beginning, and restart
   6148  *	transmission on the interface.
   6149  */
   6150 #define	WM_FIFO_HDR		0x10
   6151 #define	WM_82547_PAD_LEN	0x3e0
   6152 static int
   6153 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6154 {
   6155 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6156 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6157 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6158 
   6159 	/* Just return if already stalled. */
   6160 	if (txq->txq_fifo_stall)
   6161 		return 1;
   6162 
   6163 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6164 		/* Stall only occurs in half-duplex mode. */
   6165 		goto send_packet;
   6166 	}
   6167 
   6168 	if (len >= WM_82547_PAD_LEN + space) {
   6169 		txq->txq_fifo_stall = 1;
   6170 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6171 		return 1;
   6172 	}
   6173 
   6174  send_packet:
   6175 	txq->txq_fifo_head += len;
   6176 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6177 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6178 
   6179 	return 0;
   6180 }
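
         /*
          * Worked example of the stall check (hypothetical numbers): a
          * 1514-byte frame rounds up to 1536 bytes including WM_FIFO_HDR.
          * If only 512 bytes remain before the end of the FIFO, then
          * 1536 >= WM_82547_PAD_LEN (992) + 512, so transmission is
          * stalled until the FIFO drains and the pointers are reset.
          */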
   6181 
   6182 static int
   6183 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6184 {
   6185 	int error;
   6186 
   6187 	/*
   6188 	 * Allocate the control data structures, and create and load the
   6189 	 * DMA map for it.
   6190 	 *
   6191 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6192 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6193 	 * both sets within the same 4G segment.
   6194 	 */
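         	/*
         	 * The 4G constraint is enforced below by passing 0x100000000
         	 * as the boundary argument to bus_dmamem_alloc(), so no
         	 * allocated segment crosses a 4G line.
         	 */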
   6195 	if (sc->sc_type < WM_T_82544)
   6196 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6197 	else
   6198 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6199 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6200 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6201 	else
   6202 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6203 
   6204 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6205 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6206 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6207 		aprint_error_dev(sc->sc_dev,
   6208 		    "unable to allocate TX control data, error = %d\n",
   6209 		    error);
   6210 		goto fail_0;
   6211 	}
   6212 
   6213 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6214 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6215 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6216 		aprint_error_dev(sc->sc_dev,
   6217 		    "unable to map TX control data, error = %d\n", error);
   6218 		goto fail_1;
   6219 	}
   6220 
   6221 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6222 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6223 		aprint_error_dev(sc->sc_dev,
   6224 		    "unable to create TX control data DMA map, error = %d\n",
   6225 		    error);
   6226 		goto fail_2;
   6227 	}
   6228 
   6229 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6230 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6231 		aprint_error_dev(sc->sc_dev,
   6232 		    "unable to load TX control data DMA map, error = %d\n",
   6233 		    error);
   6234 		goto fail_3;
   6235 	}
   6236 
   6237 	return 0;
   6238 
   6239  fail_3:
   6240 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6241  fail_2:
   6242 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6243 	    WM_TXDESCS_SIZE(txq));
   6244  fail_1:
   6245 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6246  fail_0:
   6247 	return error;
   6248 }
   6249 
   6250 static void
   6251 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6252 {
   6253 
   6254 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6255 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6256 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6257 	    WM_TXDESCS_SIZE(txq));
   6258 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6259 }
   6260 
   6261 static int
   6262 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6263 {
   6264 	int error;
   6265 	size_t rxq_descs_size;
   6266 
   6267 	/*
   6268 	 * Allocate the control data structures, and create and load the
   6269 	 * DMA map for it.
   6270 	 *
   6271 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6272 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6273 	 * both sets within the same 4G segment.
   6274 	 */
   6275 	rxq->rxq_ndesc = WM_NRXDESC;
   6276 	if (sc->sc_type == WM_T_82574)
   6277 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6278 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6279 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6280 	else
   6281 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6282 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6283 
   6284 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6285 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6286 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6287 		aprint_error_dev(sc->sc_dev,
   6288 		    "unable to allocate RX control data, error = %d\n",
   6289 		    error);
   6290 		goto fail_0;
   6291 	}
   6292 
   6293 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6294 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6295 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6296 		aprint_error_dev(sc->sc_dev,
   6297 		    "unable to map RX control data, error = %d\n", error);
   6298 		goto fail_1;
   6299 	}
   6300 
   6301 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6302 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6303 		aprint_error_dev(sc->sc_dev,
   6304 		    "unable to create RX control data DMA map, error = %d\n",
   6305 		    error);
   6306 		goto fail_2;
   6307 	}
   6308 
   6309 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6310 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6311 		aprint_error_dev(sc->sc_dev,
   6312 		    "unable to load RX control data DMA map, error = %d\n",
   6313 		    error);
   6314 		goto fail_3;
   6315 	}
   6316 
   6317 	return 0;
   6318 
   6319  fail_3:
   6320 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6321  fail_2:
   6322 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6323 	    rxq_descs_size);
   6324  fail_1:
   6325 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6326  fail_0:
   6327 	return error;
   6328 }
   6329 
   6330 static void
   6331 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6332 {
   6333 
   6334 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6335 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6336 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6337 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6338 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6339 }
   6340 
   6341 
   6342 static int
   6343 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6344 {
   6345 	int i, error;
   6346 
   6347 	/* Create the transmit buffer DMA maps. */
   6348 	WM_TXQUEUELEN(txq) =
   6349 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6350 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6351 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6352 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6353 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6354 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6355 			aprint_error_dev(sc->sc_dev,
   6356 			    "unable to create Tx DMA map %d, error = %d\n",
   6357 			    i, error);
   6358 			goto fail;
   6359 		}
   6360 	}
   6361 
   6362 	return 0;
   6363 
   6364  fail:
   6365 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6366 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6367 			bus_dmamap_destroy(sc->sc_dmat,
   6368 			    txq->txq_soft[i].txs_dmamap);
   6369 	}
   6370 	return error;
   6371 }
   6372 
   6373 static void
   6374 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6375 {
   6376 	int i;
   6377 
   6378 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6379 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6380 			bus_dmamap_destroy(sc->sc_dmat,
   6381 			    txq->txq_soft[i].txs_dmamap);
   6382 	}
   6383 }
   6384 
   6385 static int
   6386 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6387 {
   6388 	int i, error;
   6389 
   6390 	/* Create the receive buffer DMA maps. */
   6391 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6392 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6393 			    MCLBYTES, 0, 0,
   6394 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6395 			aprint_error_dev(sc->sc_dev,
   6396 			    "unable to create Rx DMA map %d error = %d\n",
   6397 			    i, error);
   6398 			goto fail;
   6399 		}
   6400 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6401 	}
   6402 
   6403 	return 0;
   6404 
   6405  fail:
   6406 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6407 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6408 			bus_dmamap_destroy(sc->sc_dmat,
   6409 			    rxq->rxq_soft[i].rxs_dmamap);
   6410 	}
   6411 	return error;
   6412 }
   6413 
   6414 static void
   6415 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6416 {
   6417 	int i;
   6418 
   6419 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6420 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6421 			bus_dmamap_destroy(sc->sc_dmat,
   6422 			    rxq->rxq_soft[i].rxs_dmamap);
   6423 	}
   6424 }
   6425 
   6426 /*
    6427  * wm_alloc_txrx_queues:
    6428  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6429  */
   6430 static int
   6431 wm_alloc_txrx_queues(struct wm_softc *sc)
   6432 {
   6433 	int i, error, tx_done, rx_done;
   6434 
   6435 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6436 	    KM_SLEEP);
   6437 	if (sc->sc_queue == NULL) {
    6438 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6439 		error = ENOMEM;
   6440 		goto fail_0;
   6441 	}
   6442 
   6443 	/*
   6444 	 * For transmission
   6445 	 */
   6446 	error = 0;
   6447 	tx_done = 0;
   6448 	for (i = 0; i < sc->sc_nqueues; i++) {
   6449 #ifdef WM_EVENT_COUNTERS
   6450 		int j;
   6451 		const char *xname;
   6452 #endif
   6453 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6454 		txq->txq_sc = sc;
   6455 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6456 
   6457 		error = wm_alloc_tx_descs(sc, txq);
   6458 		if (error)
   6459 			break;
   6460 		error = wm_alloc_tx_buffer(sc, txq);
   6461 		if (error) {
   6462 			wm_free_tx_descs(sc, txq);
   6463 			break;
   6464 		}
   6465 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6466 		if (txq->txq_interq == NULL) {
   6467 			wm_free_tx_descs(sc, txq);
   6468 			wm_free_tx_buffer(sc, txq);
   6469 			error = ENOMEM;
   6470 			break;
   6471 		}
   6472 
   6473 #ifdef WM_EVENT_COUNTERS
   6474 		xname = device_xname(sc->sc_dev);
   6475 
   6476 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6477 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6478 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6479 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6480 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6481 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6482 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6483 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6484 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6485 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6486 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6487 
   6488 		for (j = 0; j < WM_NTXSEGS; j++) {
   6489 			snprintf(txq->txq_txseg_evcnt_names[j],
   6490 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6491 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6492 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6493 		}
   6494 
   6495 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6496 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6497 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6498 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6499 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6500 #endif /* WM_EVENT_COUNTERS */
   6501 
   6502 		tx_done++;
   6503 	}
   6504 	if (error)
   6505 		goto fail_1;
   6506 
   6507 	/*
    6508 	 * For receive
   6509 	 */
   6510 	error = 0;
   6511 	rx_done = 0;
   6512 	for (i = 0; i < sc->sc_nqueues; i++) {
   6513 #ifdef WM_EVENT_COUNTERS
   6514 		const char *xname;
   6515 #endif
   6516 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6517 		rxq->rxq_sc = sc;
   6518 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6519 
   6520 		error = wm_alloc_rx_descs(sc, rxq);
   6521 		if (error)
   6522 			break;
   6523 
   6524 		error = wm_alloc_rx_buffer(sc, rxq);
   6525 		if (error) {
   6526 			wm_free_rx_descs(sc, rxq);
   6527 			break;
   6528 		}
   6529 
   6530 #ifdef WM_EVENT_COUNTERS
   6531 		xname = device_xname(sc->sc_dev);
   6532 
   6533 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6534 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6535 
   6536 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6537 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6538 #endif /* WM_EVENT_COUNTERS */
   6539 
   6540 		rx_done++;
   6541 	}
   6542 	if (error)
   6543 		goto fail_2;
   6544 
   6545 	return 0;
   6546 
   6547  fail_2:
   6548 	for (i = 0; i < rx_done; i++) {
   6549 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6550 		wm_free_rx_buffer(sc, rxq);
   6551 		wm_free_rx_descs(sc, rxq);
   6552 		if (rxq->rxq_lock)
   6553 			mutex_obj_free(rxq->rxq_lock);
   6554 	}
   6555  fail_1:
   6556 	for (i = 0; i < tx_done; i++) {
   6557 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6558 		pcq_destroy(txq->txq_interq);
   6559 		wm_free_tx_buffer(sc, txq);
   6560 		wm_free_tx_descs(sc, txq);
   6561 		if (txq->txq_lock)
   6562 			mutex_obj_free(txq->txq_lock);
   6563 	}
   6564 
   6565 	kmem_free(sc->sc_queue,
   6566 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6567  fail_0:
   6568 	return error;
   6569 }
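
/*
 * Usage note (sketch of the intended pairing): on success every queue has
 * its descriptor ring, its DMA maps and, for Tx, its txq_interq pcq
 * allocated, so wm_free_txrx_queues() may unconditionally tear all of them
 * down; on failure the fail_* labels above have already unwound the
 * partially allocated queues.
 */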
   6570 
   6571 /*
    6572  * wm_free_txrx_queues:
    6573  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6574  */
   6575 static void
   6576 wm_free_txrx_queues(struct wm_softc *sc)
   6577 {
   6578 	int i;
   6579 
   6580 	for (i = 0; i < sc->sc_nqueues; i++) {
   6581 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6582 
   6583 #ifdef WM_EVENT_COUNTERS
   6584 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6585 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6586 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6587 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6588 #endif /* WM_EVENT_COUNTERS */
   6589 
   6590 		wm_free_rx_buffer(sc, rxq);
   6591 		wm_free_rx_descs(sc, rxq);
   6592 		if (rxq->rxq_lock)
   6593 			mutex_obj_free(rxq->rxq_lock);
   6594 	}
   6595 
   6596 	for (i = 0; i < sc->sc_nqueues; i++) {
   6597 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6598 		struct mbuf *m;
   6599 #ifdef WM_EVENT_COUNTERS
   6600 		int j;
   6601 
   6602 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6603 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6604 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6605 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6606 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6607 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6608 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6609 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6610 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6611 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6612 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6613 
   6614 		for (j = 0; j < WM_NTXSEGS; j++)
   6615 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6616 
   6617 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6618 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6619 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6620 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6621 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6622 #endif /* WM_EVENT_COUNTERS */
   6623 
   6624 		/* drain txq_interq */
   6625 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6626 			m_freem(m);
   6627 		pcq_destroy(txq->txq_interq);
   6628 
   6629 		wm_free_tx_buffer(sc, txq);
   6630 		wm_free_tx_descs(sc, txq);
   6631 		if (txq->txq_lock)
   6632 			mutex_obj_free(txq->txq_lock);
   6633 	}
   6634 
   6635 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6636 }
   6637 
   6638 static void
   6639 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6640 {
   6641 
   6642 	KASSERT(mutex_owned(txq->txq_lock));
   6643 
   6644 	/* Initialize the transmit descriptor ring. */
   6645 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6646 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6647 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6648 	txq->txq_free = WM_NTXDESC(txq);
   6649 	txq->txq_next = 0;
   6650 }
   6651 
   6652 static void
   6653 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6654     struct wm_txqueue *txq)
   6655 {
   6656 
   6657 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6658 		device_xname(sc->sc_dev), __func__));
   6659 	KASSERT(mutex_owned(txq->txq_lock));
   6660 
   6661 	if (sc->sc_type < WM_T_82543) {
   6662 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6663 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6664 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6665 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6666 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6667 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6668 	} else {
   6669 		int qid = wmq->wmq_id;
   6670 
   6671 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6672 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6673 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6674 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6675 
   6676 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6677 			/*
   6678 			 * Don't write TDT before TCTL.EN is set.
    6679 			 * See the documentation.
   6680 			 */
   6681 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6682 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6683 			    | TXDCTL_WTHRESH(0));
   6684 		else {
   6685 			/* XXX should update with AIM? */
   6686 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6687 			if (sc->sc_type >= WM_T_82540) {
    6688 				/* Should be the same value as TIDV. */
   6689 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6690 			}
   6691 
   6692 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6693 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6694 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6695 		}
   6696 	}
   6697 }
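
/*
 * Sketch of the register setup above (assumed semantics of the
 * WM_CDTXADDR_HI/_LO macros): the 64-bit physical base address of the
 * descriptor ring is split across the BAH/BAL register pair, roughly
 *
 *	bus_addr_t pa = txq->txq_desc_dmamap->dm_segs[0].ds_addr;
 *	CSR_WRITE(sc, WMREG_TDBAH(qid), (uint32_t)((uint64_t)pa >> 32));
 *	CSR_WRITE(sc, WMREG_TDBAL(qid), (uint32_t)(pa & 0xffffffffUL));
 */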
   6698 
   6699 static void
   6700 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6701 {
   6702 	int i;
   6703 
   6704 	KASSERT(mutex_owned(txq->txq_lock));
   6705 
   6706 	/* Initialize the transmit job descriptors. */
   6707 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6708 		txq->txq_soft[i].txs_mbuf = NULL;
   6709 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6710 	txq->txq_snext = 0;
   6711 	txq->txq_sdirty = 0;
   6712 }
   6713 
   6714 static void
   6715 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6716     struct wm_txqueue *txq)
   6717 {
   6718 
   6719 	KASSERT(mutex_owned(txq->txq_lock));
   6720 
   6721 	/*
   6722 	 * Set up some register offsets that are different between
   6723 	 * the i82542 and the i82543 and later chips.
   6724 	 */
   6725 	if (sc->sc_type < WM_T_82543)
   6726 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6727 	else
   6728 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6729 
   6730 	wm_init_tx_descs(sc, txq);
   6731 	wm_init_tx_regs(sc, wmq, txq);
   6732 	wm_init_tx_buffer(sc, txq);
   6733 
   6734 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6735 	txq->txq_sending = false;
   6736 }
   6737 
   6738 static void
   6739 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6740     struct wm_rxqueue *rxq)
   6741 {
   6742 
   6743 	KASSERT(mutex_owned(rxq->rxq_lock));
   6744 
   6745 	/*
   6746 	 * Initialize the receive descriptor and receive job
   6747 	 * descriptor rings.
   6748 	 */
   6749 	if (sc->sc_type < WM_T_82543) {
   6750 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6751 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6752 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6753 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6754 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6755 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6756 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6757 
   6758 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6759 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6760 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6761 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6762 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6763 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6764 	} else {
   6765 		int qid = wmq->wmq_id;
   6766 
   6767 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6768 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6769 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6770 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6771 
   6772 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6773 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6774 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6775 
    6776 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6777 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6778 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6779 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6780 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6781 			    | RXDCTL_WTHRESH(1));
   6782 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6783 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6784 		} else {
   6785 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6786 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6787 			/* XXX should update with AIM? */
   6788 			CSR_WRITE(sc, WMREG_RDTR,
   6789 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    6790 			/* MUST be the same value as RDTR. */
   6791 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6792 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6793 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6794 		}
   6795 	}
   6796 }
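
/*
 * Worked example for the SRRCTL buffer-size field above (illustrative;
 * SRRCTL_BSIZEPKT_SHIFT of 10, i.e. 1 KB units, is an assumption about
 * the register layout): with the usual MCLBYTES of 2048,
 * MCLBYTES & ((1 << 10) - 1) is 0, so the panic is not taken, and the
 * value programmed is 2048 >> 10 == 2, i.e. a 2 KB receive buffer per
 * descriptor.
 */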
   6797 
   6798 static int
   6799 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6800 {
   6801 	struct wm_rxsoft *rxs;
   6802 	int error, i;
   6803 
   6804 	KASSERT(mutex_owned(rxq->rxq_lock));
   6805 
   6806 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6807 		rxs = &rxq->rxq_soft[i];
   6808 		if (rxs->rxs_mbuf == NULL) {
   6809 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6810 				log(LOG_ERR, "%s: unable to allocate or map "
   6811 				    "rx buffer %d, error = %d\n",
   6812 				    device_xname(sc->sc_dev), i, error);
   6813 				/*
   6814 				 * XXX Should attempt to run with fewer receive
   6815 				 * XXX buffers instead of just failing.
   6816 				 */
   6817 				wm_rxdrain(rxq);
   6818 				return ENOMEM;
   6819 			}
   6820 		} else {
   6821 			/*
   6822 			 * For 82575 and 82576, the RX descriptors must be
   6823 			 * initialized after the setting of RCTL.EN in
   6824 			 * wm_set_filter()
   6825 			 */
   6826 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6827 				wm_init_rxdesc(rxq, i);
   6828 		}
   6829 	}
   6830 	rxq->rxq_ptr = 0;
   6831 	rxq->rxq_discard = 0;
   6832 	WM_RXCHAIN_RESET(rxq);
   6833 
   6834 	return 0;
   6835 }
   6836 
   6837 static int
   6838 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6839     struct wm_rxqueue *rxq)
   6840 {
   6841 
   6842 	KASSERT(mutex_owned(rxq->rxq_lock));
   6843 
   6844 	/*
   6845 	 * Set up some register offsets that are different between
   6846 	 * the i82542 and the i82543 and later chips.
   6847 	 */
   6848 	if (sc->sc_type < WM_T_82543)
   6849 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6850 	else
   6851 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6852 
   6853 	wm_init_rx_regs(sc, wmq, rxq);
   6854 	return wm_init_rx_buffer(sc, rxq);
   6855 }
   6856 
   6857 /*
    6858  * wm_init_txrx_queues:
    6859  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   6860  */
   6861 static int
   6862 wm_init_txrx_queues(struct wm_softc *sc)
   6863 {
   6864 	int i, error = 0;
   6865 
   6866 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6867 		device_xname(sc->sc_dev), __func__));
   6868 
   6869 	for (i = 0; i < sc->sc_nqueues; i++) {
   6870 		struct wm_queue *wmq = &sc->sc_queue[i];
   6871 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6872 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6873 
   6874 		/*
   6875 		 * TODO
    6876 		 * Currently, a constant value is used instead of AIM.
    6877 		 * Furthermore, the interrupt interval for multiqueue
    6878 		 * (which uses polling mode) is less than the default value.
    6879 		 * More tuning and AIM support are required.
   6880 		 */
   6881 		if (wm_is_using_multiqueue(sc))
   6882 			wmq->wmq_itr = 50;
   6883 		else
   6884 			wmq->wmq_itr = sc->sc_itr_init;
   6885 		wmq->wmq_set_itr = true;
   6886 
   6887 		mutex_enter(txq->txq_lock);
   6888 		wm_init_tx_queue(sc, wmq, txq);
   6889 		mutex_exit(txq->txq_lock);
   6890 
   6891 		mutex_enter(rxq->rxq_lock);
   6892 		error = wm_init_rx_queue(sc, wmq, rxq);
   6893 		mutex_exit(rxq->rxq_lock);
   6894 		if (error)
   6895 			break;
   6896 	}
   6897 
   6898 	return error;
   6899 }
   6900 
   6901 /*
   6902  * wm_tx_offload:
   6903  *
   6904  *	Set up TCP/IP checksumming parameters for the
   6905  *	specified packet.
   6906  */
   6907 static int
   6908 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6909     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6910 {
   6911 	struct mbuf *m0 = txs->txs_mbuf;
   6912 	struct livengood_tcpip_ctxdesc *t;
   6913 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6914 	uint32_t ipcse;
   6915 	struct ether_header *eh;
   6916 	int offset, iphl;
   6917 	uint8_t fields;
   6918 
   6919 	/*
   6920 	 * XXX It would be nice if the mbuf pkthdr had offset
   6921 	 * fields for the protocol headers.
   6922 	 */
   6923 
   6924 	eh = mtod(m0, struct ether_header *);
   6925 	switch (htons(eh->ether_type)) {
   6926 	case ETHERTYPE_IP:
   6927 	case ETHERTYPE_IPV6:
   6928 		offset = ETHER_HDR_LEN;
   6929 		break;
   6930 
   6931 	case ETHERTYPE_VLAN:
   6932 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6933 		break;
   6934 
   6935 	default:
   6936 		/*
   6937 		 * Don't support this protocol or encapsulation.
   6938 		 */
   6939 		*fieldsp = 0;
   6940 		*cmdp = 0;
   6941 		return 0;
   6942 	}
   6943 
   6944 	if ((m0->m_pkthdr.csum_flags &
   6945 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6946 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6947 	} else {
   6948 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   6949 	}
   6950 	ipcse = offset + iphl - 1;
   6951 
   6952 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6953 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6954 	seg = 0;
   6955 	fields = 0;
   6956 
   6957 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6958 		int hlen = offset + iphl;
   6959 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6960 
   6961 		if (__predict_false(m0->m_len <
   6962 				    (hlen + sizeof(struct tcphdr)))) {
   6963 			/*
   6964 			 * TCP/IP headers are not in the first mbuf; we need
   6965 			 * to do this the slow and painful way. Let's just
   6966 			 * hope this doesn't happen very often.
   6967 			 */
   6968 			struct tcphdr th;
   6969 
   6970 			WM_Q_EVCNT_INCR(txq, tsopain);
   6971 
   6972 			m_copydata(m0, hlen, sizeof(th), &th);
   6973 			if (v4) {
   6974 				struct ip ip;
   6975 
   6976 				m_copydata(m0, offset, sizeof(ip), &ip);
   6977 				ip.ip_len = 0;
   6978 				m_copyback(m0,
   6979 				    offset + offsetof(struct ip, ip_len),
   6980 				    sizeof(ip.ip_len), &ip.ip_len);
   6981 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6982 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6983 			} else {
   6984 				struct ip6_hdr ip6;
   6985 
   6986 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6987 				ip6.ip6_plen = 0;
   6988 				m_copyback(m0,
   6989 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6990 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6991 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6992 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6993 			}
   6994 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6995 			    sizeof(th.th_sum), &th.th_sum);
   6996 
   6997 			hlen += th.th_off << 2;
   6998 		} else {
   6999 			/*
   7000 			 * TCP/IP headers are in the first mbuf; we can do
   7001 			 * this the easy way.
   7002 			 */
   7003 			struct tcphdr *th;
   7004 
   7005 			if (v4) {
   7006 				struct ip *ip =
   7007 				    (void *)(mtod(m0, char *) + offset);
   7008 				th = (void *)(mtod(m0, char *) + hlen);
   7009 
   7010 				ip->ip_len = 0;
   7011 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7012 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7013 			} else {
   7014 				struct ip6_hdr *ip6 =
   7015 				    (void *)(mtod(m0, char *) + offset);
   7016 				th = (void *)(mtod(m0, char *) + hlen);
   7017 
   7018 				ip6->ip6_plen = 0;
   7019 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7020 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7021 			}
   7022 			hlen += th->th_off << 2;
   7023 		}
   7024 
   7025 		if (v4) {
   7026 			WM_Q_EVCNT_INCR(txq, tso);
   7027 			cmdlen |= WTX_TCPIP_CMD_IP;
   7028 		} else {
   7029 			WM_Q_EVCNT_INCR(txq, tso6);
   7030 			ipcse = 0;
   7031 		}
   7032 		cmd |= WTX_TCPIP_CMD_TSE;
   7033 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7034 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7035 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7036 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7037 	}
   7038 
   7039 	/*
   7040 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7041 	 * offload feature, if we load the context descriptor, we
   7042 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7043 	 */
   7044 
   7045 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7046 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7047 	    WTX_TCPIP_IPCSE(ipcse);
   7048 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7049 		WM_Q_EVCNT_INCR(txq, ipsum);
   7050 		fields |= WTX_IXSM;
   7051 	}
   7052 
   7053 	offset += iphl;
   7054 
   7055 	if (m0->m_pkthdr.csum_flags &
   7056 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7057 		WM_Q_EVCNT_INCR(txq, tusum);
   7058 		fields |= WTX_TXSM;
   7059 		tucs = WTX_TCPIP_TUCSS(offset) |
   7060 		    WTX_TCPIP_TUCSO(offset +
   7061 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7062 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7063 	} else if ((m0->m_pkthdr.csum_flags &
   7064 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7065 		WM_Q_EVCNT_INCR(txq, tusum6);
   7066 		fields |= WTX_TXSM;
   7067 		tucs = WTX_TCPIP_TUCSS(offset) |
   7068 		    WTX_TCPIP_TUCSO(offset +
   7069 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7070 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7071 	} else {
   7072 		/* Just initialize it to a valid TCP context. */
   7073 		tucs = WTX_TCPIP_TUCSS(offset) |
   7074 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7075 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7076 	}
   7077 
   7078 	/*
    7079 	 * We don't have to write a context descriptor for every packet,
    7080 	 * except on the 82574: when its two descriptor queues are used,
    7081 	 * a context descriptor must be written for every packet.
    7082 	 * Writing a context descriptor for every packet adds overhead,
    7083 	 * but it does not cause problems.
   7084 	 */
   7085 	/* Fill in the context descriptor. */
   7086 	t = (struct livengood_tcpip_ctxdesc *)
   7087 	    &txq->txq_descs[txq->txq_next];
   7088 	t->tcpip_ipcs = htole32(ipcs);
   7089 	t->tcpip_tucs = htole32(tucs);
   7090 	t->tcpip_cmdlen = htole32(cmdlen);
   7091 	t->tcpip_seg = htole32(seg);
   7092 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7093 
   7094 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7095 	txs->txs_ndesc++;
   7096 
   7097 	*cmdp = cmd;
   7098 	*fieldsp = fields;
   7099 
   7100 	return 0;
   7101 }
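
/*
 * Worked example for wm_tx_offload() (illustrative): for an untagged
 * IPv4/TCP packet with a 20-byte IP header, offset = ETHER_HDR_LEN = 14
 * and iphl = 20, so the context descriptor carries
 *
 *	ipcse = 14 + 20 - 1 = 33	(last byte of the IP header)
 *	IPCSS = 14, IPCSO = 14 + 10 = 24 (offsetof(struct ip, ip_sum))
 *	TUCSS = 34, TUCSO = 34 + 16 = 50 (offsetof(struct tcphdr, th_sum))
 */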
   7102 
   7103 static inline int
   7104 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7105 {
   7106 	struct wm_softc *sc = ifp->if_softc;
   7107 	u_int cpuid = cpu_index(curcpu());
   7108 
   7109 	/*
    7110 	 * Currently, a simple distribution strategy.
    7111 	 * TODO:
    7112 	 * Distribute by flowid (RSS hash value).
   7113 	 */
   7114 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7115 }
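
/*
 * Example (illustrative numbers): with ncpu = 8, sc_nqueues = 4 and
 * sc_affinity_offset = 0, packets sent from CPU 5 always map to queue
 * (5 + 8 - 0) % 4 == 1, so each CPU sticks to one Tx queue and the
 * per-queue pcq and lock see little cross-CPU contention.
 */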
   7116 
   7117 /*
   7118  * wm_start:		[ifnet interface function]
   7119  *
   7120  *	Start packet transmission on the interface.
   7121  */
   7122 static void
   7123 wm_start(struct ifnet *ifp)
   7124 {
   7125 	struct wm_softc *sc = ifp->if_softc;
   7126 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7127 
   7128 #ifdef WM_MPSAFE
   7129 	KASSERT(if_is_mpsafe(ifp));
   7130 #endif
   7131 	/*
   7132 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7133 	 */
   7134 
   7135 	mutex_enter(txq->txq_lock);
   7136 	if (!txq->txq_stopping)
   7137 		wm_start_locked(ifp);
   7138 	mutex_exit(txq->txq_lock);
   7139 }
   7140 
   7141 static void
   7142 wm_start_locked(struct ifnet *ifp)
   7143 {
   7144 	struct wm_softc *sc = ifp->if_softc;
   7145 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7146 
   7147 	wm_send_common_locked(ifp, txq, false);
   7148 }
   7149 
   7150 static int
   7151 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7152 {
   7153 	int qid;
   7154 	struct wm_softc *sc = ifp->if_softc;
   7155 	struct wm_txqueue *txq;
   7156 
   7157 	qid = wm_select_txqueue(ifp, m);
   7158 	txq = &sc->sc_queue[qid].wmq_txq;
   7159 
   7160 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7161 		m_freem(m);
   7162 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7163 		return ENOBUFS;
   7164 	}
   7165 
   7166 	/*
   7167 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7168 	 */
   7169 	ifp->if_obytes += m->m_pkthdr.len;
   7170 	if (m->m_flags & M_MCAST)
   7171 		ifp->if_omcasts++;
   7172 
   7173 	if (mutex_tryenter(txq->txq_lock)) {
   7174 		if (!txq->txq_stopping)
   7175 			wm_transmit_locked(ifp, txq);
   7176 		mutex_exit(txq->txq_lock);
   7177 	}
   7178 
   7179 	return 0;
   7180 }
   7181 
   7182 static void
   7183 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7184 {
   7185 
   7186 	wm_send_common_locked(ifp, txq, true);
   7187 }
   7188 
   7189 static void
   7190 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7191     bool is_transmit)
   7192 {
   7193 	struct wm_softc *sc = ifp->if_softc;
   7194 	struct mbuf *m0;
   7195 	struct wm_txsoft *txs;
   7196 	bus_dmamap_t dmamap;
   7197 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7198 	bus_addr_t curaddr;
   7199 	bus_size_t seglen, curlen;
   7200 	uint32_t cksumcmd;
   7201 	uint8_t cksumfields;
   7202 	bool remap = true;
   7203 
   7204 	KASSERT(mutex_owned(txq->txq_lock));
   7205 
   7206 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7207 		return;
   7208 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7209 		return;
   7210 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7211 		return;
   7212 
   7213 	/* Remember the previous number of free descriptors. */
   7214 	ofree = txq->txq_free;
   7215 
   7216 	/*
   7217 	 * Loop through the send queue, setting up transmit descriptors
   7218 	 * until we drain the queue, or use up all available transmit
   7219 	 * descriptors.
   7220 	 */
   7221 	for (;;) {
   7222 		m0 = NULL;
   7223 
   7224 		/* Get a work queue entry. */
   7225 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7226 			wm_txeof(txq, UINT_MAX);
   7227 			if (txq->txq_sfree == 0) {
   7228 				DPRINTF(WM_DEBUG_TX,
   7229 				    ("%s: TX: no free job descriptors\n",
   7230 					device_xname(sc->sc_dev)));
   7231 				WM_Q_EVCNT_INCR(txq, txsstall);
   7232 				break;
   7233 			}
   7234 		}
   7235 
   7236 		/* Grab a packet off the queue. */
   7237 		if (is_transmit)
   7238 			m0 = pcq_get(txq->txq_interq);
   7239 		else
   7240 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7241 		if (m0 == NULL)
   7242 			break;
   7243 
   7244 		DPRINTF(WM_DEBUG_TX,
   7245 		    ("%s: TX: have packet to transmit: %p\n",
   7246 			device_xname(sc->sc_dev), m0));
   7247 
   7248 		txs = &txq->txq_soft[txq->txq_snext];
   7249 		dmamap = txs->txs_dmamap;
   7250 
   7251 		use_tso = (m0->m_pkthdr.csum_flags &
   7252 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7253 
   7254 		/*
   7255 		 * So says the Linux driver:
   7256 		 * The controller does a simple calculation to make sure
   7257 		 * there is enough room in the FIFO before initiating the
   7258 		 * DMA for each buffer. The calc is:
   7259 		 *	4 = ceil(buffer len / MSS)
   7260 		 * To make sure we don't overrun the FIFO, adjust the max
   7261 		 * buffer len if the MSS drops.
   7262 		 */
   7263 		dmamap->dm_maxsegsz =
   7264 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7265 		    ? m0->m_pkthdr.segsz << 2
   7266 		    : WTX_MAX_LEN;
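
		/*
		 * Worked example (illustrative; assumes WTX_MAX_LEN exceeds
		 * segsz << 2): with an MSS of 512, segsz << 2 == 2048, so
		 * each DMA segment is capped at 2048 bytes and
		 * ceil(2048 / 512) == 4 matches the FIFO calculation quoted
		 * above.
		 */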
   7267 
   7268 		/*
   7269 		 * Load the DMA map.  If this fails, the packet either
   7270 		 * didn't fit in the allotted number of segments, or we
   7271 		 * were short on resources.  For the too-many-segments
   7272 		 * case, we simply report an error and drop the packet,
   7273 		 * since we can't sanely copy a jumbo packet to a single
   7274 		 * buffer.
   7275 		 */
   7276 retry:
   7277 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7278 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7279 		if (__predict_false(error)) {
   7280 			if (error == EFBIG) {
   7281 				if (remap == true) {
   7282 					struct mbuf *m;
   7283 
   7284 					remap = false;
   7285 					m = m_defrag(m0, M_NOWAIT);
   7286 					if (m != NULL) {
   7287 						WM_Q_EVCNT_INCR(txq, defrag);
   7288 						m0 = m;
   7289 						goto retry;
   7290 					}
   7291 				}
   7292 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7293 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7294 				    "DMA segments, dropping...\n",
   7295 				    device_xname(sc->sc_dev));
   7296 				wm_dump_mbuf_chain(sc, m0);
   7297 				m_freem(m0);
   7298 				continue;
   7299 			}
   7300 			/*  Short on resources, just stop for now. */
   7301 			DPRINTF(WM_DEBUG_TX,
   7302 			    ("%s: TX: dmamap load failed: %d\n",
   7303 				device_xname(sc->sc_dev), error));
   7304 			break;
   7305 		}
   7306 
   7307 		segs_needed = dmamap->dm_nsegs;
   7308 		if (use_tso) {
   7309 			/* For sentinel descriptor; see below. */
   7310 			segs_needed++;
   7311 		}
   7312 
   7313 		/*
   7314 		 * Ensure we have enough descriptors free to describe
   7315 		 * the packet. Note, we always reserve one descriptor
   7316 		 * at the end of the ring due to the semantics of the
   7317 		 * TDT register, plus one more in the event we need
   7318 		 * to load offload context.
   7319 		 */
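		/*
		 * E.g. (illustrative): with txq_free == 10, a packet that
		 * needs 9 descriptors is deferred, since 9 > 10 - 2.
		 */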
   7320 		if (segs_needed > txq->txq_free - 2) {
   7321 			/*
   7322 			 * Not enough free descriptors to transmit this
   7323 			 * packet.  We haven't committed anything yet,
   7324 			 * so just unload the DMA map, put the packet
    7325 			 * back on the queue, and punt. Notify the upper
   7326 			 * layer that there are no more slots left.
   7327 			 */
   7328 			DPRINTF(WM_DEBUG_TX,
   7329 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7330 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7331 				segs_needed, txq->txq_free - 1));
   7332 			if (!is_transmit)
   7333 				ifp->if_flags |= IFF_OACTIVE;
   7334 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7335 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7336 			WM_Q_EVCNT_INCR(txq, txdstall);
   7337 			break;
   7338 		}
   7339 
   7340 		/*
   7341 		 * Check for 82547 Tx FIFO bug. We need to do this
   7342 		 * once we know we can transmit the packet, since we
   7343 		 * do some internal FIFO space accounting here.
   7344 		 */
   7345 		if (sc->sc_type == WM_T_82547 &&
   7346 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7347 			DPRINTF(WM_DEBUG_TX,
   7348 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7349 				device_xname(sc->sc_dev)));
   7350 			if (!is_transmit)
   7351 				ifp->if_flags |= IFF_OACTIVE;
   7352 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7353 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7354 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7355 			break;
   7356 		}
   7357 
   7358 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7359 
   7360 		DPRINTF(WM_DEBUG_TX,
   7361 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7362 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7363 
   7364 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7365 
   7366 		/*
   7367 		 * Store a pointer to the packet so that we can free it
   7368 		 * later.
   7369 		 *
   7370 		 * Initially, we consider the number of descriptors the
   7371 		 * packet uses the number of DMA segments.  This may be
   7372 		 * incremented by 1 if we do checksum offload (a descriptor
   7373 		 * is used to set the checksum context).
   7374 		 */
   7375 		txs->txs_mbuf = m0;
   7376 		txs->txs_firstdesc = txq->txq_next;
   7377 		txs->txs_ndesc = segs_needed;
   7378 
   7379 		/* Set up offload parameters for this packet. */
   7380 		if (m0->m_pkthdr.csum_flags &
   7381 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7382 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7383 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7384 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7385 					  &cksumfields) != 0) {
   7386 				/* Error message already displayed. */
   7387 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7388 				continue;
   7389 			}
   7390 		} else {
   7391 			cksumcmd = 0;
   7392 			cksumfields = 0;
   7393 		}
   7394 
   7395 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7396 
   7397 		/* Sync the DMA map. */
   7398 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7399 		    BUS_DMASYNC_PREWRITE);
   7400 
   7401 		/* Initialize the transmit descriptor. */
   7402 		for (nexttx = txq->txq_next, seg = 0;
   7403 		     seg < dmamap->dm_nsegs; seg++) {
   7404 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7405 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7406 			     seglen != 0;
   7407 			     curaddr += curlen, seglen -= curlen,
   7408 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7409 				curlen = seglen;
   7410 
   7411 				/*
   7412 				 * So says the Linux driver:
   7413 				 * Work around for premature descriptor
   7414 				 * write-backs in TSO mode.  Append a
   7415 				 * 4-byte sentinel descriptor.
   7416 				 */
   7417 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7418 				    curlen > 8)
   7419 					curlen -= 4;
   7420 
   7421 				wm_set_dma_addr(
   7422 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7423 				txq->txq_descs[nexttx].wtx_cmdlen
   7424 				    = htole32(cksumcmd | curlen);
   7425 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7426 				    = 0;
   7427 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7428 				    = cksumfields;
    7429 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7430 				lasttx = nexttx;
   7431 
   7432 				DPRINTF(WM_DEBUG_TX,
   7433 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7434 					"len %#04zx\n",
   7435 					device_xname(sc->sc_dev), nexttx,
   7436 					(uint64_t)curaddr, curlen));
   7437 			}
   7438 		}
   7439 
   7440 		KASSERT(lasttx != -1);
   7441 
   7442 		/*
   7443 		 * Set up the command byte on the last descriptor of
   7444 		 * the packet. If we're in the interrupt delay window,
   7445 		 * delay the interrupt.
   7446 		 */
   7447 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7448 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7449 
   7450 		/*
   7451 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7452 		 * up the descriptor to encapsulate the packet for us.
   7453 		 *
   7454 		 * This is only valid on the last descriptor of the packet.
   7455 		 */
   7456 		if (vlan_has_tag(m0)) {
   7457 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7458 			    htole32(WTX_CMD_VLE);
   7459 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7460 			    = htole16(vlan_get_tag(m0));
   7461 		}
   7462 
   7463 		txs->txs_lastdesc = lasttx;
   7464 
   7465 		DPRINTF(WM_DEBUG_TX,
   7466 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7467 			device_xname(sc->sc_dev),
   7468 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7469 
   7470 		/* Sync the descriptors we're using. */
   7471 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7472 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7473 
   7474 		/* Give the packet to the chip. */
   7475 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7476 
   7477 		DPRINTF(WM_DEBUG_TX,
   7478 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7479 
   7480 		DPRINTF(WM_DEBUG_TX,
   7481 		    ("%s: TX: finished transmitting packet, job %d\n",
   7482 			device_xname(sc->sc_dev), txq->txq_snext));
   7483 
   7484 		/* Advance the tx pointer. */
   7485 		txq->txq_free -= txs->txs_ndesc;
   7486 		txq->txq_next = nexttx;
   7487 
   7488 		txq->txq_sfree--;
   7489 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7490 
   7491 		/* Pass the packet to any BPF listeners. */
   7492 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7493 	}
   7494 
   7495 	if (m0 != NULL) {
   7496 		if (!is_transmit)
   7497 			ifp->if_flags |= IFF_OACTIVE;
   7498 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7499 		WM_Q_EVCNT_INCR(txq, descdrop);
   7500 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7501 			__func__));
   7502 		m_freem(m0);
   7503 	}
   7504 
   7505 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7506 		/* No more slots; notify upper layer. */
   7507 		if (!is_transmit)
   7508 			ifp->if_flags |= IFF_OACTIVE;
   7509 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7510 	}
   7511 
   7512 	if (txq->txq_free != ofree) {
   7513 		/* Set a watchdog timer in case the chip flakes out. */
   7514 		txq->txq_lastsent = time_uptime;
   7515 		txq->txq_sending = true;
   7516 	}
   7517 }
   7518 
   7519 /*
   7520  * wm_nq_tx_offload:
   7521  *
   7522  *	Set up TCP/IP checksumming parameters for the
   7523  *	specified packet, for NEWQUEUE devices
   7524  */
   7525 static int
   7526 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7527     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7528 {
   7529 	struct mbuf *m0 = txs->txs_mbuf;
   7530 	uint32_t vl_len, mssidx, cmdc;
   7531 	struct ether_header *eh;
   7532 	int offset, iphl;
   7533 
   7534 	/*
   7535 	 * XXX It would be nice if the mbuf pkthdr had offset
   7536 	 * fields for the protocol headers.
   7537 	 */
   7538 	*cmdlenp = 0;
   7539 	*fieldsp = 0;
   7540 
   7541 	eh = mtod(m0, struct ether_header *);
   7542 	switch (htons(eh->ether_type)) {
   7543 	case ETHERTYPE_IP:
   7544 	case ETHERTYPE_IPV6:
   7545 		offset = ETHER_HDR_LEN;
   7546 		break;
   7547 
   7548 	case ETHERTYPE_VLAN:
   7549 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7550 		break;
   7551 
   7552 	default:
   7553 		/* Don't support this protocol or encapsulation. */
   7554 		*do_csum = false;
   7555 		return 0;
   7556 	}
   7557 	*do_csum = true;
   7558 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7559 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7560 
   7561 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7562 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7563 
   7564 	if ((m0->m_pkthdr.csum_flags &
   7565 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7566 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7567 	} else {
   7568 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7569 	}
   7570 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7571 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7572 
   7573 	if (vlan_has_tag(m0)) {
   7574 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7575 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7576 		*cmdlenp |= NQTX_CMD_VLE;
   7577 	}
   7578 
   7579 	mssidx = 0;
   7580 
   7581 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7582 		int hlen = offset + iphl;
   7583 		int tcp_hlen;
   7584 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7585 
   7586 		if (__predict_false(m0->m_len <
   7587 				    (hlen + sizeof(struct tcphdr)))) {
   7588 			/*
   7589 			 * TCP/IP headers are not in the first mbuf; we need
   7590 			 * to do this the slow and painful way. Let's just
   7591 			 * hope this doesn't happen very often.
   7592 			 */
   7593 			struct tcphdr th;
   7594 
   7595 			WM_Q_EVCNT_INCR(txq, tsopain);
   7596 
   7597 			m_copydata(m0, hlen, sizeof(th), &th);
   7598 			if (v4) {
   7599 				struct ip ip;
   7600 
   7601 				m_copydata(m0, offset, sizeof(ip), &ip);
   7602 				ip.ip_len = 0;
   7603 				m_copyback(m0,
   7604 				    offset + offsetof(struct ip, ip_len),
   7605 				    sizeof(ip.ip_len), &ip.ip_len);
   7606 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7607 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7608 			} else {
   7609 				struct ip6_hdr ip6;
   7610 
   7611 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7612 				ip6.ip6_plen = 0;
   7613 				m_copyback(m0,
   7614 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7615 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7616 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7617 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7618 			}
   7619 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7620 			    sizeof(th.th_sum), &th.th_sum);
   7621 
   7622 			tcp_hlen = th.th_off << 2;
   7623 		} else {
   7624 			/*
   7625 			 * TCP/IP headers are in the first mbuf; we can do
   7626 			 * this the easy way.
   7627 			 */
   7628 			struct tcphdr *th;
   7629 
   7630 			if (v4) {
   7631 				struct ip *ip =
   7632 				    (void *)(mtod(m0, char *) + offset);
   7633 				th = (void *)(mtod(m0, char *) + hlen);
   7634 
   7635 				ip->ip_len = 0;
   7636 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7637 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7638 			} else {
   7639 				struct ip6_hdr *ip6 =
   7640 				    (void *)(mtod(m0, char *) + offset);
   7641 				th = (void *)(mtod(m0, char *) + hlen);
   7642 
   7643 				ip6->ip6_plen = 0;
   7644 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7645 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7646 			}
   7647 			tcp_hlen = th->th_off << 2;
   7648 		}
   7649 		hlen += tcp_hlen;
   7650 		*cmdlenp |= NQTX_CMD_TSE;
   7651 
   7652 		if (v4) {
   7653 			WM_Q_EVCNT_INCR(txq, tso);
   7654 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7655 		} else {
   7656 			WM_Q_EVCNT_INCR(txq, tso6);
   7657 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7658 		}
   7659 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7660 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7661 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7662 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7663 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7664 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7665 	} else {
   7666 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7667 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7668 	}
   7669 
   7670 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7671 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7672 		cmdc |= NQTXC_CMD_IP4;
   7673 	}
   7674 
   7675 	if (m0->m_pkthdr.csum_flags &
   7676 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7677 		WM_Q_EVCNT_INCR(txq, tusum);
   7678 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7679 			cmdc |= NQTXC_CMD_TCP;
   7680 		} else {
   7681 			cmdc |= NQTXC_CMD_UDP;
   7682 		}
   7683 		cmdc |= NQTXC_CMD_IP4;
   7684 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7685 	}
   7686 	if (m0->m_pkthdr.csum_flags &
   7687 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7688 		WM_Q_EVCNT_INCR(txq, tusum6);
   7689 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7690 			cmdc |= NQTXC_CMD_TCP;
   7691 		} else {
   7692 			cmdc |= NQTXC_CMD_UDP;
   7693 		}
   7694 		cmdc |= NQTXC_CMD_IP6;
   7695 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7696 	}
   7697 
   7698 	/*
    7699 	 * We don't have to write a context descriptor for every packet on
    7700 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7701 	 * I210 and I211; writing one per Tx queue is enough for these
    7702 	 * controllers.
    7703 	 * Writing a context descriptor for every packet adds overhead,
    7704 	 * but it does not cause problems.
   7705 	 */
   7706 	/* Fill in the context descriptor. */
   7707 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7708 	    htole32(vl_len);
   7709 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7710 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7711 	    htole32(cmdc);
   7712 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7713 	    htole32(mssidx);
   7714 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7715 	DPRINTF(WM_DEBUG_TX,
   7716 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7717 		txq->txq_next, 0, vl_len));
   7718 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7719 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7720 	txs->txs_ndesc++;
   7721 	return 0;
   7722 }
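
/*
 * Worked example for wm_nq_tx_offload() (illustrative): for an untagged
 * IPv4/TCP packet with a 20-byte IP header and no TSO, the context fields
 * pack as
 *
 *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
 *	       | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
 *	cmdc   = NQTX_DTYP_C | NQTX_CMD_DEXT | NQTXC_CMD_IP4 | NQTXC_CMD_TCP;
 *
 * and *fieldsp carries the full m_pkthdr.len in the PAYLEN field plus
 * NQTXD_FIELDS_TUXSM (and NQTXD_FIELDS_IXSM when the IP checksum is
 * offloaded too).
 */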
   7723 
   7724 /*
   7725  * wm_nq_start:		[ifnet interface function]
   7726  *
   7727  *	Start packet transmission on the interface for NEWQUEUE devices
   7728  */
   7729 static void
   7730 wm_nq_start(struct ifnet *ifp)
   7731 {
   7732 	struct wm_softc *sc = ifp->if_softc;
   7733 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7734 
   7735 #ifdef WM_MPSAFE
   7736 	KASSERT(if_is_mpsafe(ifp));
   7737 #endif
   7738 	/*
   7739 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7740 	 */
   7741 
   7742 	mutex_enter(txq->txq_lock);
   7743 	if (!txq->txq_stopping)
   7744 		wm_nq_start_locked(ifp);
   7745 	mutex_exit(txq->txq_lock);
   7746 }
   7747 
   7748 static void
   7749 wm_nq_start_locked(struct ifnet *ifp)
   7750 {
   7751 	struct wm_softc *sc = ifp->if_softc;
   7752 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7753 
   7754 	wm_nq_send_common_locked(ifp, txq, false);
   7755 }
   7756 
   7757 static int
   7758 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7759 {
   7760 	int qid;
   7761 	struct wm_softc *sc = ifp->if_softc;
   7762 	struct wm_txqueue *txq;
   7763 
   7764 	qid = wm_select_txqueue(ifp, m);
   7765 	txq = &sc->sc_queue[qid].wmq_txq;
   7766 
   7767 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7768 		m_freem(m);
   7769 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7770 		return ENOBUFS;
   7771 	}
   7772 
   7773 	/*
   7774 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7775 	 */
   7776 	ifp->if_obytes += m->m_pkthdr.len;
   7777 	if (m->m_flags & M_MCAST)
   7778 		ifp->if_omcasts++;
   7779 
   7780 	/*
    7781 	 * There are two situations in which this mutex_tryenter() can
    7782 	 * fail at run time:
    7783 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7784 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    7785 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7786 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7787 	 * In case (2), the last packet enqueued to txq->txq_interq is
    7788 	 * likewise dequeued by wm_deferred_start_locked(), so it does not
    7789 	 * get stuck either.
   7790 	 */
   7791 	if (mutex_tryenter(txq->txq_lock)) {
   7792 		if (!txq->txq_stopping)
   7793 			wm_nq_transmit_locked(ifp, txq);
   7794 		mutex_exit(txq->txq_lock);
   7795 	}
   7796 
   7797 	return 0;
   7798 }
   7799 
   7800 static void
   7801 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7802 {
   7803 
   7804 	wm_nq_send_common_locked(ifp, txq, true);
   7805 }
   7806 
   7807 static void
   7808 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7809     bool is_transmit)
   7810 {
   7811 	struct wm_softc *sc = ifp->if_softc;
   7812 	struct mbuf *m0;
   7813 	struct wm_txsoft *txs;
   7814 	bus_dmamap_t dmamap;
   7815 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7816 	bool do_csum, sent;
   7817 	bool remap = true;
   7818 
   7819 	KASSERT(mutex_owned(txq->txq_lock));
   7820 
   7821 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7822 		return;
   7823 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7824 		return;
   7825 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7826 		return;
   7827 
   7828 	sent = false;
   7829 
   7830 	/*
   7831 	 * Loop through the send queue, setting up transmit descriptors
   7832 	 * until we drain the queue, or use up all available transmit
   7833 	 * descriptors.
   7834 	 */
   7835 	for (;;) {
   7836 		m0 = NULL;
   7837 
   7838 		/* Get a work queue entry. */
   7839 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7840 			wm_txeof(txq, UINT_MAX);
   7841 			if (txq->txq_sfree == 0) {
   7842 				DPRINTF(WM_DEBUG_TX,
   7843 				    ("%s: TX: no free job descriptors\n",
   7844 					device_xname(sc->sc_dev)));
   7845 				WM_Q_EVCNT_INCR(txq, txsstall);
   7846 				break;
   7847 			}
   7848 		}
   7849 
   7850 		/* Grab a packet off the queue. */
   7851 		if (is_transmit)
   7852 			m0 = pcq_get(txq->txq_interq);
   7853 		else
   7854 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7855 		if (m0 == NULL)
   7856 			break;
   7857 
   7858 		DPRINTF(WM_DEBUG_TX,
   7859 		    ("%s: TX: have packet to transmit: %p\n",
   7860 		    device_xname(sc->sc_dev), m0));
   7861 
   7862 		txs = &txq->txq_soft[txq->txq_snext];
   7863 		dmamap = txs->txs_dmamap;
   7864 
   7865 		/*
   7866 		 * Load the DMA map.  If this fails, the packet either
   7867 		 * didn't fit in the allotted number of segments, or we
   7868 		 * were short on resources.  For the too-many-segments
   7869 		 * case, we simply report an error and drop the packet,
   7870 		 * since we can't sanely copy a jumbo packet to a single
   7871 		 * buffer.
   7872 		 */
   7873 retry:
   7874 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7875 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7876 		if (__predict_false(error)) {
   7877 			if (error == EFBIG) {
   7878 				if (remap == true) {
   7879 					struct mbuf *m;
   7880 
   7881 					remap = false;
   7882 					m = m_defrag(m0, M_NOWAIT);
   7883 					if (m != NULL) {
   7884 						WM_Q_EVCNT_INCR(txq, defrag);
   7885 						m0 = m;
   7886 						goto retry;
   7887 					}
   7888 				}
   7889 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7890 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7891 				    "DMA segments, dropping...\n",
   7892 				    device_xname(sc->sc_dev));
   7893 				wm_dump_mbuf_chain(sc, m0);
   7894 				m_freem(m0);
   7895 				continue;
   7896 			}
   7897 			/* Short on resources, just stop for now. */
   7898 			DPRINTF(WM_DEBUG_TX,
   7899 			    ("%s: TX: dmamap load failed: %d\n",
   7900 				device_xname(sc->sc_dev), error));
   7901 			break;
   7902 		}
   7903 
   7904 		segs_needed = dmamap->dm_nsegs;
   7905 
		/*
		 * Ensure that we have enough descriptors free to describe
		 * the packet.  Note that we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in case we need to load an
		 * offload context.
		 */
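		/*
		 * For example, with txq_free == 8, a packet that needs 7
		 * segments is deferred (7 > 8 - 2), while one that needs
		 * 6 segments still fits.
		 */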
   7913 		if (segs_needed > txq->txq_free - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
   7921 			DPRINTF(WM_DEBUG_TX,
   7922 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7923 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7924 				segs_needed, txq->txq_free - 1));
   7925 			if (!is_transmit)
   7926 				ifp->if_flags |= IFF_OACTIVE;
   7927 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7928 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7929 			WM_Q_EVCNT_INCR(txq, txdstall);
   7930 			break;
   7931 		}
   7932 
   7933 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7934 
   7935 		DPRINTF(WM_DEBUG_TX,
   7936 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7937 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7938 
   7939 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7940 
		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we take the number of descriptors the packet
		 * uses to be the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
   7950 		txs->txs_mbuf = m0;
   7951 		txs->txs_firstdesc = txq->txq_next;
   7952 		txs->txs_ndesc = segs_needed;
   7953 
   7954 		/* Set up offload parameters for this packet. */
   7955 		uint32_t cmdlen, fields, dcmdlen;
   7956 		if (m0->m_pkthdr.csum_flags &
   7957 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7958 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7959 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7960 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7961 			    &do_csum) != 0) {
   7962 				/* Error message already displayed. */
   7963 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7964 				continue;
   7965 			}
   7966 		} else {
   7967 			do_csum = false;
   7968 			cmdlen = 0;
   7969 			fields = 0;
   7970 		}
   7971 
   7972 		/* Sync the DMA map. */
   7973 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7974 		    BUS_DMASYNC_PREWRITE);
   7975 
   7976 		/* Initialize the first transmit descriptor. */
   7977 		nexttx = txq->txq_next;
   7978 		if (!do_csum) {
   7979 			/* setup a legacy descriptor */
   7980 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7981 			    dmamap->dm_segs[0].ds_addr);
   7982 			txq->txq_descs[nexttx].wtx_cmdlen =
   7983 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7984 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7985 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7986 			if (vlan_has_tag(m0)) {
   7987 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7988 				    htole32(WTX_CMD_VLE);
   7989 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7990 				    htole16(vlan_get_tag(m0));
   7991 			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7993 			}
   7994 			dcmdlen = 0;
   7995 		} else {
   7996 			/* setup an advanced data descriptor */
   7997 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7998 			    htole64(dmamap->dm_segs[0].ds_addr);
   7999 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8000 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8002 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8003 			    htole32(fields);
   8004 			DPRINTF(WM_DEBUG_TX,
   8005 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8006 				device_xname(sc->sc_dev), nexttx,
   8007 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8008 			DPRINTF(WM_DEBUG_TX,
   8009 			    ("\t 0x%08x%08x\n", fields,
   8010 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8011 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8012 		}
   8013 
   8014 		lasttx = nexttx;
   8015 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the next descriptors.  The legacy and advanced
		 * formats are the same from here on.
		 */
   8020 		for (seg = 1; seg < dmamap->dm_nsegs;
   8021 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8022 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8023 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8024 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8025 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8026 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8027 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8028 			lasttx = nexttx;
   8029 
   8030 			DPRINTF(WM_DEBUG_TX,
   8031 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8032 				device_xname(sc->sc_dev), nexttx,
   8033 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8034 				dmamap->dm_segs[seg].ds_len));
   8035 		}
   8036 
   8037 		KASSERT(lasttx != -1);
   8038 
		/*
		 * Set up the command byte on the last descriptor of
		 * the packet: mark end-of-packet and request a status
		 * report on completion.
		 */
   8044 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8045 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
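		/*
		 * The EOP and RS bits occupy the same positions in both
		 * the legacy and advanced command fields (asserted just
		 * above), so the legacy view of the descriptor can be
		 * used to set them regardless of the format in use.
		 */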
   8046 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8047 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8048 
   8049 		txs->txs_lastdesc = lasttx;
   8050 
   8051 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8052 		    device_xname(sc->sc_dev),
   8053 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8054 
   8055 		/* Sync the descriptors we're using. */
   8056 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8057 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8058 
   8059 		/* Give the packet to the chip. */
   8060 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8061 		sent = true;
   8062 
   8063 		DPRINTF(WM_DEBUG_TX,
   8064 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8065 
   8066 		DPRINTF(WM_DEBUG_TX,
   8067 		    ("%s: TX: finished transmitting packet, job %d\n",
   8068 			device_xname(sc->sc_dev), txq->txq_snext));
   8069 
   8070 		/* Advance the tx pointer. */
   8071 		txq->txq_free -= txs->txs_ndesc;
   8072 		txq->txq_next = nexttx;
   8073 
   8074 		txq->txq_sfree--;
   8075 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8076 
   8077 		/* Pass the packet to any BPF listeners. */
   8078 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8079 	}
   8080 
   8081 	if (m0 != NULL) {
   8082 		if (!is_transmit)
   8083 			ifp->if_flags |= IFF_OACTIVE;
   8084 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8085 		WM_Q_EVCNT_INCR(txq, descdrop);
   8086 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8087 			__func__));
   8088 		m_freem(m0);
   8089 	}
   8090 
   8091 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8092 		/* No more slots; notify upper layer. */
   8093 		if (!is_transmit)
   8094 			ifp->if_flags |= IFF_OACTIVE;
   8095 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8096 	}
   8097 
   8098 	if (sent) {
   8099 		/* Set a watchdog timer in case the chip flakes out. */
   8100 		txq->txq_lastsent = time_uptime;
   8101 		txq->txq_sending = true;
   8102 	}
   8103 }
   8104 
   8105 static void
   8106 wm_deferred_start_locked(struct wm_txqueue *txq)
   8107 {
   8108 	struct wm_softc *sc = txq->txq_sc;
   8109 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8110 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8111 	int qid = wmq->wmq_id;
   8112 
   8113 	KASSERT(mutex_owned(txq->txq_lock));
   8114 
   8115 	if (txq->txq_stopping) {
   8116 		mutex_exit(txq->txq_lock);
   8117 		return;
   8118 	}
   8119 
   8120 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
   8122 		if (qid == 0)
   8123 			wm_nq_start_locked(ifp);
   8124 		wm_nq_transmit_locked(ifp, txq);
   8125 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
   8127 		if (qid == 0)
   8128 			wm_start_locked(ifp);
   8129 		wm_transmit_locked(ifp, txq);
   8130 	}
   8131 }
   8132 
   8133 /* Interrupt */
   8134 
   8135 /*
   8136  * wm_txeof:
   8137  *
   8138  *	Helper; handle transmit interrupts.
   8139  */
   8140 static bool
   8141 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8142 {
   8143 	struct wm_softc *sc = txq->txq_sc;
   8144 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8145 	struct wm_txsoft *txs;
   8146 	int count = 0;
   8147 	int i;
   8148 	uint8_t status;
   8149 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8150 	bool more = false;
   8151 
   8152 	KASSERT(mutex_owned(txq->txq_lock));
   8153 
   8154 	if (txq->txq_stopping)
   8155 		return false;
   8156 
   8157 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) Ethernet controllers */
   8159 	if (wmq->wmq_id == 0)
   8160 		ifp->if_flags &= ~IFF_OACTIVE;
   8161 
   8162 	/*
   8163 	 * Go through the Tx list and free mbufs for those
   8164 	 * frames which have been transmitted.
   8165 	 */
   8166 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8167 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8168 		if (limit-- == 0) {
   8169 			more = true;
   8170 			DPRINTF(WM_DEBUG_TX,
   8171 			    ("%s: TX: loop limited, job %d is not processed\n",
   8172 				device_xname(sc->sc_dev), i));
   8173 			break;
   8174 		}
   8175 
   8176 		txs = &txq->txq_soft[i];
   8177 
   8178 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8179 			device_xname(sc->sc_dev), i));
   8180 
   8181 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8182 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8183 
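		/*
		 * If the descriptor done (DD) bit is not set, the chip
		 * has not finished with this job yet; re-arm the PREREAD
		 * sync and stop scanning.
		 */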
   8184 		status =
   8185 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8186 		if ((status & WTX_ST_DD) == 0) {
   8187 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8188 			    BUS_DMASYNC_PREREAD);
   8189 			break;
   8190 		}
   8191 
   8192 		count++;
   8193 		DPRINTF(WM_DEBUG_TX,
   8194 		    ("%s: TX: job %d done: descs %d..%d\n",
   8195 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8196 		    txs->txs_lastdesc));
   8197 
   8198 		/*
   8199 		 * XXX We should probably be using the statistics
   8200 		 * XXX registers, but I don't know if they exist
   8201 		 * XXX on chips before the i82544.
   8202 		 */
   8203 
   8204 #ifdef WM_EVENT_COUNTERS
   8205 		if (status & WTX_ST_TU)
   8206 			WM_Q_EVCNT_INCR(txq, underrun);
   8207 #endif /* WM_EVENT_COUNTERS */
   8208 
		/*
		 * The documentation for the 82574 and newer says that the
		 * status field has neither the EC (Excessive Collision) bit
		 * nor the LC (Late Collision) bit; both are reserved.  See
		 * the "PCIe GbE Controller Open Source Software Developer's
		 * Manual" and the 82574 and newer datasheets.
		 *
		 * XXX I saw the LC bit set on an I218 even though the media
		 * was full duplex, so the bit might have some other
		 * meaning... (I have no documentation for it).
		 */
   8219 
   8220 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8221 		    && ((sc->sc_type < WM_T_82574)
   8222 			|| (sc->sc_type == WM_T_80003))) {
   8223 			ifp->if_oerrors++;
   8224 			if (status & WTX_ST_LC)
   8225 				log(LOG_WARNING, "%s: late collision\n",
   8226 				    device_xname(sc->sc_dev));
   8227 			else if (status & WTX_ST_EC) {
   8228 				ifp->if_collisions +=
   8229 				    TX_COLLISION_THRESHOLD + 1;
   8230 				log(LOG_WARNING, "%s: excessive collisions\n",
   8231 				    device_xname(sc->sc_dev));
   8232 			}
   8233 		} else
   8234 			ifp->if_opackets++;
   8235 
   8236 		txq->txq_packets++;
   8237 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8238 
   8239 		txq->txq_free += txs->txs_ndesc;
   8240 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8241 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8242 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8243 		m_freem(txs->txs_mbuf);
   8244 		txs->txs_mbuf = NULL;
   8245 	}
   8246 
   8247 	/* Update the dirty transmit buffer pointer. */
   8248 	txq->txq_sdirty = i;
   8249 	DPRINTF(WM_DEBUG_TX,
   8250 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8251 
   8252 	if (count != 0)
   8253 		rnd_add_uint32(&sc->rnd_source, count);
   8254 
   8255 	/*
   8256 	 * If there are no more pending transmissions, cancel the watchdog
   8257 	 * timer.
   8258 	 */
   8259 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8260 		txq->txq_sending = false;
   8261 
   8262 	return more;
   8263 }
   8264 
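/*
 * Receive descriptor accessors.
 *
 * Three Rx descriptor layouts are in use: the legacy layout, the
 * extended layout (82574 only) and the advanced "new queue" layout
 * (WM_F_NEWQUEUE, 82575 and newer).  The inline helpers below hide
 * the layout differences when extracting the status, error, VLAN tag,
 * packet length and RSS fields.
 */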
   8265 static inline uint32_t
   8266 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8267 {
   8268 	struct wm_softc *sc = rxq->rxq_sc;
   8269 
   8270 	if (sc->sc_type == WM_T_82574)
   8271 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8272 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8273 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8274 	else
   8275 		return rxq->rxq_descs[idx].wrx_status;
   8276 }
   8277 
   8278 static inline uint32_t
   8279 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8280 {
   8281 	struct wm_softc *sc = rxq->rxq_sc;
   8282 
   8283 	if (sc->sc_type == WM_T_82574)
   8284 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8285 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8286 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8287 	else
   8288 		return rxq->rxq_descs[idx].wrx_errors;
   8289 }
   8290 
   8291 static inline uint16_t
   8292 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8293 {
   8294 	struct wm_softc *sc = rxq->rxq_sc;
   8295 
   8296 	if (sc->sc_type == WM_T_82574)
   8297 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8298 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8299 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8300 	else
   8301 		return rxq->rxq_descs[idx].wrx_special;
   8302 }
   8303 
   8304 static inline int
   8305 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8306 {
   8307 	struct wm_softc *sc = rxq->rxq_sc;
   8308 
   8309 	if (sc->sc_type == WM_T_82574)
   8310 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8311 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8312 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8313 	else
   8314 		return rxq->rxq_descs[idx].wrx_len;
   8315 }
   8316 
   8317 #ifdef WM_DEBUG
   8318 static inline uint32_t
   8319 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8320 {
   8321 	struct wm_softc *sc = rxq->rxq_sc;
   8322 
   8323 	if (sc->sc_type == WM_T_82574)
   8324 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8325 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8326 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8327 	else
   8328 		return 0;
   8329 }
   8330 
   8331 static inline uint8_t
   8332 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8333 {
   8334 	struct wm_softc *sc = rxq->rxq_sc;
   8335 
   8336 	if (sc->sc_type == WM_T_82574)
   8337 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8338 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8339 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8340 	else
   8341 		return 0;
   8342 }
   8343 #endif /* WM_DEBUG */
   8344 
   8345 static inline bool
   8346 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8347     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8348 {
   8349 
   8350 	if (sc->sc_type == WM_T_82574)
   8351 		return (status & ext_bit) != 0;
   8352 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8353 		return (status & nq_bit) != 0;
   8354 	else
   8355 		return (status & legacy_bit) != 0;
   8356 }
   8357 
   8358 static inline bool
   8359 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8360     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8361 {
   8362 
   8363 	if (sc->sc_type == WM_T_82574)
   8364 		return (error & ext_bit) != 0;
   8365 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8366 		return (error & nq_bit) != 0;
   8367 	else
   8368 		return (error & legacy_bit) != 0;
   8369 }
   8370 
   8371 static inline bool
   8372 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8373 {
   8374 
	return wm_rxdesc_is_set_status(rxq->rxq_sc, status,
	    WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP);
   8380 }
   8381 
   8382 static inline bool
   8383 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8384 {
   8385 	struct wm_softc *sc = rxq->rxq_sc;
   8386 
   8387 	/* XXXX missing error bit for newqueue? */
   8388 	if (wm_rxdesc_is_set_error(sc, errors,
   8389 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8390 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8391 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8392 		NQRXC_ERROR_RXE)) {
   8393 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8394 		    EXTRXC_ERROR_SE, 0))
   8395 			log(LOG_WARNING, "%s: symbol error\n",
   8396 			    device_xname(sc->sc_dev));
   8397 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8398 		    EXTRXC_ERROR_SEQ, 0))
   8399 			log(LOG_WARNING, "%s: receive sequence error\n",
   8400 			    device_xname(sc->sc_dev));
   8401 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8402 		    EXTRXC_ERROR_CE, 0))
   8403 			log(LOG_WARNING, "%s: CRC error\n",
   8404 			    device_xname(sc->sc_dev));
   8405 		return true;
   8406 	}
   8407 
   8408 	return false;
   8409 }
   8410 
   8411 static inline bool
   8412 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8413 {
   8414 	struct wm_softc *sc = rxq->rxq_sc;
   8415 
   8416 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8417 		NQRXC_STATUS_DD)) {
   8418 		/* We have processed all of the receive descriptors. */
   8419 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8420 		return false;
   8421 	}
   8422 
   8423 	return true;
   8424 }
   8425 
   8426 static inline bool
   8427 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8428     uint16_t vlantag, struct mbuf *m)
   8429 {
   8430 
   8431 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8432 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8433 		vlan_set_tag(m, le16toh(vlantag));
   8434 	}
   8435 
   8436 	return true;
   8437 }
   8438 
   8439 static inline void
   8440 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8441     uint32_t errors, struct mbuf *m)
   8442 {
   8443 	struct wm_softc *sc = rxq->rxq_sc;
   8444 
   8445 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8446 		if (wm_rxdesc_is_set_status(sc, status,
   8447 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8448 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8449 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8450 			if (wm_rxdesc_is_set_error(sc, errors,
   8451 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8452 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8453 		}
   8454 		if (wm_rxdesc_is_set_status(sc, status,
   8455 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8456 			/*
   8457 			 * Note: we don't know if this was TCP or UDP,
   8458 			 * so we just set both bits, and expect the
   8459 			 * upper layers to deal.
   8460 			 */
   8461 			WM_Q_EVCNT_INCR(rxq, tusum);
   8462 			m->m_pkthdr.csum_flags |=
   8463 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8464 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8465 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8466 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8467 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8468 		}
   8469 	}
   8470 }
   8471 
   8472 /*
   8473  * wm_rxeof:
   8474  *
   8475  *	Helper; handle receive interrupts.
   8476  */
   8477 static bool
   8478 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8479 {
   8480 	struct wm_softc *sc = rxq->rxq_sc;
   8481 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8482 	struct wm_rxsoft *rxs;
   8483 	struct mbuf *m;
   8484 	int i, len;
   8485 	int count = 0;
   8486 	uint32_t status, errors;
   8487 	uint16_t vlantag;
   8488 	bool more = false;
   8489 
   8490 	KASSERT(mutex_owned(rxq->rxq_lock));
   8491 
   8492 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8493 		if (limit-- == 0) {
   8494 			rxq->rxq_ptr = i;
   8495 			more = true;
   8496 			DPRINTF(WM_DEBUG_RX,
   8497 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8498 				device_xname(sc->sc_dev), i));
   8499 			break;
   8500 		}
   8501 
   8502 		rxs = &rxq->rxq_soft[i];
   8503 
   8504 		DPRINTF(WM_DEBUG_RX,
   8505 		    ("%s: RX: checking descriptor %d\n",
   8506 			device_xname(sc->sc_dev), i));
   8507 		wm_cdrxsync(rxq, i,
   8508 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8509 
   8510 		status = wm_rxdesc_get_status(rxq, i);
   8511 		errors = wm_rxdesc_get_errors(rxq, i);
   8512 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8513 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8514 #ifdef WM_DEBUG
   8515 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8516 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8517 #endif
   8518 
   8519 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while still holding
			 * rxq_lock, so that it stays consistent with the
			 * ring state.
			 */
   8524 			rxq->rxq_ptr = i;
   8525 			break;
   8526 		}
   8527 
   8528 		count++;
   8529 		if (__predict_false(rxq->rxq_discard)) {
   8530 			DPRINTF(WM_DEBUG_RX,
   8531 			    ("%s: RX: discarding contents of descriptor %d\n",
   8532 				device_xname(sc->sc_dev), i));
   8533 			wm_init_rxdesc(rxq, i);
   8534 			if (wm_rxdesc_is_eop(rxq, status)) {
   8535 				/* Reset our state. */
   8536 				DPRINTF(WM_DEBUG_RX,
   8537 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8538 					device_xname(sc->sc_dev)));
   8539 				rxq->rxq_discard = 0;
   8540 			}
   8541 			continue;
   8542 		}
   8543 
   8544 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8545 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8546 
   8547 		m = rxs->rxs_mbuf;
   8548 
   8549 		/*
   8550 		 * Add a new receive buffer to the ring, unless of
   8551 		 * course the length is zero. Treat the latter as a
   8552 		 * failed mapping.
   8553 		 */
   8554 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8555 			/*
   8556 			 * Failed, throw away what we've done so
   8557 			 * far, and discard the rest of the packet.
   8558 			 */
   8559 			ifp->if_ierrors++;
   8560 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8561 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8562 			wm_init_rxdesc(rxq, i);
   8563 			if (!wm_rxdesc_is_eop(rxq, status))
   8564 				rxq->rxq_discard = 1;
   8565 			if (rxq->rxq_head != NULL)
   8566 				m_freem(rxq->rxq_head);
   8567 			WM_RXCHAIN_RESET(rxq);
   8568 			DPRINTF(WM_DEBUG_RX,
   8569 			    ("%s: RX: Rx buffer allocation failed, "
   8570 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8571 				rxq->rxq_discard ? " (discard)" : ""));
   8572 			continue;
   8573 		}
   8574 
   8575 		m->m_len = len;
   8576 		rxq->rxq_len += len;
   8577 		DPRINTF(WM_DEBUG_RX,
   8578 		    ("%s: RX: buffer at %p len %d\n",
   8579 			device_xname(sc->sc_dev), m->m_data, len));
   8580 
   8581 		/* If this is not the end of the packet, keep looking. */
   8582 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8583 			WM_RXCHAIN_LINK(rxq, m);
   8584 			DPRINTF(WM_DEBUG_RX,
   8585 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8586 				device_xname(sc->sc_dev), rxq->rxq_len));
   8587 			continue;
   8588 		}
   8589 
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on the I350, I354
		 * and I21[01] (not all chips can be configured to strip
		 * it), so we need to trim it.  We may also need to shorten
		 * the previous mbuf in the chain if the current mbuf is
		 * too short: when the last mbuf holds fewer bytes than the
		 * FCS, the remainder is trimmed from its predecessor.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so we don't trim
		 * the FCS there.
		 */
   8600 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8601 		    && (sc->sc_type != WM_T_I210)
   8602 		    && (sc->sc_type != WM_T_I211)) {
   8603 			if (m->m_len < ETHER_CRC_LEN) {
   8604 				rxq->rxq_tail->m_len
   8605 				    -= (ETHER_CRC_LEN - m->m_len);
   8606 				m->m_len = 0;
   8607 			} else
   8608 				m->m_len -= ETHER_CRC_LEN;
   8609 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8610 		} else
   8611 			len = rxq->rxq_len;
   8612 
   8613 		WM_RXCHAIN_LINK(rxq, m);
   8614 
   8615 		*rxq->rxq_tailp = NULL;
   8616 		m = rxq->rxq_head;
   8617 
   8618 		WM_RXCHAIN_RESET(rxq);
   8619 
   8620 		DPRINTF(WM_DEBUG_RX,
   8621 		    ("%s: RX: have entire packet, len -> %d\n",
   8622 			device_xname(sc->sc_dev), len));
   8623 
   8624 		/* If an error occurred, update stats and drop the packet. */
   8625 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8626 			m_freem(m);
   8627 			continue;
   8628 		}
   8629 
   8630 		/* No errors.  Receive the packet. */
   8631 		m_set_rcvif(m, ifp);
   8632 		m->m_pkthdr.len = len;
		/*
		 * TODO: the rsshash and rsstype should be saved in this
		 * mbuf.
		 */
   8637 		DPRINTF(WM_DEBUG_RX,
   8638 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8639 			device_xname(sc->sc_dev), rsstype, rsshash));
   8640 
   8641 		/*
   8642 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8643 		 * for us.  Associate the tag with the packet.
   8644 		 */
   8645 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8646 			continue;
   8647 
   8648 		/* Set up checksum info for this packet. */
   8649 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
		/*
		 * Update the receive pointer and counters while still
		 * holding rxq_lock, before it is dropped to pass the
		 * packet up.
		 */
   8654 		rxq->rxq_ptr = i;
   8655 		rxq->rxq_packets++;
   8656 		rxq->rxq_bytes += len;
   8657 		mutex_exit(rxq->rxq_lock);
   8658 
   8659 		/* Pass it on. */
   8660 		if_percpuq_enqueue(sc->sc_ipq, m);
   8661 
   8662 		mutex_enter(rxq->rxq_lock);
   8663 
   8664 		if (rxq->rxq_stopping)
   8665 			break;
   8666 	}
   8667 
   8668 	if (count != 0)
   8669 		rnd_add_uint32(&sc->rnd_source, count);
   8670 
   8671 	DPRINTF(WM_DEBUG_RX,
   8672 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8673 
   8674 	return more;
   8675 }
   8676 
   8677 /*
   8678  * wm_linkintr_gmii:
   8679  *
   8680  *	Helper; handle link interrupts for GMII.
   8681  */
   8682 static void
   8683 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8684 {
   8685 
   8686 	KASSERT(WM_CORE_LOCKED(sc));
   8687 
   8688 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8689 		__func__));
   8690 
   8691 	if (icr & ICR_LSC) {
   8692 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8693 		uint32_t reg;
   8694 		bool link;
   8695 
   8696 		link = status & STATUS_LU;
   8697 		if (link) {
   8698 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8699 				device_xname(sc->sc_dev),
   8700 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8701 		} else {
   8702 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8703 				device_xname(sc->sc_dev)));
   8704 		}
   8705 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8706 			wm_gig_downshift_workaround_ich8lan(sc);
   8707 
   8708 		if ((sc->sc_type == WM_T_ICH8)
   8709 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8710 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8711 		}
   8712 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8713 			device_xname(sc->sc_dev)));
   8714 		mii_pollstat(&sc->sc_mii);
   8715 		if (sc->sc_type == WM_T_82543) {
   8716 			int miistatus, active;
   8717 
   8718 			/*
   8719 			 * With 82543, we need to force speed and
   8720 			 * duplex on the MAC equal to what the PHY
   8721 			 * speed and duplex configuration is.
   8722 			 */
   8723 			miistatus = sc->sc_mii.mii_media_status;
   8724 
   8725 			if (miistatus & IFM_ACTIVE) {
   8726 				active = sc->sc_mii.mii_media_active;
   8727 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8728 				switch (IFM_SUBTYPE(active)) {
   8729 				case IFM_10_T:
   8730 					sc->sc_ctrl |= CTRL_SPEED_10;
   8731 					break;
   8732 				case IFM_100_TX:
   8733 					sc->sc_ctrl |= CTRL_SPEED_100;
   8734 					break;
   8735 				case IFM_1000_T:
   8736 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8737 					break;
   8738 				default:
					/*
					 * Fiber?
					 * Should not enter here.
					 */
   8743 					printf("unknown media (%x)\n", active);
   8744 					break;
   8745 				}
   8746 				if (active & IFM_FDX)
   8747 					sc->sc_ctrl |= CTRL_FD;
   8748 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8749 			}
   8750 		} else if (sc->sc_type == WM_T_PCH) {
   8751 			wm_k1_gig_workaround_hv(sc,
   8752 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8753 		}
   8754 
   8755 		if ((sc->sc_phytype == WMPHY_82578)
   8756 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8757 			== IFM_1000_T)) {
   8758 
   8759 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8760 				delay(200*1000); /* XXX too big */
   8761 
   8762 				/* Link stall fix for link up */
   8763 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8764 				    HV_MUX_DATA_CTRL,
   8765 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8766 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8767 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8768 				    HV_MUX_DATA_CTRL,
   8769 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8770 			}
   8771 		}
		/*
		 * I217 packet loss issue:
		 * ensure that the FEXTNVM4 Beacon Duration is set
		 * correctly on power up.
		 * Set the Beacon Duration for the I217 to 8 usec.
		 */
   8778 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8779 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8780 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8781 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8782 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8783 		}
   8784 
   8785 		/* Work-around I218 hang issue */
   8786 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   8787 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   8788 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   8789 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   8790 			wm_k1_workaround_lpt_lp(sc, link);
   8791 
   8792 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8793 			/*
   8794 			 * Set platform power management values for Latency
   8795 			 * Tolerance Reporting (LTR)
   8796 			 */
   8797 			wm_platform_pm_pch_lpt(sc,
   8798 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8799 		}
   8800 
   8801 		/* FEXTNVM6 K1-off workaround */
   8802 		if (sc->sc_type == WM_T_PCH_SPT) {
   8803 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8804 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8805 			    & FEXTNVM6_K1_OFF_ENABLE)
   8806 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8807 			else
   8808 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8809 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8810 		}
   8811 	} else if (icr & ICR_RXSEQ) {
   8812 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8813 			device_xname(sc->sc_dev)));
   8814 	}
   8815 }
   8816 
   8817 /*
   8818  * wm_linkintr_tbi:
   8819  *
   8820  *	Helper; handle link interrupts for TBI mode.
   8821  */
   8822 static void
   8823 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8824 {
   8825 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8826 	uint32_t status;
   8827 
   8828 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8829 		__func__));
   8830 
   8831 	status = CSR_READ(sc, WMREG_STATUS);
   8832 	if (icr & ICR_LSC) {
   8833 		wm_check_for_link(sc);
   8834 		if (status & STATUS_LU) {
   8835 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8836 				device_xname(sc->sc_dev),
   8837 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8838 			/*
   8839 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8840 			 * so we should update sc->sc_ctrl
   8841 			 */
   8842 
   8843 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8844 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8845 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8846 			if (status & STATUS_FD)
   8847 				sc->sc_tctl |=
   8848 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8849 			else
   8850 				sc->sc_tctl |=
   8851 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8852 			if (sc->sc_ctrl & CTRL_TFCE)
   8853 				sc->sc_fcrtl |= FCRTL_XONE;
   8854 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8855 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8856 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   8857 			sc->sc_tbi_linkup = 1;
   8858 			if_link_state_change(ifp, LINK_STATE_UP);
   8859 		} else {
   8860 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8861 				device_xname(sc->sc_dev)));
   8862 			sc->sc_tbi_linkup = 0;
   8863 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8864 		}
   8865 		/* Update LED */
   8866 		wm_tbi_serdes_set_linkled(sc);
   8867 	} else if (icr & ICR_RXSEQ) {
   8868 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8869 			device_xname(sc->sc_dev)));
   8870 	}
   8871 }
   8872 
   8873 /*
   8874  * wm_linkintr_serdes:
   8875  *
 *	Helper; handle link interrupts for SERDES mode.
   8877  */
   8878 static void
   8879 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8880 {
   8881 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8882 	struct mii_data *mii = &sc->sc_mii;
   8883 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8884 	uint32_t pcs_adv, pcs_lpab, reg;
   8885 
   8886 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8887 		__func__));
   8888 
   8889 	if (icr & ICR_LSC) {
   8890 		/* Check PCS */
   8891 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8892 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8893 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8894 				device_xname(sc->sc_dev)));
   8895 			mii->mii_media_status |= IFM_ACTIVE;
   8896 			sc->sc_tbi_linkup = 1;
   8897 			if_link_state_change(ifp, LINK_STATE_UP);
   8898 		} else {
   8899 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8900 				device_xname(sc->sc_dev)));
   8901 			mii->mii_media_status |= IFM_NONE;
   8902 			sc->sc_tbi_linkup = 0;
   8903 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8904 			wm_tbi_serdes_set_linkled(sc);
   8905 			return;
   8906 		}
   8907 		mii->mii_media_active |= IFM_1000_SX;
   8908 		if ((reg & PCS_LSTS_FDX) != 0)
   8909 			mii->mii_media_active |= IFM_FDX;
   8910 		else
   8911 			mii->mii_media_active |= IFM_HDX;
   8912 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8913 			/* Check flow */
   8914 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8915 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8916 				DPRINTF(WM_DEBUG_LINK,
   8917 				    ("XXX LINKOK but not ACOMP\n"));
   8918 				return;
   8919 			}
   8920 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8921 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8922 			DPRINTF(WM_DEBUG_LINK,
   8923 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8924 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8925 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8926 				mii->mii_media_active |= IFM_FLOW
   8927 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8928 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8929 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8930 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8931 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8932 				mii->mii_media_active |= IFM_FLOW
   8933 				    | IFM_ETH_TXPAUSE;
   8934 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8935 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8936 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8937 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8938 				mii->mii_media_active |= IFM_FLOW
   8939 				    | IFM_ETH_RXPAUSE;
   8940 		}
   8941 		/* Update LED */
   8942 		wm_tbi_serdes_set_linkled(sc);
   8943 	} else {
   8944 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8945 		    device_xname(sc->sc_dev)));
   8946 	}
   8947 }
   8948 
   8949 /*
   8950  * wm_linkintr:
   8951  *
   8952  *	Helper; handle link interrupts.
   8953  */
   8954 static void
   8955 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8956 {
   8957 
   8958 	KASSERT(WM_CORE_LOCKED(sc));
   8959 
   8960 	if (sc->sc_flags & WM_F_HAS_MII)
   8961 		wm_linkintr_gmii(sc, icr);
   8962 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8963 	    && (sc->sc_type >= WM_T_82575))
   8964 		wm_linkintr_serdes(sc, icr);
   8965 	else
   8966 		wm_linkintr_tbi(sc, icr);
   8967 }
   8968 
   8969 /*
   8970  * wm_intr_legacy:
   8971  *
   8972  *	Interrupt service routine for INTx and MSI.
   8973  */
   8974 static int
   8975 wm_intr_legacy(void *arg)
   8976 {
   8977 	struct wm_softc *sc = arg;
   8978 	struct wm_queue *wmq = &sc->sc_queue[0];
   8979 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8980 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8981 	uint32_t icr, rndval = 0;
   8982 	int handled = 0;
   8983 
   8984 	while (1 /* CONSTCOND */) {
   8985 		icr = CSR_READ(sc, WMREG_ICR);
   8986 		if ((icr & sc->sc_icr) == 0)
   8987 			break;
   8988 		if (handled == 0) {
   8989 			DPRINTF(WM_DEBUG_TX,
			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8991 		}
   8992 		if (rndval == 0)
   8993 			rndval = icr;
   8994 
   8995 		mutex_enter(rxq->rxq_lock);
   8996 
   8997 		if (rxq->rxq_stopping) {
   8998 			mutex_exit(rxq->rxq_lock);
   8999 			break;
   9000 		}
   9001 
   9002 		handled = 1;
   9003 
   9004 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9005 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9006 			DPRINTF(WM_DEBUG_RX,
   9007 			    ("%s: RX: got Rx intr 0x%08x\n",
   9008 				device_xname(sc->sc_dev),
   9009 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9010 			WM_Q_EVCNT_INCR(rxq, intr);
   9011 		}
   9012 #endif
   9013 		/*
   9014 		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
   9016 		 * So, we can call wm_rxeof() in interrupt context.
   9017 		 */
   9018 		wm_rxeof(rxq, UINT_MAX);
   9019 
   9020 		mutex_exit(rxq->rxq_lock);
   9021 		mutex_enter(txq->txq_lock);
   9022 
   9023 		if (txq->txq_stopping) {
   9024 			mutex_exit(txq->txq_lock);
   9025 			break;
   9026 		}
   9027 
   9028 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9029 		if (icr & ICR_TXDW) {
   9030 			DPRINTF(WM_DEBUG_TX,
   9031 			    ("%s: TX: got TXDW interrupt\n",
   9032 				device_xname(sc->sc_dev)));
   9033 			WM_Q_EVCNT_INCR(txq, txdw);
   9034 		}
   9035 #endif
   9036 		wm_txeof(txq, UINT_MAX);
   9037 
   9038 		mutex_exit(txq->txq_lock);
   9039 		WM_CORE_LOCK(sc);
   9040 
   9041 		if (sc->sc_core_stopping) {
   9042 			WM_CORE_UNLOCK(sc);
   9043 			break;
   9044 		}
   9045 
   9046 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9047 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9048 			wm_linkintr(sc, icr);
   9049 		}
   9050 
   9051 		WM_CORE_UNLOCK(sc);
   9052 
   9053 		if (icr & ICR_RXO) {
   9054 #if defined(WM_DEBUG)
   9055 			log(LOG_WARNING, "%s: Receive overrun\n",
   9056 			    device_xname(sc->sc_dev));
   9057 #endif /* defined(WM_DEBUG) */
   9058 		}
   9059 	}
   9060 
   9061 	rnd_add_uint32(&sc->rnd_source, rndval);
   9062 
   9063 	if (handled) {
   9064 		/* Try to get more packets going. */
   9065 		softint_schedule(wmq->wmq_si);
   9066 	}
   9067 
   9068 	return handled;
   9069 }
   9070 
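/*
 * wm_txrxintr_disable:
 *
 *	Disable the Tx/Rx interrupts of the given queue.  The mask
 *	register differs by MAC type: IMC on the 82574, EIMC on the
 *	82575 and newer.
 */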
   9071 static inline void
   9072 wm_txrxintr_disable(struct wm_queue *wmq)
   9073 {
   9074 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9075 
   9076 	if (sc->sc_type == WM_T_82574)
   9077 		CSR_WRITE(sc, WMREG_IMC,
   9078 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9079 	else if (sc->sc_type == WM_T_82575)
   9080 		CSR_WRITE(sc, WMREG_EIMC,
   9081 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9082 	else
   9083 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9084 }
   9085 
   9086 static inline void
   9087 wm_txrxintr_enable(struct wm_queue *wmq)
   9088 {
   9089 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9090 
   9091 	wm_itrs_calculate(sc, wmq);
   9092 
	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
	 * here.  There is no need to care whether RXQ(0) or RXQ(1) re-enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
	 * its wm_handle_queue(wmq) is running.
	 */
   9099 	if (sc->sc_type == WM_T_82574)
   9100 		CSR_WRITE(sc, WMREG_IMS,
   9101 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9102 	else if (sc->sc_type == WM_T_82575)
   9103 		CSR_WRITE(sc, WMREG_EIMS,
   9104 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9105 	else
   9106 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9107 }
   9108 
   9109 static int
   9110 wm_txrxintr_msix(void *arg)
   9111 {
   9112 	struct wm_queue *wmq = arg;
   9113 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9114 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9115 	struct wm_softc *sc = txq->txq_sc;
   9116 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9117 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9118 	bool txmore;
   9119 	bool rxmore;
   9120 
   9121 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9122 
   9123 	DPRINTF(WM_DEBUG_TX,
   9124 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9125 
   9126 	wm_txrxintr_disable(wmq);
   9127 
   9128 	mutex_enter(txq->txq_lock);
   9129 
   9130 	if (txq->txq_stopping) {
   9131 		mutex_exit(txq->txq_lock);
   9132 		return 0;
   9133 	}
   9134 
   9135 	WM_Q_EVCNT_INCR(txq, txdw);
   9136 	txmore = wm_txeof(txq, txlimit);
	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   9138 	mutex_exit(txq->txq_lock);
   9139 
   9140 	DPRINTF(WM_DEBUG_RX,
   9141 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9142 	mutex_enter(rxq->rxq_lock);
   9143 
   9144 	if (rxq->rxq_stopping) {
   9145 		mutex_exit(rxq->rxq_lock);
   9146 		return 0;
   9147 	}
   9148 
   9149 	WM_Q_EVCNT_INCR(rxq, intr);
   9150 	rxmore = wm_rxeof(rxq, rxlimit);
   9151 	mutex_exit(rxq->rxq_lock);
   9152 
   9153 	wm_itrs_writereg(sc, wmq);
   9154 
   9155 	if (txmore || rxmore)
   9156 		softint_schedule(wmq->wmq_si);
   9157 	else
   9158 		wm_txrxintr_enable(wmq);
   9159 
   9160 	return 1;
   9161 }
   9162 
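/*
 * wm_handle_queue:
 *
 *	Softint handler for per-queue deferred work: reclaim more Tx
 *	jobs, run the deferred if_start and receive more packets,
 *	continuing what the interrupt handler left over when it hit
 *	its process limits.
 */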
   9163 static void
   9164 wm_handle_queue(void *arg)
   9165 {
   9166 	struct wm_queue *wmq = arg;
   9167 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9168 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9169 	struct wm_softc *sc = txq->txq_sc;
   9170 	u_int txlimit = sc->sc_tx_process_limit;
   9171 	u_int rxlimit = sc->sc_rx_process_limit;
   9172 	bool txmore;
   9173 	bool rxmore;
   9174 
   9175 	mutex_enter(txq->txq_lock);
   9176 	if (txq->txq_stopping) {
   9177 		mutex_exit(txq->txq_lock);
   9178 		return;
   9179 	}
   9180 	txmore = wm_txeof(txq, txlimit);
   9181 	wm_deferred_start_locked(txq);
   9182 	mutex_exit(txq->txq_lock);
   9183 
   9184 	mutex_enter(rxq->rxq_lock);
   9185 	if (rxq->rxq_stopping) {
   9186 		mutex_exit(rxq->rxq_lock);
   9187 		return;
   9188 	}
   9189 	WM_Q_EVCNT_INCR(rxq, defer);
   9190 	rxmore = wm_rxeof(rxq, rxlimit);
   9191 	mutex_exit(rxq->rxq_lock);
   9192 
   9193 	if (txmore || rxmore)
   9194 		softint_schedule(wmq->wmq_si);
   9195 	else
   9196 		wm_txrxintr_enable(wmq);
   9197 }
   9198 
   9199 /*
   9200  * wm_linkintr_msix:
   9201  *
   9202  *	Interrupt service routine for link status change for MSI-X.
   9203  */
   9204 static int
   9205 wm_linkintr_msix(void *arg)
   9206 {
   9207 	struct wm_softc *sc = arg;
   9208 	uint32_t reg;
	bool has_rxo = false;
   9210 
   9211 	DPRINTF(WM_DEBUG_LINK,
   9212 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9213 
   9214 	reg = CSR_READ(sc, WMREG_ICR);
   9215 	WM_CORE_LOCK(sc);
   9216 	if (sc->sc_core_stopping)
   9217 		goto out;
   9218 
   9219 	if ((reg & ICR_LSC) != 0) {
   9220 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9221 		wm_linkintr(sc, ICR_LSC);
   9222 	}
   9223 
	/*
	 * XXX 82574 MSI-X mode workaround
	 *
	 * In 82574 MSI-X mode, the receive overrun (RXO) interrupt is
	 * delivered on the ICR_OTHER MSI-X vector and raises neither the
	 * ICR_RXQ(0) nor the ICR_RXQ(1) vector.  So we generate ICR_RXQ(0)
	 * and ICR_RXQ(1) interrupts by writing WMREG_ICS in order to
	 * process the received packets.
	 */
   9232 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9233 #if defined(WM_DEBUG)
   9234 		log(LOG_WARNING, "%s: Receive overrun\n",
   9235 		    device_xname(sc->sc_dev));
   9236 #endif /* defined(WM_DEBUG) */
   9237 
   9238 		has_rxo = true;
		/*
		 * The RXO interrupt arrives at a very high rate when the
		 * receive traffic rate is high.  We therefore use polling
		 * mode for ICR_OTHER, as for the Tx/Rx interrupts.
		 * ICR_OTHER will be re-enabled at the end of
		 * wm_txrxintr_msix(), which is kicked by both the
		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
		 */
   9246 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9247 
   9248 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9249 	}
   9250 
   9253 out:
   9254 	WM_CORE_UNLOCK(sc);
   9255 
   9256 	if (sc->sc_type == WM_T_82574) {
   9257 		if (!has_rxo)
   9258 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9259 		else
   9260 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9261 	} else if (sc->sc_type == WM_T_82575)
   9262 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9263 	else
   9264 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9265 
   9266 	return 1;
   9267 }
   9268 
   9269 /*
   9270  * Media related.
   9271  * GMII, SGMII, TBI (and SERDES)
   9272  */
   9273 
   9274 /* Common */
   9275 
   9276 /*
   9277  * wm_tbi_serdes_set_linkled:
   9278  *
   9279  *	Update the link LED on TBI and SERDES devices.
   9280  */
   9281 static void
   9282 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9283 {
   9284 
   9285 	if (sc->sc_tbi_linkup)
   9286 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9287 	else
   9288 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9289 
   9290 	/* 82540 or newer devices are active low */
   9291 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9292 
   9293 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9294 }
   9295 
   9296 /* GMII related */
   9297 
   9298 /*
   9299  * wm_gmii_reset:
   9300  *
   9301  *	Reset the PHY.
   9302  */
   9303 static void
   9304 wm_gmii_reset(struct wm_softc *sc)
   9305 {
   9306 	uint32_t reg;
   9307 	int rv;
   9308 
   9309 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9310 		device_xname(sc->sc_dev), __func__));
   9311 
   9312 	rv = sc->phy.acquire(sc);
   9313 	if (rv != 0) {
   9314 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9315 		    __func__);
   9316 		return;
   9317 	}
   9318 
   9319 	switch (sc->sc_type) {
   9320 	case WM_T_82542_2_0:
   9321 	case WM_T_82542_2_1:
   9322 		/* null */
   9323 		break;
   9324 	case WM_T_82543:
   9325 		/*
   9326 		 * With 82543, we need to force speed and duplex on the MAC
   9327 		 * equal to what the PHY speed and duplex configuration is.
   9328 		 * In addition, we need to perform a hardware reset on the PHY
   9329 		 * to take it out of reset.
   9330 		 */
   9331 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9332 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9333 
   9334 		/* The PHY reset pin is active-low. */
   9335 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9336 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9337 		    CTRL_EXT_SWDPIN(4));
   9338 		reg |= CTRL_EXT_SWDPIO(4);
   9339 
   9340 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9341 		CSR_WRITE_FLUSH(sc);
   9342 		delay(10*1000);
   9343 
   9344 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9345 		CSR_WRITE_FLUSH(sc);
   9346 		delay(150);
   9347 #if 0
   9348 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9349 #endif
   9350 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9351 		break;
   9352 	case WM_T_82544:	/* reset 10000us */
   9353 	case WM_T_82540:
   9354 	case WM_T_82545:
   9355 	case WM_T_82545_3:
   9356 	case WM_T_82546:
   9357 	case WM_T_82546_3:
   9358 	case WM_T_82541:
   9359 	case WM_T_82541_2:
   9360 	case WM_T_82547:
   9361 	case WM_T_82547_2:
   9362 	case WM_T_82571:	/* reset 100us */
   9363 	case WM_T_82572:
   9364 	case WM_T_82573:
   9365 	case WM_T_82574:
   9366 	case WM_T_82575:
   9367 	case WM_T_82576:
   9368 	case WM_T_82580:
   9369 	case WM_T_I350:
   9370 	case WM_T_I354:
   9371 	case WM_T_I210:
   9372 	case WM_T_I211:
   9373 	case WM_T_82583:
   9374 	case WM_T_80003:
   9375 		/* generic reset */
   9376 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9377 		CSR_WRITE_FLUSH(sc);
   9378 		delay(20000);
   9379 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9380 		CSR_WRITE_FLUSH(sc);
   9381 		delay(20000);
   9382 
   9383 		if ((sc->sc_type == WM_T_82541)
   9384 		    || (sc->sc_type == WM_T_82541_2)
   9385 		    || (sc->sc_type == WM_T_82547)
   9386 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   9388 			/* XXX add code to set LED after phy reset */
   9389 		}
   9390 		break;
   9391 	case WM_T_ICH8:
   9392 	case WM_T_ICH9:
   9393 	case WM_T_ICH10:
   9394 	case WM_T_PCH:
   9395 	case WM_T_PCH2:
   9396 	case WM_T_PCH_LPT:
   9397 	case WM_T_PCH_SPT:
   9398 	case WM_T_PCH_CNP:
   9399 		/* generic reset */
   9400 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9401 		CSR_WRITE_FLUSH(sc);
   9402 		delay(100);
   9403 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9404 		CSR_WRITE_FLUSH(sc);
   9405 		delay(150);
   9406 		break;
   9407 	default:
   9408 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9409 		    __func__);
   9410 		break;
   9411 	}
   9412 
   9413 	sc->phy.release(sc);
   9414 
   9415 	/* get_cfg_done */
   9416 	wm_get_cfg_done(sc);
   9417 
   9418 	/* extra setup */
   9419 	switch (sc->sc_type) {
   9420 	case WM_T_82542_2_0:
   9421 	case WM_T_82542_2_1:
   9422 	case WM_T_82543:
   9423 	case WM_T_82544:
   9424 	case WM_T_82540:
   9425 	case WM_T_82545:
   9426 	case WM_T_82545_3:
   9427 	case WM_T_82546:
   9428 	case WM_T_82546_3:
   9429 	case WM_T_82541_2:
   9430 	case WM_T_82547_2:
   9431 	case WM_T_82571:
   9432 	case WM_T_82572:
   9433 	case WM_T_82573:
   9434 	case WM_T_82574:
   9435 	case WM_T_82583:
   9436 	case WM_T_82575:
   9437 	case WM_T_82576:
   9438 	case WM_T_82580:
   9439 	case WM_T_I350:
   9440 	case WM_T_I354:
   9441 	case WM_T_I210:
   9442 	case WM_T_I211:
   9443 	case WM_T_80003:
   9444 		/* null */
   9445 		break;
   9446 	case WM_T_82541:
   9447 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   9449 		break;
   9450 	case WM_T_ICH8:
   9451 	case WM_T_ICH9:
   9452 	case WM_T_ICH10:
   9453 	case WM_T_PCH:
   9454 	case WM_T_PCH2:
   9455 	case WM_T_PCH_LPT:
   9456 	case WM_T_PCH_SPT:
   9457 	case WM_T_PCH_CNP:
   9458 		wm_phy_post_reset(sc);
   9459 		break;
   9460 	default:
   9461 		panic("%s: unknown type\n", __func__);
   9462 		break;
   9463 	}
   9464 }
   9465 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and to select them the PCI ID or MAC type is needed, without
 * accessing any PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or MAC type.  The list of PCI IDs may not be perfect,
 * so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type.  This might still not be perfect because of missing table
 * entries, but it is better than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   9484 static void
   9485 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9486     uint16_t phy_model)
   9487 {
   9488 	device_t dev = sc->sc_dev;
   9489 	struct mii_data *mii = &sc->sc_mii;
   9490 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9491 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9492 	mii_readreg_t new_readreg;
   9493 	mii_writereg_t new_writereg;
   9494 
   9495 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9496 		device_xname(sc->sc_dev), __func__));
   9497 
   9498 	if (mii->mii_readreg == NULL) {
   9499 		/*
   9500 		 *  This is the first call of this function. For ICH and PCH
   9501 		 * variants, it's difficult to determine the PHY access method
   9502 		 * by sc_type, so use the PCI product ID for some devices.
   9503 		 */
   9504 
   9505 		switch (sc->sc_pcidevid) {
   9506 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9507 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9508 			/* 82577 */
   9509 			new_phytype = WMPHY_82577;
   9510 			break;
   9511 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9512 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9513 			/* 82578 */
   9514 			new_phytype = WMPHY_82578;
   9515 			break;
   9516 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9517 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9518 			/* 82579 */
   9519 			new_phytype = WMPHY_82579;
   9520 			break;
   9521 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9522 		case PCI_PRODUCT_INTEL_82801I_BM:
   9523 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9524 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9525 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9526 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9527 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9528 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9529 			/* ICH8, 9, 10 with 82567 */
   9530 			new_phytype = WMPHY_BM;
   9531 			break;
   9532 		default:
   9533 			break;
   9534 		}
   9535 	} else {
   9536 		/* It's not the first call. Use PHY OUI and model */
   9537 		switch (phy_oui) {
   9538 		case MII_OUI_ATHEROS: /* XXX ??? */
   9539 			switch (phy_model) {
   9540 			case 0x0004: /* XXX */
   9541 				new_phytype = WMPHY_82578;
   9542 				break;
   9543 			default:
   9544 				break;
   9545 			}
   9546 			break;
   9547 		case MII_OUI_xxMARVELL:
   9548 			switch (phy_model) {
   9549 			case MII_MODEL_xxMARVELL_I210:
   9550 				new_phytype = WMPHY_I210;
   9551 				break;
   9552 			case MII_MODEL_xxMARVELL_E1011:
   9553 			case MII_MODEL_xxMARVELL_E1000_3:
   9554 			case MII_MODEL_xxMARVELL_E1000_5:
   9555 			case MII_MODEL_xxMARVELL_E1112:
   9556 				new_phytype = WMPHY_M88;
   9557 				break;
   9558 			case MII_MODEL_xxMARVELL_E1149:
   9559 				new_phytype = WMPHY_BM;
   9560 				break;
   9561 			case MII_MODEL_xxMARVELL_E1111:
   9562 			case MII_MODEL_xxMARVELL_I347:
   9563 			case MII_MODEL_xxMARVELL_E1512:
   9564 			case MII_MODEL_xxMARVELL_E1340M:
   9565 			case MII_MODEL_xxMARVELL_E1543:
   9566 				new_phytype = WMPHY_M88;
   9567 				break;
   9568 			case MII_MODEL_xxMARVELL_I82563:
   9569 				new_phytype = WMPHY_GG82563;
   9570 				break;
   9571 			default:
   9572 				break;
   9573 			}
   9574 			break;
   9575 		case MII_OUI_INTEL:
   9576 			switch (phy_model) {
   9577 			case MII_MODEL_INTEL_I82577:
   9578 				new_phytype = WMPHY_82577;
   9579 				break;
   9580 			case MII_MODEL_INTEL_I82579:
   9581 				new_phytype = WMPHY_82579;
   9582 				break;
   9583 			case MII_MODEL_INTEL_I217:
   9584 				new_phytype = WMPHY_I217;
   9585 				break;
   9586 			case MII_MODEL_INTEL_I82580:
   9587 			case MII_MODEL_INTEL_I350:
   9588 				new_phytype = WMPHY_82580;
   9589 				break;
   9590 			default:
   9591 				break;
   9592 			}
   9593 			break;
   9594 		case MII_OUI_yyINTEL:
   9595 			switch (phy_model) {
   9596 			case MII_MODEL_yyINTEL_I82562G:
   9597 			case MII_MODEL_yyINTEL_I82562EM:
   9598 			case MII_MODEL_yyINTEL_I82562ET:
   9599 				new_phytype = WMPHY_IFE;
   9600 				break;
   9601 			case MII_MODEL_yyINTEL_IGP01E1000:
   9602 				new_phytype = WMPHY_IGP;
   9603 				break;
   9604 			case MII_MODEL_yyINTEL_I82566:
   9605 				new_phytype = WMPHY_IGP_3;
   9606 				break;
   9607 			default:
   9608 				break;
   9609 			}
   9610 			break;
   9611 		default:
   9612 			break;
   9613 		}
   9614 		if (new_phytype == WMPHY_UNKNOWN)
   9615 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9616 			    __func__);
   9617 
    9618 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9619 		    && (sc->sc_phytype != new_phytype)) {
    9620 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9621 			    "was incorrect. PHY type from PHY ID = %u\n",
    9622 			    sc->sc_phytype, new_phytype);
    9623 		}
   9624 	}
   9625 
   9626 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9627 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9628 		/* SGMII */
   9629 		new_readreg = wm_sgmii_readreg;
   9630 		new_writereg = wm_sgmii_writereg;
   9631 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9632 		/* BM2 (phyaddr == 1) */
   9633 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9634 		    && (new_phytype != WMPHY_BM)
   9635 		    && (new_phytype != WMPHY_UNKNOWN))
   9636 			doubt_phytype = new_phytype;
   9637 		new_phytype = WMPHY_BM;
   9638 		new_readreg = wm_gmii_bm_readreg;
   9639 		new_writereg = wm_gmii_bm_writereg;
   9640 	} else if (sc->sc_type >= WM_T_PCH) {
   9641 		/* All PCH* use _hv_ */
   9642 		new_readreg = wm_gmii_hv_readreg;
   9643 		new_writereg = wm_gmii_hv_writereg;
   9644 	} else if (sc->sc_type >= WM_T_ICH8) {
   9645 		/* non-82567 ICH8, 9 and 10 */
   9646 		new_readreg = wm_gmii_i82544_readreg;
   9647 		new_writereg = wm_gmii_i82544_writereg;
   9648 	} else if (sc->sc_type >= WM_T_80003) {
   9649 		/* 80003 */
   9650 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9651 		    && (new_phytype != WMPHY_GG82563)
   9652 		    && (new_phytype != WMPHY_UNKNOWN))
   9653 			doubt_phytype = new_phytype;
   9654 		new_phytype = WMPHY_GG82563;
   9655 		new_readreg = wm_gmii_i80003_readreg;
   9656 		new_writereg = wm_gmii_i80003_writereg;
   9657 	} else if (sc->sc_type >= WM_T_I210) {
   9658 		/* I210 and I211 */
   9659 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9660 		    && (new_phytype != WMPHY_I210)
   9661 		    && (new_phytype != WMPHY_UNKNOWN))
   9662 			doubt_phytype = new_phytype;
   9663 		new_phytype = WMPHY_I210;
   9664 		new_readreg = wm_gmii_gs40g_readreg;
   9665 		new_writereg = wm_gmii_gs40g_writereg;
   9666 	} else if (sc->sc_type >= WM_T_82580) {
   9667 		/* 82580, I350 and I354 */
   9668 		new_readreg = wm_gmii_82580_readreg;
   9669 		new_writereg = wm_gmii_82580_writereg;
   9670 	} else if (sc->sc_type >= WM_T_82544) {
    9671 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9672 		new_readreg = wm_gmii_i82544_readreg;
   9673 		new_writereg = wm_gmii_i82544_writereg;
   9674 	} else {
   9675 		new_readreg = wm_gmii_i82543_readreg;
   9676 		new_writereg = wm_gmii_i82543_writereg;
   9677 	}
   9678 
   9679 	if (new_phytype == WMPHY_BM) {
   9680 		/* All BM use _bm_ */
   9681 		new_readreg = wm_gmii_bm_readreg;
   9682 		new_writereg = wm_gmii_bm_writereg;
   9683 	}
   9684 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9685 		/* All PCH* use _hv_ */
   9686 		new_readreg = wm_gmii_hv_readreg;
   9687 		new_writereg = wm_gmii_hv_writereg;
   9688 	}
   9689 
   9690 	/* Diag output */
   9691 	if (doubt_phytype != WMPHY_UNKNOWN)
   9692 		aprint_error_dev(dev, "Assumed new PHY type was "
   9693 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9694 		    new_phytype);
    9695 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9696 	    && (sc->sc_phytype != new_phytype))
    9697 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9698 		    "was incorrect. New PHY type = %u\n",
    9699 		    sc->sc_phytype, new_phytype);
   9700 
   9701 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9702 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9703 
   9704 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9705 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9706 		    "function was incorrect.\n");
   9707 
   9708 	/* Update now */
   9709 	sc->sc_phytype = new_phytype;
   9710 	mii->mii_readreg = new_readreg;
   9711 	mii->mii_writereg = new_writereg;
   9712 }
   9713 
   9714 /*
   9715  * wm_get_phy_id_82575:
   9716  *
   9717  * Return PHY ID. Return -1 if it failed.
   9718  */
   9719 static int
   9720 wm_get_phy_id_82575(struct wm_softc *sc)
   9721 {
   9722 	uint32_t reg;
   9723 	int phyid = -1;
   9724 
   9725 	/* XXX */
   9726 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9727 		return -1;
   9728 
   9729 	if (wm_sgmii_uses_mdio(sc)) {
   9730 		switch (sc->sc_type) {
   9731 		case WM_T_82575:
   9732 		case WM_T_82576:
   9733 			reg = CSR_READ(sc, WMREG_MDIC);
   9734 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9735 			break;
   9736 		case WM_T_82580:
   9737 		case WM_T_I350:
   9738 		case WM_T_I354:
   9739 		case WM_T_I210:
   9740 		case WM_T_I211:
   9741 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9742 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9743 			break;
   9744 		default:
   9745 			return -1;
   9746 		}
   9747 	}
   9748 
   9749 	return phyid;
   9750 }
   9751 
   9752 
   9753 /*
   9754  * wm_gmii_mediainit:
   9755  *
   9756  *	Initialize media for use on 1000BASE-T devices.
   9757  */
   9758 static void
   9759 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9760 {
   9761 	device_t dev = sc->sc_dev;
   9762 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9763 	struct mii_data *mii = &sc->sc_mii;
   9764 	uint32_t reg;
   9765 
   9766 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9767 		device_xname(sc->sc_dev), __func__));
   9768 
   9769 	/* We have GMII. */
   9770 	sc->sc_flags |= WM_F_HAS_MII;
   9771 
   9772 	if (sc->sc_type == WM_T_80003)
    9773 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9774 	else
   9775 		sc->sc_tipg = TIPG_1000T_DFLT;
   9776 
   9777 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9778 	if ((sc->sc_type == WM_T_82580)
   9779 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9780 	    || (sc->sc_type == WM_T_I211)) {
   9781 		reg = CSR_READ(sc, WMREG_PHPM);
   9782 		reg &= ~PHPM_GO_LINK_D;
   9783 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9784 	}
   9785 
   9786 	/*
   9787 	 * Let the chip set speed/duplex on its own based on
   9788 	 * signals from the PHY.
   9789 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9790 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9791 	 */
   9792 	sc->sc_ctrl |= CTRL_SLU;
   9793 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9794 
   9795 	/* Initialize our media structures and probe the GMII. */
   9796 	mii->mii_ifp = ifp;
   9797 
   9798 	mii->mii_statchg = wm_gmii_statchg;
   9799 
    9800 	/* Switch PHY control from SMBus to PCIe (MDIO) */
   9801 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9802 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9803 	    || (sc->sc_type == WM_T_PCH_CNP))
   9804 		wm_smbustopci(sc);
   9805 
   9806 	wm_gmii_reset(sc);
   9807 
   9808 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9809 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9810 	    wm_gmii_mediastatus);
   9811 
   9812 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9813 	    || (sc->sc_type == WM_T_82580)
   9814 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9815 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9816 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9817 			/* Attach only one port */
   9818 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9819 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9820 		} else {
   9821 			int i, id;
   9822 			uint32_t ctrl_ext;
   9823 
   9824 			id = wm_get_phy_id_82575(sc);
   9825 			if (id != -1) {
   9826 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9827 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9828 			}
   9829 			if ((id == -1)
   9830 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9831 				/* Power on the SGMII PHY if it is disabled */
   9832 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9833 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9834 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9835 				CSR_WRITE_FLUSH(sc);
   9836 				delay(300*1000); /* XXX too long */
   9837 
    9838 				/* Try PHY addresses from 1 to 7 */
   9839 				for (i = 1; i < 8; i++)
   9840 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9841 					    0xffffffff, i, MII_OFFSET_ANY,
   9842 					    MIIF_DOPAUSE);
   9843 
    9844 				/* Restore the previous SFP cage power state */
   9845 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9846 			}
   9847 		}
   9848 	} else {
   9849 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9850 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9851 	}
   9852 
   9853 	/*
    9854 	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
    9855 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   9856 	 */
   9857 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9858 		|| (sc->sc_type == WM_T_PCH_SPT)
   9859 		|| (sc->sc_type == WM_T_PCH_CNP))
   9860 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9861 		wm_set_mdio_slow_mode_hv(sc);
   9862 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9863 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9864 	}
   9865 
   9866 	/*
   9867 	 * (For ICH8 variants)
   9868 	 * If PHY detection failed, use BM's r/w function and retry.
   9869 	 */
   9870 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9871 		/* if failed, retry with *_bm_* */
   9872 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9873 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9874 		    sc->sc_phytype);
   9875 		sc->sc_phytype = WMPHY_BM;
   9876 		mii->mii_readreg = wm_gmii_bm_readreg;
   9877 		mii->mii_writereg = wm_gmii_bm_writereg;
   9878 
   9879 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9880 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9881 	}
   9882 
   9883 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9884 		/* No PHY was found */
   9885 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9886 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9887 		sc->sc_phytype = WMPHY_NONE;
   9888 	} else {
   9889 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9890 
   9891 		/*
    9892 		 * A PHY was found. Check the PHY type again with the second
    9893 		 * call of wm_gmii_setup_phytype().
   9894 		 */
   9895 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9896 		    child->mii_mpd_model);
   9897 
   9898 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9899 	}
   9900 }
   9901 
   9902 /*
   9903  * wm_gmii_mediachange:	[ifmedia interface function]
   9904  *
   9905  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9906  */
   9907 static int
   9908 wm_gmii_mediachange(struct ifnet *ifp)
   9909 {
   9910 	struct wm_softc *sc = ifp->if_softc;
   9911 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9912 	int rc;
   9913 
   9914 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9915 		device_xname(sc->sc_dev), __func__));
   9916 	if ((ifp->if_flags & IFF_UP) == 0)
   9917 		return 0;
   9918 
   9919 	/* Disable D0 LPLU. */
   9920 	wm_lplu_d0_disable(sc);
   9921 
   9922 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9923 	sc->sc_ctrl |= CTRL_SLU;
   9924 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9925 	    || (sc->sc_type > WM_T_82543)) {
   9926 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9927 	} else {
   9928 		sc->sc_ctrl &= ~CTRL_ASDE;
   9929 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9930 		if (ife->ifm_media & IFM_FDX)
   9931 			sc->sc_ctrl |= CTRL_FD;
   9932 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9933 		case IFM_10_T:
   9934 			sc->sc_ctrl |= CTRL_SPEED_10;
   9935 			break;
   9936 		case IFM_100_TX:
   9937 			sc->sc_ctrl |= CTRL_SPEED_100;
   9938 			break;
   9939 		case IFM_1000_T:
   9940 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9941 			break;
   9942 		default:
   9943 			panic("wm_gmii_mediachange: bad media 0x%x",
   9944 			    ife->ifm_media);
   9945 		}
   9946 	}
   9947 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9948 	CSR_WRITE_FLUSH(sc);
   9949 	if (sc->sc_type <= WM_T_82543)
   9950 		wm_gmii_reset(sc);
   9951 
   9952 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9953 		return 0;
   9954 	return rc;
   9955 }
   9956 
   9957 /*
   9958  * wm_gmii_mediastatus:	[ifmedia interface function]
   9959  *
   9960  *	Get the current interface media status on a 1000BASE-T device.
   9961  */
   9962 static void
   9963 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9964 {
   9965 	struct wm_softc *sc = ifp->if_softc;
   9966 
   9967 	ether_mediastatus(ifp, ifmr);
   9968 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9969 	    | sc->sc_flowflags;
   9970 }
   9971 
   9972 #define	MDI_IO		CTRL_SWDPIN(2)
   9973 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9974 #define	MDI_CLK		CTRL_SWDPIN(3)
   9975 
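/*
 * The MDIC-based access functions are only used from the 82544 on
 * (see wm_gmii_setup_phytype() above); on the older 82543 the MII
 * management frame is bit-banged through the software-definable pins
 * in the CTRL register instead. Each bit is presented on MDI_IO and
 * clocked by raising and lowering MDI_CLK, with roughly 10us per
 * clock phase.
 */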
   9976 static void
   9977 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9978 {
   9979 	uint32_t i, v;
   9980 
   9981 	v = CSR_READ(sc, WMREG_CTRL);
   9982 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9983 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9984 
   9985 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9986 		if (data & i)
   9987 			v |= MDI_IO;
   9988 		else
   9989 			v &= ~MDI_IO;
   9990 		CSR_WRITE(sc, WMREG_CTRL, v);
   9991 		CSR_WRITE_FLUSH(sc);
   9992 		delay(10);
   9993 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9994 		CSR_WRITE_FLUSH(sc);
   9995 		delay(10);
   9996 		CSR_WRITE(sc, WMREG_CTRL, v);
   9997 		CSR_WRITE_FLUSH(sc);
   9998 		delay(10);
   9999 	}
   10000 }
   10001 
   10002 static uint32_t
   10003 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10004 {
   10005 	uint32_t v, i, data = 0;
   10006 
   10007 	v = CSR_READ(sc, WMREG_CTRL);
   10008 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10009 	v |= CTRL_SWDPIO(3);
   10010 
   10011 	CSR_WRITE(sc, WMREG_CTRL, v);
   10012 	CSR_WRITE_FLUSH(sc);
   10013 	delay(10);
   10014 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10015 	CSR_WRITE_FLUSH(sc);
   10016 	delay(10);
   10017 	CSR_WRITE(sc, WMREG_CTRL, v);
   10018 	CSR_WRITE_FLUSH(sc);
   10019 	delay(10);
   10020 
   10021 	for (i = 0; i < 16; i++) {
   10022 		data <<= 1;
   10023 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10024 		CSR_WRITE_FLUSH(sc);
   10025 		delay(10);
   10026 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10027 			data |= 1;
   10028 		CSR_WRITE(sc, WMREG_CTRL, v);
   10029 		CSR_WRITE_FLUSH(sc);
   10030 		delay(10);
   10031 	}
   10032 
   10033 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10034 	CSR_WRITE_FLUSH(sc);
   10035 	delay(10);
   10036 	CSR_WRITE(sc, WMREG_CTRL, v);
   10037 	CSR_WRITE_FLUSH(sc);
   10038 	delay(10);
   10039 
   10040 	return data;
   10041 }
   10042 
   10043 #undef MDI_IO
   10044 #undef MDI_DIR
   10045 #undef MDI_CLK
   10046 
   10047 /*
   10048  * wm_gmii_i82543_readreg:	[mii interface function]
   10049  *
   10050  *	Read a PHY register on the GMII (i82543 version).
   10051  */
   10052 static int
   10053 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10054 {
   10055 	struct wm_softc *sc = device_private(dev);
   10056 	int rv;
   10057 
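	/*
	 * Send a 32-bit preamble of all ones, then the 14 high-order
	 * bits of the read frame: start delimiter, read opcode, PHY
	 * address and register address. The PHY then drives the 16
	 * data bits, which wm_i82543_mii_recvbits() clocks in.
	 */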
   10058 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10059 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10060 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10061 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10062 
   10063 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10064 		device_xname(dev), phy, reg, rv));
   10065 
   10066 	return rv;
   10067 }
   10068 
   10069 /*
   10070  * wm_gmii_i82543_writereg:	[mii interface function]
   10071  *
   10072  *	Write a PHY register on the GMII (i82543 version).
   10073  */
   10074 static void
   10075 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10076 {
   10077 	struct wm_softc *sc = device_private(dev);
   10078 
   10079 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10080 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10081 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10082 	    (MII_COMMAND_START << 30), 32);
   10083 }
   10084 
   10085 /*
   10086  * wm_gmii_mdic_readreg:	[mii interface function]
   10087  *
   10088  *	Read a PHY register on the GMII.
   10089  */
   10090 static int
   10091 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10092 {
   10093 	struct wm_softc *sc = device_private(dev);
   10094 	uint32_t mdic = 0;
   10095 	int i, rv;
   10096 
   10097 	if (reg > MII_ADDRMASK) {
   10098 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10099 		    __func__, sc->sc_phytype, reg);
   10100 		reg &= MII_ADDRMASK;
   10101 	}
   10102 
   10103 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10104 	    MDIC_REGADD(reg));
   10105 
   10106 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10107 		delay(50);
   10108 		mdic = CSR_READ(sc, WMREG_MDIC);
   10109 		if (mdic & MDIC_READY)
   10110 			break;
   10111 	}
   10112 
   10113 	if ((mdic & MDIC_READY) == 0) {
   10114 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10115 		    device_xname(dev), phy, reg);
   10116 		return 0;
   10117 	} else if (mdic & MDIC_E) {
   10118 #if 0 /* This is normal if no PHY is present. */
   10119 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10120 		    device_xname(dev), phy, reg);
   10121 #endif
   10122 		return 0;
   10123 	} else {
   10124 		rv = MDIC_DATA(mdic);
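		/*
		 * All-ones data usually means that no PHY responded
		 * (e.g. nothing at this address), so treat it as
		 * "no data" rather than as a real register value.
		 */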
   10125 		if (rv == 0xffff)
   10126 			rv = 0;
   10127 	}
   10128 
   10129 	/*
   10130 	 * Allow some time after each MDIC transaction to avoid
   10131 	 * reading duplicate data in the next MDIC transaction.
   10132 	 */
   10133 	if (sc->sc_type == WM_T_PCH2)
   10134 		delay(100);
   10135 
   10136 	return rv;
   10137 }
   10138 
   10139 /*
   10140  * wm_gmii_mdic_writereg:	[mii interface function]
   10141  *
   10142  *	Write a PHY register on the GMII.
   10143  */
   10144 static void
   10145 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10146 {
   10147 	struct wm_softc *sc = device_private(dev);
   10148 	uint32_t mdic = 0;
   10149 	int i;
   10150 
   10151 	if (reg > MII_ADDRMASK) {
   10152 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10153 		    __func__, sc->sc_phytype, reg);
   10154 		reg &= MII_ADDRMASK;
   10155 	}
   10156 
   10157 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10158 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10159 
   10160 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10161 		delay(50);
   10162 		mdic = CSR_READ(sc, WMREG_MDIC);
   10163 		if (mdic & MDIC_READY)
   10164 			break;
   10165 	}
   10166 
   10167 	if ((mdic & MDIC_READY) == 0) {
   10168 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10169 		    device_xname(dev), phy, reg);
   10170 		return;
   10171 	} else if (mdic & MDIC_E) {
   10172 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10173 		    device_xname(dev), phy, reg);
   10174 		return;
   10175 	}
   10176 
   10177 	/*
   10178 	 * Allow some time after each MDIC transaction to avoid
   10179 	 * reading duplicate data in the next MDIC transaction.
   10180 	 */
   10181 	if (sc->sc_type == WM_T_PCH2)
   10182 		delay(100);
   10183 }
   10184 
   10185 /*
   10186  * wm_gmii_i82544_readreg:	[mii interface function]
   10187  *
   10188  *	Read a PHY register on the GMII.
   10189  */
   10190 static int
   10191 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10192 {
   10193 	struct wm_softc *sc = device_private(dev);
   10194 	int rv;
   10195 
   10196 	if (sc->phy.acquire(sc)) {
   10197 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10198 		return 0;
   10199 	}
   10200 
   10201 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10202 		switch (sc->sc_phytype) {
   10203 		case WMPHY_IGP:
   10204 		case WMPHY_IGP_2:
   10205 		case WMPHY_IGP_3:
   10206 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10207 			    reg);
   10208 			break;
   10209 		default:
   10210 #ifdef WM_DEBUG
   10211 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10212 			    __func__, sc->sc_phytype, reg);
   10213 #endif
   10214 			break;
   10215 		}
   10216 	}
   10217 
   10218 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10219 	sc->phy.release(sc);
   10220 
   10221 	return rv;
   10222 }
   10223 
   10224 /*
   10225  * wm_gmii_i82544_writereg:	[mii interface function]
   10226  *
   10227  *	Write a PHY register on the GMII.
   10228  */
   10229 static void
   10230 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10231 {
   10232 	struct wm_softc *sc = device_private(dev);
   10233 
   10234 	if (sc->phy.acquire(sc)) {
   10235 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10236 		return;
   10237 	}
   10238 
   10239 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10240 		switch (sc->sc_phytype) {
   10241 		case WMPHY_IGP:
   10242 		case WMPHY_IGP_2:
   10243 		case WMPHY_IGP_3:
   10244 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10245 			    reg);
   10246 			break;
   10247 		default:
   10248 #ifdef WM_DEBUG
    10249 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10250 			    __func__, sc->sc_phytype, reg);
   10251 #endif
   10252 			break;
   10253 		}
   10254 	}
   10255 
   10256 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10257 	sc->phy.release(sc);
   10258 }
   10259 
   10260 /*
   10261  * wm_gmii_i80003_readreg:	[mii interface function]
   10262  *
    10263  *	Read a PHY register on the kumeran bus (80003).
    10264  * This could be handled by the PHY layer if we didn't have to lock the
    10265  * resource ...
   10266  */
   10267 static int
   10268 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10269 {
   10270 	struct wm_softc *sc = device_private(dev);
   10271 	int page_select, temp;
   10272 	int rv;
   10273 
   10274 	if (phy != 1) /* only one PHY on kumeran bus */
   10275 		return 0;
   10276 
   10277 	if (sc->phy.acquire(sc)) {
   10278 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10279 		return 0;
   10280 	}
   10281 
   10282 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10283 		page_select = GG82563_PHY_PAGE_SELECT;
   10284 	else {
   10285 		/*
   10286 		 * Use Alternative Page Select register to access registers
   10287 		 * 30 and 31.
   10288 		 */
   10289 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10290 	}
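	/* The page number is carried in the upper bits of 'reg'. */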
   10291 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10292 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10293 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10294 		/*
    10295 		 * Wait an extra 200us to work around a bug with the ready
    10296 		 * bit in the MDIC register.
   10297 		 */
   10298 		delay(200);
   10299 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10300 			device_printf(dev, "%s failed\n", __func__);
   10301 			rv = 0; /* XXX */
   10302 			goto out;
   10303 		}
   10304 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10305 		delay(200);
   10306 	} else
   10307 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10308 
   10309 out:
   10310 	sc->phy.release(sc);
   10311 	return rv;
   10312 }
   10313 
   10314 /*
   10315  * wm_gmii_i80003_writereg:	[mii interface function]
   10316  *
    10317  *	Write a PHY register on the kumeran bus (80003).
    10318  * This could be handled by the PHY layer if we didn't have to lock the
    10319  * resource ...
   10320  */
   10321 static void
   10322 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10323 {
   10324 	struct wm_softc *sc = device_private(dev);
   10325 	int page_select, temp;
   10326 
   10327 	if (phy != 1) /* only one PHY on kumeran bus */
   10328 		return;
   10329 
   10330 	if (sc->phy.acquire(sc)) {
   10331 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10332 		return;
   10333 	}
   10334 
   10335 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10336 		page_select = GG82563_PHY_PAGE_SELECT;
   10337 	else {
   10338 		/*
   10339 		 * Use Alternative Page Select register to access registers
   10340 		 * 30 and 31.
   10341 		 */
   10342 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10343 	}
   10344 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10345 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10346 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10347 		/*
    10348 		 * Wait an extra 200us to work around a bug with the ready
    10349 		 * bit in the MDIC register.
   10350 		 */
   10351 		delay(200);
   10352 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10353 			device_printf(dev, "%s failed\n", __func__);
   10354 			goto out;
   10355 		}
   10356 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10357 		delay(200);
   10358 	} else
   10359 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10360 
   10361 out:
   10362 	sc->phy.release(sc);
   10363 }
   10364 
   10365 /*
   10366  * wm_gmii_bm_readreg:	[mii interface function]
   10367  *
    10368  *	Read a PHY register on the BM PHY (82567 family).
    10369  * This could be handled by the PHY layer if we didn't have to lock the
    10370  * resource ...
   10371  */
   10372 static int
   10373 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10374 {
   10375 	struct wm_softc *sc = device_private(dev);
   10376 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10377 	uint16_t val;
   10378 	int rv;
   10379 
   10380 	if (sc->phy.acquire(sc)) {
   10381 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10382 		return 0;
   10383 	}
   10384 
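	/*
	 * On BM PHYs (but not on the 82574/82583), pages 768 and
	 * above, register 25 on page 0 and register 31 are only
	 * reachable at PHY address 1, so redirect such accesses.
	 */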
   10385 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10386 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10387 		    || (reg == 31)) ? 1 : phy;
   10388 	/* Page 800 works differently than the rest so it has its own func */
   10389 	if (page == BM_WUC_PAGE) {
   10390 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10391 		rv = val;
   10392 		goto release;
   10393 	}
   10394 
   10395 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10396 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10397 		    && (sc->sc_type != WM_T_82583))
   10398 			wm_gmii_mdic_writereg(dev, phy,
   10399 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10400 		else
   10401 			wm_gmii_mdic_writereg(dev, phy,
   10402 			    BME1000_PHY_PAGE_SELECT, page);
   10403 	}
   10404 
   10405 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10406 
   10407 release:
   10408 	sc->phy.release(sc);
   10409 	return rv;
   10410 }
   10411 
   10412 /*
   10413  * wm_gmii_bm_writereg:	[mii interface function]
   10414  *
    10415  *	Write a PHY register on the BM PHY (82567 family).
    10416  * This could be handled by the PHY layer if we didn't have to lock the
    10417  * resource ...
   10418  */
   10419 static void
   10420 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10421 {
   10422 	struct wm_softc *sc = device_private(dev);
   10423 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10424 
   10425 	if (sc->phy.acquire(sc)) {
   10426 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10427 		return;
   10428 	}
   10429 
   10430 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10431 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10432 		    || (reg == 31)) ? 1 : phy;
   10433 	/* Page 800 works differently than the rest so it has its own func */
   10434 	if (page == BM_WUC_PAGE) {
   10435 		uint16_t tmp;
   10436 
   10437 		tmp = val;
   10438 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10439 		goto release;
   10440 	}
   10441 
   10442 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10443 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10444 		    && (sc->sc_type != WM_T_82583))
   10445 			wm_gmii_mdic_writereg(dev, phy,
   10446 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10447 		else
   10448 			wm_gmii_mdic_writereg(dev, phy,
   10449 			    BME1000_PHY_PAGE_SELECT, page);
   10450 	}
   10451 
   10452 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10453 
   10454 release:
   10455 	sc->phy.release(sc);
   10456 }
   10457 
   10458 static void
   10459 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10460 {
   10461 	struct wm_softc *sc = device_private(dev);
   10462 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10463 	uint16_t wuce, reg;
   10464 
   10465 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10466 		device_xname(dev), __func__));
   10467 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10468 	if (sc->sc_type == WM_T_PCH) {
    10469 		/* XXX The e1000 driver does nothing here... why? */
   10470 	}
   10471 
   10472 	/*
   10473 	 * 1) Enable PHY wakeup register first.
   10474 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10475 	 */
   10476 
   10477 	/* Set page 769 */
   10478 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10479 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10480 
   10481 	/* Read WUCE and save it */
   10482 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10483 
   10484 	reg = wuce | BM_WUC_ENABLE_BIT;
   10485 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10486 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10487 
   10488 	/* Select page 800 */
   10489 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10490 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10491 
   10492 	/*
   10493 	 * 2) Access PHY wakeup register.
   10494 	 * See e1000_access_phy_wakeup_reg_bm.
   10495 	 */
   10496 
   10497 	/* Write page 800 */
   10498 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10499 
   10500 	if (rd)
   10501 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10502 	else
   10503 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10504 
   10505 	/*
   10506 	 * 3) Disable PHY wakeup register.
   10507 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10508 	 */
   10509 	/* Set page 769 */
   10510 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10511 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10512 
   10513 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10514 }
   10515 
   10516 /*
   10517  * wm_gmii_hv_readreg:	[mii interface function]
   10518  *
    10519  *	Read a PHY register on the HV (PCH family) PHY.
    10520  * This could be handled by the PHY layer if we didn't have to lock the
    10521  * resource ...
   10522  */
   10523 static int
   10524 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10525 {
   10526 	struct wm_softc *sc = device_private(dev);
   10527 	int rv;
   10528 
   10529 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10530 		device_xname(dev), __func__));
   10531 	if (sc->phy.acquire(sc)) {
   10532 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10533 		return 0;
   10534 	}
   10535 
   10536 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10537 	sc->phy.release(sc);
   10538 	return rv;
   10539 }
   10540 
   10541 static int
   10542 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10543 {
   10544 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10545 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10546 	uint16_t val;
   10547 	int rv;
   10548 
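	/*
	 * Registers on page HV_INTC_FC_PAGE_START and above are only
	 * reachable at PHY address 1, so redirect such accesses.
	 */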
   10549 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10550 
   10551 	/* Page 800 works differently than the rest so it has its own func */
   10552 	if (page == BM_WUC_PAGE) {
   10553 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10554 		return val;
   10555 	}
   10556 
   10557 	/*
    10558 	 * Pages below 768 (other than page 0) work differently from the
    10559 	 * rest and would need their own function, which is not implemented.
   10560 	 */
   10561 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10562 		printf("gmii_hv_readreg!!!\n");
   10563 		return 0;
   10564 	}
   10565 
   10566 	/*
   10567 	 * XXX I21[789] documents say that the SMBus Address register is at
   10568 	 * PHY address 01, Page 0 (not 768), Register 26.
   10569 	 */
   10570 	if (page == HV_INTC_FC_PAGE_START)
   10571 		page = 0;
   10572 
   10573 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10574 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10575 		    page << BME1000_PAGE_SHIFT);
   10576 	}
   10577 
   10578 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10579 	return rv;
   10580 }
   10581 
   10582 /*
   10583  * wm_gmii_hv_writereg:	[mii interface function]
   10584  *
    10585  *	Write a PHY register on the HV (PCH family) PHY.
    10586  * This could be handled by the PHY layer if we didn't have to lock the
    10587  * resource ...
   10588  */
   10589 static void
   10590 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10591 {
   10592 	struct wm_softc *sc = device_private(dev);
   10593 
   10594 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10595 		device_xname(dev), __func__));
   10596 
   10597 	if (sc->phy.acquire(sc)) {
   10598 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10599 		return;
   10600 	}
   10601 
   10602 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10603 	sc->phy.release(sc);
   10604 }
   10605 
   10606 static void
   10607 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10608 {
   10609 	struct wm_softc *sc = device_private(dev);
   10610 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10611 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10612 
   10613 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10614 
   10615 	/* Page 800 works differently than the rest so it has its own func */
   10616 	if (page == BM_WUC_PAGE) {
   10617 		uint16_t tmp;
   10618 
   10619 		tmp = val;
   10620 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10621 		return;
   10622 	}
   10623 
   10624 	/*
    10625 	 * Pages below 768 (other than page 0) work differently from the
    10626 	 * rest and would need their own function, which is not implemented.
   10627 	 */
   10628 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10629 		printf("gmii_hv_writereg!!!\n");
   10630 		return;
   10631 	}
   10632 
   10633 	{
   10634 		/*
   10635 		 * XXX I21[789] documents say that the SMBus Address register
   10636 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10637 		 */
   10638 		if (page == HV_INTC_FC_PAGE_START)
   10639 			page = 0;
   10640 
   10641 		/*
   10642 		 * XXX Workaround MDIO accesses being disabled after entering
   10643 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10644 		 * register is set)
   10645 		 */
   10646 		if (sc->sc_phytype == WMPHY_82578) {
   10647 			struct mii_softc *child;
   10648 
   10649 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10650 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10651 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10652 			    && ((val & (1 << 11)) != 0)) {
   10653 				printf("XXX need workaround\n");
   10654 			}
   10655 		}
   10656 
   10657 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10658 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10659 			    page << BME1000_PAGE_SHIFT);
   10660 		}
   10661 	}
   10662 
   10663 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10664 }
   10665 
   10666 /*
   10667  * wm_gmii_82580_readreg:	[mii interface function]
   10668  *
   10669  *	Read a PHY register on the 82580 and I350.
   10670  * This could be handled by the PHY layer if we didn't have to lock the
    10671  * resource ...
   10672  */
   10673 static int
   10674 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10675 {
   10676 	struct wm_softc *sc = device_private(dev);
   10677 	int rv;
   10678 
   10679 	if (sc->phy.acquire(sc) != 0) {
   10680 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10681 		return 0;
   10682 	}
   10683 
   10684 #ifdef DIAGNOSTIC
   10685 	if (reg > MII_ADDRMASK) {
   10686 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10687 		    __func__, sc->sc_phytype, reg);
   10688 		reg &= MII_ADDRMASK;
   10689 	}
   10690 #endif
   10691 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10692 
   10693 	sc->phy.release(sc);
   10694 	return rv;
   10695 }
   10696 
   10697 /*
   10698  * wm_gmii_82580_writereg:	[mii interface function]
   10699  *
   10700  *	Write a PHY register on the 82580 and I350.
   10701  * This could be handled by the PHY layer if we didn't have to lock the
    10702  * resource ...
   10703  */
   10704 static void
   10705 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10706 {
   10707 	struct wm_softc *sc = device_private(dev);
   10708 
   10709 	if (sc->phy.acquire(sc) != 0) {
   10710 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10711 		return;
   10712 	}
   10713 
   10714 #ifdef DIAGNOSTIC
   10715 	if (reg > MII_ADDRMASK) {
   10716 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10717 		    __func__, sc->sc_phytype, reg);
   10718 		reg &= MII_ADDRMASK;
   10719 	}
   10720 #endif
   10721 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10722 
   10723 	sc->phy.release(sc);
   10724 }
   10725 
   10726 /*
   10727  * wm_gmii_gs40g_readreg:	[mii interface function]
   10728  *
    10729  *	Read a PHY register on the I210 and I211.
    10730  * This could be handled by the PHY layer if we didn't have to lock the
    10731  * resource ...
   10732  */
   10733 static int
   10734 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10735 {
   10736 	struct wm_softc *sc = device_private(dev);
   10737 	int page, offset;
   10738 	int rv;
   10739 
   10740 	/* Acquire semaphore */
   10741 	if (sc->phy.acquire(sc)) {
   10742 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10743 		return 0;
   10744 	}
   10745 
   10746 	/* Page select */
   10747 	page = reg >> GS40G_PAGE_SHIFT;
   10748 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10749 
   10750 	/* Read reg */
   10751 	offset = reg & GS40G_OFFSET_MASK;
   10752 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10753 
   10754 	sc->phy.release(sc);
   10755 	return rv;
   10756 }
   10757 
   10758 /*
   10759  * wm_gmii_gs40g_writereg:	[mii interface function]
   10760  *
   10761  *	Write a PHY register on the I210 and I211.
   10762  * This could be handled by the PHY layer if we didn't have to lock the
    10763  * resource ...
   10764  */
   10765 static void
   10766 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10767 {
   10768 	struct wm_softc *sc = device_private(dev);
   10769 	int page, offset;
   10770 
   10771 	/* Acquire semaphore */
   10772 	if (sc->phy.acquire(sc)) {
   10773 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10774 		return;
   10775 	}
   10776 
   10777 	/* Page select */
   10778 	page = reg >> GS40G_PAGE_SHIFT;
   10779 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10780 
   10781 	/* Write reg */
   10782 	offset = reg & GS40G_OFFSET_MASK;
   10783 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10784 
   10785 	/* Release semaphore */
   10786 	sc->phy.release(sc);
   10787 }
   10788 
   10789 /*
   10790  * wm_gmii_statchg:	[mii interface function]
   10791  *
   10792  *	Callback from MII layer when media changes.
   10793  */
   10794 static void
   10795 wm_gmii_statchg(struct ifnet *ifp)
   10796 {
   10797 	struct wm_softc *sc = ifp->if_softc;
   10798 	struct mii_data *mii = &sc->sc_mii;
   10799 
   10800 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10801 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10802 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10803 
   10804 	/*
   10805 	 * Get flow control negotiation result.
   10806 	 */
   10807 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10808 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10809 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10810 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10811 	}
   10812 
   10813 	if (sc->sc_flowflags & IFM_FLOW) {
   10814 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10815 			sc->sc_ctrl |= CTRL_TFCE;
   10816 			sc->sc_fcrtl |= FCRTL_XONE;
   10817 		}
   10818 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10819 			sc->sc_ctrl |= CTRL_RFCE;
   10820 	}
   10821 
   10822 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10823 		DPRINTF(WM_DEBUG_LINK,
   10824 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10825 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10826 	} else {
   10827 		DPRINTF(WM_DEBUG_LINK,
   10828 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10829 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10830 	}
   10831 
   10832 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10833 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10834 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10835 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10836 	if (sc->sc_type == WM_T_80003) {
   10837 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10838 		case IFM_1000_T:
   10839 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10840 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    10841 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10842 			break;
   10843 		default:
   10844 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10845 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    10846 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10847 			break;
   10848 		}
   10849 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10850 	}
   10851 }
   10852 
    10853 /* Kumeran related (80003, ICH* and PCH*) */
   10854 
   10855 /*
   10856  * wm_kmrn_readreg:
   10857  *
   10858  *	Read a kumeran register
   10859  */
   10860 static int
   10861 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10862 {
   10863 	int rv;
   10864 
   10865 	if (sc->sc_type == WM_T_80003)
   10866 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10867 	else
   10868 		rv = sc->phy.acquire(sc);
   10869 	if (rv != 0) {
   10870 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10871 		    __func__);
   10872 		return rv;
   10873 	}
   10874 
   10875 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10876 
   10877 	if (sc->sc_type == WM_T_80003)
   10878 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10879 	else
   10880 		sc->phy.release(sc);
   10881 
   10882 	return rv;
   10883 }
   10884 
   10885 static int
   10886 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10887 {
   10888 
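	/*
	 * A Kumeran read goes through the single KUMCTRLSTA register:
	 * write the register offset with the read-enable bit set, wait
	 * briefly, then read the 16-bit data back from the same
	 * register.
	 */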
   10889 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10890 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10891 	    KUMCTRLSTA_REN);
   10892 	CSR_WRITE_FLUSH(sc);
   10893 	delay(2);
   10894 
   10895 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10896 
   10897 	return 0;
   10898 }
   10899 
   10900 /*
   10901  * wm_kmrn_writereg:
   10902  *
   10903  *	Write a kumeran register
   10904  */
   10905 static int
   10906 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10907 {
   10908 	int rv;
   10909 
   10910 	if (sc->sc_type == WM_T_80003)
   10911 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10912 	else
   10913 		rv = sc->phy.acquire(sc);
   10914 	if (rv != 0) {
   10915 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10916 		    __func__);
   10917 		return rv;
   10918 	}
   10919 
   10920 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10921 
   10922 	if (sc->sc_type == WM_T_80003)
   10923 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10924 	else
   10925 		sc->phy.release(sc);
   10926 
   10927 	return rv;
   10928 }
   10929 
   10930 static int
   10931 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10932 {
   10933 
   10934 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10935 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10936 
   10937 	return 0;
   10938 }
   10939 
   10940 /* SGMII related */
   10941 
   10942 /*
   10943  * wm_sgmii_uses_mdio
   10944  *
   10945  * Check whether the transaction is to the internal PHY or the external
   10946  * MDIO interface. Return true if it's MDIO.
   10947  */
   10948 static bool
   10949 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10950 {
   10951 	uint32_t reg;
   10952 	bool ismdio = false;
   10953 
   10954 	switch (sc->sc_type) {
   10955 	case WM_T_82575:
   10956 	case WM_T_82576:
   10957 		reg = CSR_READ(sc, WMREG_MDIC);
   10958 		ismdio = ((reg & MDIC_DEST) != 0);
   10959 		break;
   10960 	case WM_T_82580:
   10961 	case WM_T_I350:
   10962 	case WM_T_I354:
   10963 	case WM_T_I210:
   10964 	case WM_T_I211:
   10965 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10966 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10967 		break;
   10968 	default:
   10969 		break;
   10970 	}
   10971 
   10972 	return ismdio;
   10973 }
   10974 
   10975 /*
   10976  * wm_sgmii_readreg:	[mii interface function]
   10977  *
   10978  *	Read a PHY register on the SGMII
   10979  * This could be handled by the PHY layer if we didn't have to lock the
    10980  * resource ...
   10981  */
   10982 static int
   10983 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10984 {
   10985 	struct wm_softc *sc = device_private(dev);
   10986 	uint32_t i2ccmd;
   10987 	int i, rv;
   10988 
   10989 	if (sc->phy.acquire(sc)) {
   10990 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10991 		return 0;
   10992 	}
   10993 
   10994 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10995 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10996 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10997 
   10998 	/* Poll the ready bit */
   10999 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11000 		delay(50);
   11001 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11002 		if (i2ccmd & I2CCMD_READY)
   11003 			break;
   11004 	}
   11005 	if ((i2ccmd & I2CCMD_READY) == 0)
   11006 		device_printf(dev, "I2CCMD Read did not complete\n");
   11007 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11008 		device_printf(dev, "I2CCMD Error bit set\n");
   11009 
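	/*
	 * The I2C interface returns the two data bytes swapped (cf. the
	 * matching swap in wm_sgmii_writereg()), so swap them back into
	 * host order here.
	 */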
   11010 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11011 
   11012 	sc->phy.release(sc);
   11013 	return rv;
   11014 }
   11015 
   11016 /*
   11017  * wm_sgmii_writereg:	[mii interface function]
   11018  *
   11019  *	Write a PHY register on the SGMII.
   11020  * This could be handled by the PHY layer if we didn't have to lock the
    11021  * resource ...
   11022  */
   11023 static void
   11024 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   11025 {
   11026 	struct wm_softc *sc = device_private(dev);
   11027 	uint32_t i2ccmd;
   11028 	int i;
   11029 	int swapdata;
   11030 
   11031 	if (sc->phy.acquire(sc) != 0) {
   11032 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11033 		return;
   11034 	}
   11035 	/* Swap the data bytes for the I2C interface */
   11036 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11037 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11038 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11039 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11040 
   11041 	/* Poll the ready bit */
   11042 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11043 		delay(50);
   11044 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11045 		if (i2ccmd & I2CCMD_READY)
   11046 			break;
   11047 	}
   11048 	if ((i2ccmd & I2CCMD_READY) == 0)
   11049 		device_printf(dev, "I2CCMD Write did not complete\n");
   11050 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11051 		device_printf(dev, "I2CCMD Error bit set\n");
   11052 
   11053 	sc->phy.release(sc);
   11054 }
   11055 
   11056 /* TBI related */
   11057 
   11058 static bool
   11059 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11060 {
   11061 	bool sig;
   11062 
   11063 	sig = ctrl & CTRL_SWDPIN(1);
   11064 
   11065 	/*
   11066 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11067 	 * detect a signal, 1 if they don't.
   11068 	 */
   11069 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11070 		sig = !sig;
   11071 
   11072 	return sig;
   11073 }
   11074 
   11075 /*
   11076  * wm_tbi_mediainit:
   11077  *
   11078  *	Initialize media for use on 1000BASE-X devices.
   11079  */
   11080 static void
   11081 wm_tbi_mediainit(struct wm_softc *sc)
   11082 {
   11083 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11084 	const char *sep = "";
   11085 
   11086 	if (sc->sc_type < WM_T_82543)
   11087 		sc->sc_tipg = TIPG_WM_DFLT;
   11088 	else
   11089 		sc->sc_tipg = TIPG_LG_DFLT;
   11090 
   11091 	sc->sc_tbi_serdes_anegticks = 5;
   11092 
   11093 	/* Initialize our media structures */
   11094 	sc->sc_mii.mii_ifp = ifp;
   11095 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11096 
   11097 	if ((sc->sc_type >= WM_T_82575)
   11098 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11099 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11100 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11101 	else
   11102 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11103 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11104 
   11105 	/*
   11106 	 * SWD Pins:
   11107 	 *
   11108 	 *	0 = Link LED (output)
   11109 	 *	1 = Loss Of Signal (input)
   11110 	 */
   11111 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11112 
   11113 	/* XXX Perhaps this is only for TBI */
   11114 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11115 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11116 
   11117 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11118 		sc->sc_ctrl &= ~CTRL_LRST;
   11119 
   11120 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11121 
   11122 #define	ADD(ss, mm, dd)							\
   11123 do {									\
   11124 	aprint_normal("%s%s", sep, ss);					\
   11125 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11126 	sep = ", ";							\
   11127 } while (/*CONSTCOND*/0)
   11128 
   11129 	aprint_normal_dev(sc->sc_dev, "");
   11130 
   11131 	if (sc->sc_type == WM_T_I354) {
   11132 		uint32_t status;
   11133 
   11134 		status = CSR_READ(sc, WMREG_STATUS);
   11135 		if (((status & STATUS_2P5_SKU) != 0)
   11136 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11137 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11138 		} else
   11139 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11140 	} else if (sc->sc_type == WM_T_82545) {
   11141 		/* Only 82545 is LX (XXX except SFP) */
   11142 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11143 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11144 	} else {
   11145 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11146 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11147 	}
   11148 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11149 	aprint_normal("\n");
   11150 
   11151 #undef ADD
   11152 
   11153 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11154 }
   11155 
   11156 /*
   11157  * wm_tbi_mediachange:	[ifmedia interface function]
   11158  *
   11159  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11160  */
   11161 static int
   11162 wm_tbi_mediachange(struct ifnet *ifp)
   11163 {
   11164 	struct wm_softc *sc = ifp->if_softc;
   11165 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11166 	uint32_t status, ctrl;
   11167 	bool signal;
   11168 	int i;
   11169 
   11170 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11171 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11172 		/* XXX need some work for >= 82571 and < 82575 */
   11173 		if (sc->sc_type < WM_T_82575)
   11174 			return 0;
   11175 	}
   11176 
   11177 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11178 	    || (sc->sc_type >= WM_T_82575))
   11179 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11180 
   11181 	sc->sc_ctrl &= ~CTRL_LRST;
   11182 	sc->sc_txcw = TXCW_ANE;
   11183 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11184 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11185 	else if (ife->ifm_media & IFM_FDX)
   11186 		sc->sc_txcw |= TXCW_FD;
   11187 	else
   11188 		sc->sc_txcw |= TXCW_HD;
   11189 
   11190 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11191 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11192 
   11193 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11194 		device_xname(sc->sc_dev), sc->sc_txcw));
   11195 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11196 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11197 	CSR_WRITE_FLUSH(sc);
   11198 	delay(1000);
   11199 
    11200 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11201 	signal = wm_tbi_havesignal(sc, ctrl);
   11202 
   11203 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11204 		signal));
   11205 
   11206 	if (signal) {
   11207 		/* Have signal; wait for the link to come up. */
   11208 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11209 			delay(10000);
   11210 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11211 				break;
   11212 		}
   11213 
   11214 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11215 			device_xname(sc->sc_dev),i));
   11216 
   11217 		status = CSR_READ(sc, WMREG_STATUS);
   11218 		DPRINTF(WM_DEBUG_LINK,
   11219 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11220 			device_xname(sc->sc_dev),status, STATUS_LU));
   11221 		if (status & STATUS_LU) {
   11222 			/* Link is up. */
   11223 			DPRINTF(WM_DEBUG_LINK,
   11224 			    ("%s: LINK: set media -> link up %s\n",
   11225 				device_xname(sc->sc_dev),
   11226 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11227 
   11228 			/*
   11229 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11230 			 * so we should update sc->sc_ctrl
   11231 			 */
   11232 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11233 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11234 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11235 			if (status & STATUS_FD)
   11236 				sc->sc_tctl |=
   11237 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11238 			else
   11239 				sc->sc_tctl |=
   11240 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11241 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11242 				sc->sc_fcrtl |= FCRTL_XONE;
   11243 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11244 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11245 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11246 			sc->sc_tbi_linkup = 1;
   11247 		} else {
   11248 			if (i == WM_LINKUP_TIMEOUT)
   11249 				wm_check_for_link(sc);
   11250 			/* Link is down. */
   11251 			DPRINTF(WM_DEBUG_LINK,
   11252 			    ("%s: LINK: set media -> link down\n",
   11253 				device_xname(sc->sc_dev)));
   11254 			sc->sc_tbi_linkup = 0;
   11255 		}
   11256 	} else {
   11257 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11258 			device_xname(sc->sc_dev)));
   11259 		sc->sc_tbi_linkup = 0;
   11260 	}
   11261 
   11262 	wm_tbi_serdes_set_linkled(sc);
   11263 
   11264 	return 0;
   11265 }
   11266 
   11267 /*
   11268  * wm_tbi_mediastatus:	[ifmedia interface function]
   11269  *
   11270  *	Get the current interface media status on a 1000BASE-X device.
   11271  */
   11272 static void
   11273 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11274 {
   11275 	struct wm_softc *sc = ifp->if_softc;
   11276 	uint32_t ctrl, status;
   11277 
   11278 	ifmr->ifm_status = IFM_AVALID;
   11279 	ifmr->ifm_active = IFM_ETHER;
   11280 
   11281 	status = CSR_READ(sc, WMREG_STATUS);
   11282 	if ((status & STATUS_LU) == 0) {
   11283 		ifmr->ifm_active |= IFM_NONE;
   11284 		return;
   11285 	}
   11286 
   11287 	ifmr->ifm_status |= IFM_ACTIVE;
   11288 	/* Only 82545 is LX */
   11289 	if (sc->sc_type == WM_T_82545)
   11290 		ifmr->ifm_active |= IFM_1000_LX;
   11291 	else
   11292 		ifmr->ifm_active |= IFM_1000_SX;
   11293 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11294 		ifmr->ifm_active |= IFM_FDX;
   11295 	else
   11296 		ifmr->ifm_active |= IFM_HDX;
   11297 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11298 	if (ctrl & CTRL_RFCE)
   11299 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11300 	if (ctrl & CTRL_TFCE)
   11301 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11302 }
   11303 
   11304 /* XXX TBI only */
   11305 static int
   11306 wm_check_for_link(struct wm_softc *sc)
   11307 {
   11308 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11309 	uint32_t rxcw;
   11310 	uint32_t ctrl;
   11311 	uint32_t status;
   11312 	bool signal;
   11313 
   11314 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11315 		device_xname(sc->sc_dev), __func__));
   11316 
   11317 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11318 		/* XXX need some work for >= 82571 */
   11319 		if (sc->sc_type >= WM_T_82571) {
   11320 			sc->sc_tbi_linkup = 1;
   11321 			return 0;
   11322 		}
   11323 	}
   11324 
   11325 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11326 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11327 	status = CSR_READ(sc, WMREG_STATUS);
   11328 	signal = wm_tbi_havesignal(sc, ctrl);
   11329 
   11330 	DPRINTF(WM_DEBUG_LINK,
   11331 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11332 		device_xname(sc->sc_dev), __func__, signal,
   11333 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11334 
   11335 	/*
	 * SWDPIN   LU RXCW
	 *	0    0	  0
	 *	0    0	  1	(should not happen)
	 *	0    1	  0	(should not happen)
	 *	0    1	  1	(should not happen)
	 *	1    0	  0	Disable autonegotiation and force link-up
	 *	1    0	  1	Got /C/ but no link yet
	 *	1    1	  0	(link up)
	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
   11346 	 */
   11347 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11348 		DPRINTF(WM_DEBUG_LINK,
   11349 		    ("%s: %s: force linkup and fullduplex\n",
   11350 			device_xname(sc->sc_dev), __func__));
   11351 		sc->sc_tbi_linkup = 0;
   11352 		/* Disable auto-negotiation in the TXCW register */
   11353 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11354 
   11355 		/*
   11356 		 * Force link-up and also force full-duplex.
   11357 		 *
		 * NOTE: the hardware may have updated TFCE and RFCE in
		 * CTRL automatically, so refresh sc->sc_ctrl from the
		 * value read above.
   11360 		 */
   11361 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11362 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11363 	} else if (((status & STATUS_LU) != 0)
   11364 	    && ((rxcw & RXCW_C) != 0)
   11365 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11366 		sc->sc_tbi_linkup = 1;
   11367 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11368 			device_xname(sc->sc_dev),
   11369 			__func__));
   11370 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11371 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11372 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   11374 			device_xname(sc->sc_dev), __func__));
   11375 	} else {
   11376 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11377 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11378 			status));
   11379 	}
   11380 
   11381 	return 0;
   11382 }
   11383 
   11384 /*
   11385  * wm_tbi_tick:
   11386  *
   11387  *	Check the link on TBI devices.
   11388  *	This function acts as mii_tick().
   11389  */
   11390 static void
   11391 wm_tbi_tick(struct wm_softc *sc)
   11392 {
   11393 	struct mii_data *mii = &sc->sc_mii;
   11394 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11395 	uint32_t status;
   11396 
   11397 	KASSERT(WM_CORE_LOCKED(sc));
   11398 
   11399 	status = CSR_READ(sc, WMREG_STATUS);
   11400 
   11401 	/* XXX is this needed? */
   11402 	(void)CSR_READ(sc, WMREG_RXCW);
   11403 	(void)CSR_READ(sc, WMREG_CTRL);
   11404 
   11405 	/* set link status */
   11406 	if ((status & STATUS_LU) == 0) {
   11407 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11408 			device_xname(sc->sc_dev)));
   11409 		sc->sc_tbi_linkup = 0;
   11410 	} else if (sc->sc_tbi_linkup == 0) {
   11411 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11412 			device_xname(sc->sc_dev),
   11413 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11414 		sc->sc_tbi_linkup = 1;
   11415 		sc->sc_tbi_serdes_ticks = 0;
   11416 	}
   11417 
   11418 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11419 		goto setled;
   11420 
   11421 	if ((status & STATUS_LU) == 0) {
   11422 		sc->sc_tbi_linkup = 0;
   11423 		/* If the timer expired, retry autonegotiation */
   11424 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11425 		    && (++sc->sc_tbi_serdes_ticks
   11426 			>= sc->sc_tbi_serdes_anegticks)) {
   11427 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11428 			sc->sc_tbi_serdes_ticks = 0;
   11429 			/*
   11430 			 * Reset the link, and let autonegotiation do
   11431 			 * its thing
   11432 			 */
   11433 			sc->sc_ctrl |= CTRL_LRST;
   11434 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11435 			CSR_WRITE_FLUSH(sc);
   11436 			delay(1000);
   11437 			sc->sc_ctrl &= ~CTRL_LRST;
   11438 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11439 			CSR_WRITE_FLUSH(sc);
   11440 			delay(1000);
   11441 			CSR_WRITE(sc, WMREG_TXCW,
   11442 			    sc->sc_txcw & ~TXCW_ANE);
   11443 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11444 		}
   11445 	}
   11446 
   11447 setled:
   11448 	wm_tbi_serdes_set_linkled(sc);
   11449 }
   11450 
   11451 /* SERDES related */
   11452 static void
   11453 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11454 {
   11455 	uint32_t reg;
   11456 
   11457 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11458 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11459 		return;
   11460 
   11461 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11462 	reg |= PCS_CFG_PCS_EN;
   11463 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11464 
   11465 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11466 	reg &= ~CTRL_EXT_SWDPIN(3);
   11467 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11468 	CSR_WRITE_FLUSH(sc);
   11469 }
   11470 
   11471 static int
   11472 wm_serdes_mediachange(struct ifnet *ifp)
   11473 {
   11474 	struct wm_softc *sc = ifp->if_softc;
   11475 	bool pcs_autoneg = true; /* XXX */
   11476 	uint32_t ctrl_ext, pcs_lctl, reg;
   11477 
   11478 	/* XXX Currently, this function is not called on 8257[12] */
   11479 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11480 	    || (sc->sc_type >= WM_T_82575))
   11481 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11482 
   11483 	wm_serdes_power_up_link_82575(sc);
   11484 
   11485 	sc->sc_ctrl |= CTRL_SLU;
   11486 
   11487 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11488 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11489 
   11490 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11491 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11492 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11493 	case CTRL_EXT_LINK_MODE_SGMII:
   11494 		pcs_autoneg = true;
   11495 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11496 		break;
   11497 	case CTRL_EXT_LINK_MODE_1000KX:
   11498 		pcs_autoneg = false;
   11499 		/* FALLTHROUGH */
   11500 	default:
   11501 		if ((sc->sc_type == WM_T_82575)
   11502 		    || (sc->sc_type == WM_T_82576)) {
   11503 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11504 				pcs_autoneg = false;
   11505 		}
   11506 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11507 		    | CTRL_FRCFDX;
   11508 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11509 	}
   11510 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11511 
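	/*
	 * Either restart PCS autonegotiation, advertising both pause
	 * bits, or force the speed/duplex/flow-control values chosen
	 * above.
	 */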
   11512 	if (pcs_autoneg) {
   11513 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11514 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11515 
   11516 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11517 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11518 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11519 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11520 	} else
   11521 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11522 
   11523 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11524 
   11526 	return 0;
   11527 }
   11528 
   11529 static void
   11530 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11531 {
   11532 	struct wm_softc *sc = ifp->if_softc;
   11533 	struct mii_data *mii = &sc->sc_mii;
   11534 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11535 	uint32_t pcs_adv, pcs_lpab, reg;
   11536 
   11537 	ifmr->ifm_status = IFM_AVALID;
   11538 	ifmr->ifm_active = IFM_ETHER;
   11539 
   11540 	/* Check PCS */
   11541 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11542 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11543 		ifmr->ifm_active |= IFM_NONE;
   11544 		sc->sc_tbi_linkup = 0;
   11545 		goto setled;
   11546 	}
   11547 
   11548 	sc->sc_tbi_linkup = 1;
   11549 	ifmr->ifm_status |= IFM_ACTIVE;
   11550 	if (sc->sc_type == WM_T_I354) {
   11551 		uint32_t status;
   11552 
   11553 		status = CSR_READ(sc, WMREG_STATUS);
   11554 		if (((status & STATUS_2P5_SKU) != 0)
   11555 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11556 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11557 		} else
   11558 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11559 	} else {
   11560 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11561 		case PCS_LSTS_SPEED_10:
   11562 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11563 			break;
   11564 		case PCS_LSTS_SPEED_100:
   11565 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11566 			break;
   11567 		case PCS_LSTS_SPEED_1000:
   11568 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11569 			break;
   11570 		default:
   11571 			device_printf(sc->sc_dev, "Unknown speed\n");
   11572 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11573 			break;
   11574 		}
   11575 	}
   11576 	if ((reg & PCS_LSTS_FDX) != 0)
   11577 		ifmr->ifm_active |= IFM_FDX;
   11578 	else
   11579 		ifmr->ifm_active |= IFM_HDX;
   11580 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11581 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11582 		/* Check flow */
   11583 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11584 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11585 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11586 			goto setled;
   11587 		}
   11588 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11589 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11590 		DPRINTF(WM_DEBUG_LINK,
   11591 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11592 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11593 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11594 			mii->mii_media_active |= IFM_FLOW
   11595 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11596 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11597 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11598 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11599 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11600 			mii->mii_media_active |= IFM_FLOW
   11601 			    | IFM_ETH_TXPAUSE;
   11602 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11603 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11604 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11605 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11606 			mii->mii_media_active |= IFM_FLOW
   11607 			    | IFM_ETH_RXPAUSE;
   11608 		}
   11609 	}
   11610 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11611 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11612 setled:
   11613 	wm_tbi_serdes_set_linkled(sc);
   11614 }
   11615 
   11616 /*
   11617  * wm_serdes_tick:
   11618  *
   11619  *	Check the link on serdes devices.
   11620  */
   11621 static void
   11622 wm_serdes_tick(struct wm_softc *sc)
   11623 {
   11624 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11625 	struct mii_data *mii = &sc->sc_mii;
   11626 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11627 	uint32_t reg;
   11628 
   11629 	KASSERT(WM_CORE_LOCKED(sc));
   11630 
   11631 	mii->mii_media_status = IFM_AVALID;
   11632 	mii->mii_media_active = IFM_ETHER;
   11633 
   11634 	/* Check PCS */
   11635 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11636 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11637 		mii->mii_media_status |= IFM_ACTIVE;
   11638 		sc->sc_tbi_linkup = 1;
   11639 		sc->sc_tbi_serdes_ticks = 0;
   11640 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11641 		if ((reg & PCS_LSTS_FDX) != 0)
   11642 			mii->mii_media_active |= IFM_FDX;
   11643 		else
   11644 			mii->mii_media_active |= IFM_HDX;
   11645 	} else {
   11646 		mii->mii_media_status |= IFM_NONE;
   11647 		sc->sc_tbi_linkup = 0;
   11648 		/* If the timer expired, retry autonegotiation */
   11649 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11650 		    && (++sc->sc_tbi_serdes_ticks
   11651 			>= sc->sc_tbi_serdes_anegticks)) {
   11652 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11653 			sc->sc_tbi_serdes_ticks = 0;
   11654 			/* XXX */
   11655 			wm_serdes_mediachange(ifp);
   11656 		}
   11657 	}
   11658 
   11659 	wm_tbi_serdes_set_linkled(sc);
   11660 }
   11661 
   11662 /* SFP related */
   11663 
   11664 static int
   11665 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11666 {
   11667 	uint32_t i2ccmd;
   11668 	int i;
   11669 
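	/*
	 * Start a one-byte read through the I2CCMD register, then poll
	 * until the hardware reports the transaction ready.
	 */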
   11670 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11671 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11672 
   11673 	/* Poll the ready bit */
   11674 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11675 		delay(50);
   11676 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11677 		if (i2ccmd & I2CCMD_READY)
   11678 			break;
   11679 	}
   11680 	if ((i2ccmd & I2CCMD_READY) == 0)
   11681 		return -1;
   11682 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11683 		return -1;
   11684 
   11685 	*data = i2ccmd & 0x00ff;
   11686 
   11687 	return 0;
   11688 }
   11689 
   11690 static uint32_t
   11691 wm_sfp_get_media_type(struct wm_softc *sc)
   11692 {
   11693 	uint32_t ctrl_ext;
   11694 	uint8_t val = 0;
   11695 	int timeout = 3;
   11696 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11697 	int rv = -1;
   11698 
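	/* Enable the I2C interface to reach the SFP module's ID EEPROM */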
   11699 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11700 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11701 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11702 	CSR_WRITE_FLUSH(sc);
   11703 
   11704 	/* Read SFP module data */
   11705 	while (timeout) {
   11706 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11707 		if (rv == 0)
   11708 			break;
   11709 		delay(100*1000); /* XXX too big */
   11710 		timeout--;
   11711 	}
   11712 	if (rv != 0)
   11713 		goto out;
   11714 	switch (val) {
   11715 	case SFF_SFP_ID_SFF:
   11716 		aprint_normal_dev(sc->sc_dev,
   11717 		    "Module/Connector soldered to board\n");
   11718 		break;
   11719 	case SFF_SFP_ID_SFP:
   11720 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11721 		break;
   11722 	case SFF_SFP_ID_UNKNOWN:
   11723 		goto out;
   11724 	default:
   11725 		break;
   11726 	}
   11727 
   11728 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11729 	if (rv != 0) {
   11730 		goto out;
   11731 	}
   11732 
   11733 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11734 		mediatype = WM_MEDIATYPE_SERDES;
   11735 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11736 		sc->sc_flags |= WM_F_SGMII;
   11737 		mediatype = WM_MEDIATYPE_COPPER;
   11738 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11739 		sc->sc_flags |= WM_F_SGMII;
   11740 		mediatype = WM_MEDIATYPE_SERDES;
   11741 	}
   11742 
   11743 out:
   11744 	/* Restore I2C interface setting */
   11745 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11746 
   11747 	return mediatype;
   11748 }
   11749 
   11750 /*
   11751  * NVM related.
   11752  * Microwire, SPI (w/wo EERD) and Flash.
   11753  */
   11754 
/* Bit-banging helpers shared by SPI and Microwire */
   11756 
   11757 /*
   11758  * wm_eeprom_sendbits:
   11759  *
   11760  *	Send a series of bits to the EEPROM.
   11761  */
   11762 static void
   11763 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11764 {
   11765 	uint32_t reg;
   11766 	int x;
   11767 
   11768 	reg = CSR_READ(sc, WMREG_EECD);
   11769 
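	/*
	 * Clock each bit out MSB first: present the bit on DI, then
	 * pulse SK high and low.
	 */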
   11770 	for (x = nbits; x > 0; x--) {
   11771 		if (bits & (1U << (x - 1)))
   11772 			reg |= EECD_DI;
   11773 		else
   11774 			reg &= ~EECD_DI;
   11775 		CSR_WRITE(sc, WMREG_EECD, reg);
   11776 		CSR_WRITE_FLUSH(sc);
   11777 		delay(2);
   11778 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11779 		CSR_WRITE_FLUSH(sc);
   11780 		delay(2);
   11781 		CSR_WRITE(sc, WMREG_EECD, reg);
   11782 		CSR_WRITE_FLUSH(sc);
   11783 		delay(2);
   11784 	}
   11785 }
   11786 
   11787 /*
   11788  * wm_eeprom_recvbits:
   11789  *
   11790  *	Receive a series of bits from the EEPROM.
   11791  */
   11792 static void
   11793 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11794 {
   11795 	uint32_t reg, val;
   11796 	int x;
   11797 
   11798 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11799 
   11800 	val = 0;
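	/*
	 * Clock each bit in MSB first: raise SK, sample DO while the
	 * clock is high, then lower SK.
	 */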
   11801 	for (x = nbits; x > 0; x--) {
   11802 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11803 		CSR_WRITE_FLUSH(sc);
   11804 		delay(2);
   11805 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11806 			val |= (1U << (x - 1));
   11807 		CSR_WRITE(sc, WMREG_EECD, reg);
   11808 		CSR_WRITE_FLUSH(sc);
   11809 		delay(2);
   11810 	}
   11811 	*valp = val;
   11812 }
   11813 
   11814 /* Microwire */
   11815 
   11816 /*
   11817  * wm_nvm_read_uwire:
   11818  *
   11819  *	Read a word from the EEPROM using the MicroWire protocol.
   11820  */
   11821 static int
   11822 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11823 {
   11824 	uint32_t reg, val;
   11825 	int i;
   11826 
   11827 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11828 		device_xname(sc->sc_dev), __func__));
   11829 
   11830 	if (sc->nvm.acquire(sc) != 0)
   11831 		return -1;
   11832 
   11833 	for (i = 0; i < wordcnt; i++) {
   11834 		/* Clear SK and DI. */
   11835 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11836 		CSR_WRITE(sc, WMREG_EECD, reg);
   11837 
   11838 		/*
   11839 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11840 		 * and Xen.
   11841 		 *
   11842 		 * We use this workaround only for 82540 because qemu's
   11843 		 * e1000 act as 82540.
   11844 		 */
   11845 		if (sc->sc_type == WM_T_82540) {
   11846 			reg |= EECD_SK;
   11847 			CSR_WRITE(sc, WMREG_EECD, reg);
   11848 			reg &= ~EECD_SK;
   11849 			CSR_WRITE(sc, WMREG_EECD, reg);
   11850 			CSR_WRITE_FLUSH(sc);
   11851 			delay(2);
   11852 		}
   11853 		/* XXX: end of workaround */
   11854 
   11855 		/* Set CHIP SELECT. */
   11856 		reg |= EECD_CS;
   11857 		CSR_WRITE(sc, WMREG_EECD, reg);
   11858 		CSR_WRITE_FLUSH(sc);
   11859 		delay(2);
   11860 
   11861 		/* Shift in the READ command. */
   11862 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11863 
   11864 		/* Shift in address. */
   11865 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11866 
   11867 		/* Shift out the data. */
   11868 		wm_eeprom_recvbits(sc, &val, 16);
   11869 		data[i] = val & 0xffff;
   11870 
   11871 		/* Clear CHIP SELECT. */
   11872 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11873 		CSR_WRITE(sc, WMREG_EECD, reg);
   11874 		CSR_WRITE_FLUSH(sc);
   11875 		delay(2);
   11876 	}
   11877 
   11878 	sc->nvm.release(sc);
   11879 	return 0;
   11880 }
   11881 
   11882 /* SPI */
   11883 
   11884 /*
   11885  * Set SPI and FLASH related information from the EECD register.
   11886  * For 82541 and 82547, the word size is taken from EEPROM.
   11887  */
   11888 static int
   11889 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11890 {
   11891 	int size;
   11892 	uint32_t reg;
   11893 	uint16_t data;
   11894 
   11895 	reg = CSR_READ(sc, WMREG_EECD);
   11896 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11897 
   11898 	/* Read the size of NVM from EECD by default */
   11899 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11900 	switch (sc->sc_type) {
   11901 	case WM_T_82541:
   11902 	case WM_T_82541_2:
   11903 	case WM_T_82547:
   11904 	case WM_T_82547_2:
		/* Assume 64 words until the real size is read from EEPROM */
   11906 		sc->sc_nvm_wordsize = 64;
   11907 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11908 			aprint_error_dev(sc->sc_dev,
   11909 			    "%s: failed to read EEPROM size\n", __func__);
   11910 		}
   11911 		reg = data;
   11912 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11913 		if (size == 0)
   11914 			size = 6; /* 64 word size */
   11915 		else
   11916 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11917 		break;
   11918 	case WM_T_80003:
   11919 	case WM_T_82571:
   11920 	case WM_T_82572:
   11921 	case WM_T_82573: /* SPI case */
   11922 	case WM_T_82574: /* SPI case */
   11923 	case WM_T_82583: /* SPI case */
   11924 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11925 		if (size > 14)
   11926 			size = 14;
   11927 		break;
   11928 	case WM_T_82575:
   11929 	case WM_T_82576:
   11930 	case WM_T_82580:
   11931 	case WM_T_I350:
   11932 	case WM_T_I354:
   11933 	case WM_T_I210:
   11934 	case WM_T_I211:
   11935 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11936 		if (size > 15)
   11937 			size = 15;
   11938 		break;
   11939 	default:
   11940 		aprint_error_dev(sc->sc_dev,
   11941 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11942 		return -1;
   11944 	}
   11945 
   11946 	sc->sc_nvm_wordsize = 1 << size;
   11947 
   11948 	return 0;
   11949 }
   11950 
   11951 /*
   11952  * wm_nvm_ready_spi:
   11953  *
   11954  *	Wait for a SPI EEPROM to be ready for commands.
   11955  */
   11956 static int
   11957 wm_nvm_ready_spi(struct wm_softc *sc)
   11958 {
   11959 	uint32_t val;
   11960 	int usec;
   11961 
   11962 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11963 		device_xname(sc->sc_dev), __func__));
   11964 
   11965 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11966 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11967 		wm_eeprom_recvbits(sc, &val, 8);
   11968 		if ((val & SPI_SR_RDY) == 0)
   11969 			break;
   11970 	}
   11971 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   11973 		return -1;
   11974 	}
   11975 	return 0;
   11976 }
   11977 
   11978 /*
   11979  * wm_nvm_read_spi:
   11980  *
   11981  *	Read a work from the EEPROM using the SPI protocol.
   11982  */
   11983 static int
   11984 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11985 {
   11986 	uint32_t reg, val;
   11987 	int i;
   11988 	uint8_t opc;
   11989 	int rv = 0;
   11990 
   11991 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11992 		device_xname(sc->sc_dev), __func__));
   11993 
   11994 	if (sc->nvm.acquire(sc) != 0)
   11995 		return -1;
   11996 
   11997 	/* Clear SK and CS. */
   11998 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11999 	CSR_WRITE(sc, WMREG_EECD, reg);
   12000 	CSR_WRITE_FLUSH(sc);
   12001 	delay(2);
   12002 
   12003 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12004 		goto out;
   12005 
   12006 	/* Toggle CS to flush commands. */
   12007 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12008 	CSR_WRITE_FLUSH(sc);
   12009 	delay(2);
   12010 	CSR_WRITE(sc, WMREG_EECD, reg);
   12011 	CSR_WRITE_FLUSH(sc);
   12012 	delay(2);
   12013 
   12014 	opc = SPI_OPC_READ;
   12015 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12016 		opc |= SPI_OPC_A8;
   12017 
   12018 	wm_eeprom_sendbits(sc, opc, 8);
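	/* SPI EEPROMs are byte addressed; word << 1 forms the byte address */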
   12019 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12020 
   12021 	for (i = 0; i < wordcnt; i++) {
   12022 		wm_eeprom_recvbits(sc, &val, 16);
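		/* The low byte of each word arrives first; swap the bytes */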
   12023 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12024 	}
   12025 
   12026 	/* Raise CS and clear SK. */
   12027 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12028 	CSR_WRITE(sc, WMREG_EECD, reg);
   12029 	CSR_WRITE_FLUSH(sc);
   12030 	delay(2);
   12031 
   12032 out:
   12033 	sc->nvm.release(sc);
   12034 	return rv;
   12035 }
   12036 
/* Reading via the EERD register */
   12038 
   12039 static int
   12040 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12041 {
   12042 	uint32_t attempts = 100000;
   12043 	uint32_t i, reg = 0;
   12044 	int32_t done = -1;
   12045 
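	/* Poll the DONE bit for up to attempts * 5us (about half a second) */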
   12046 	for (i = 0; i < attempts; i++) {
   12047 		reg = CSR_READ(sc, rw);
   12048 
   12049 		if (reg & EERD_DONE) {
   12050 			done = 0;
   12051 			break;
   12052 		}
   12053 		delay(5);
   12054 	}
   12055 
   12056 	return done;
   12057 }
   12058 
   12059 static int
   12060 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12061 {
   12062 	int i, eerd = 0;
   12063 	int rv = 0;
   12064 
   12065 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12066 		device_xname(sc->sc_dev), __func__));
   12067 
   12068 	if (sc->nvm.acquire(sc) != 0)
   12069 		return -1;
   12070 
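	/*
	 * For each word, write the address and the START bit into EERD,
	 * wait for DONE, then read the result from the data field.
	 */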
   12071 	for (i = 0; i < wordcnt; i++) {
   12072 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12073 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12074 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12075 		if (rv != 0) {
   12076 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   12077 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   12078 			break;
   12079 		}
   12080 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12081 	}
   12082 
   12083 	sc->nvm.release(sc);
   12084 	return rv;
   12085 }
   12086 
   12087 /* Flash */
   12088 
   12089 static int
   12090 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12091 {
   12092 	uint32_t eecd;
   12093 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12094 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12095 	uint32_t nvm_dword = 0;
   12096 	uint8_t sig_byte = 0;
   12097 	int rv;
   12098 
   12099 	switch (sc->sc_type) {
   12100 	case WM_T_PCH_SPT:
   12101 	case WM_T_PCH_CNP:
   12102 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12103 		act_offset = ICH_NVM_SIG_WORD * 2;
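		/*
		 * The bank-valid signature is the high byte of the word at
		 * ICH_NVM_SIG_WORD.
		 */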
   12104 
   12105 		/* set bank to 0 in case flash read fails. */
   12106 		*bank = 0;
   12107 
   12108 		/* Check bank 0 */
   12109 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12110 		if (rv != 0)
   12111 			return rv;
   12112 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12113 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12114 			*bank = 0;
   12115 			return 0;
   12116 		}
   12117 
   12118 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12122 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12123 			*bank = 1;
   12124 			return 0;
   12125 		}
   12126 		aprint_error_dev(sc->sc_dev,
   12127 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12128 		return -1;
   12129 	case WM_T_ICH8:
   12130 	case WM_T_ICH9:
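		/* If EECD says its bank-valid bit is usable, just trust it */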
   12131 		eecd = CSR_READ(sc, WMREG_EECD);
   12132 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12133 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12134 			return 0;
   12135 		}
   12136 		/* FALLTHROUGH */
   12137 	default:
   12138 		/* Default to 0 */
   12139 		*bank = 0;
   12140 
   12141 		/* Check bank 0 */
   12142 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12143 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12144 			*bank = 0;
   12145 			return 0;
   12146 		}
   12147 
   12148 		/* Check bank 1 */
   12149 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12150 		    &sig_byte);
   12151 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12152 			*bank = 1;
   12153 			return 0;
   12154 		}
   12155 	}
   12156 
   12157 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12158 		device_xname(sc->sc_dev)));
   12159 	return -1;
   12160 }
   12161 
   12162 /******************************************************************************
   12163  * This function does initial flash setup so that a new read/write/erase cycle
   12164  * can be started.
   12165  *
   12166  * sc - The pointer to the hw structure
   12167  ****************************************************************************/
   12168 static int32_t
   12169 wm_ich8_cycle_init(struct wm_softc *sc)
   12170 {
   12171 	uint16_t hsfsts;
   12172 	int32_t error = 1;
   12173 	int32_t i     = 0;
   12174 
   12175 	if (sc->sc_type >= WM_T_PCH_SPT)
   12176 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12177 	else
   12178 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12179 
	/* Check the Flash Descriptor Valid bit in the HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
   12184 
	/* Clear FCERR and DAEL in the HW status by writing ones */
   12187 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12188 
   12189 	if (sc->sc_type >= WM_T_PCH_SPT)
   12190 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12191 	else
   12192 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12193 
   12194 	/*
   12195 	 * Either we should have a hardware SPI cycle in progress bit to check
   12196 	 * against, in order to start a new cycle or FDONE bit should be
   12197 	 * changed in the hardware so that it is 1 after harware reset, which
   12198 	 * can then be used as an indication whether a cycle is in progress or
   12199 	 * has been completed .. we should also have some software semaphore
   12200 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   12201 	 * threads access to those bits can be sequentiallized or a way so that
   12202 	 * 2 threads dont start the cycle at the same time
   12203 	 */
   12204 
   12205 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12206 		/*
   12207 		 * There is no cycle running at present, so we can start a
   12208 		 * cycle
   12209 		 */
   12210 
   12211 		/* Begin by setting Flash Cycle Done. */
   12212 		hsfsts |= HSFSTS_DONE;
   12213 		if (sc->sc_type >= WM_T_PCH_SPT)
   12214 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12215 			    hsfsts & 0xffffUL);
   12216 		else
   12217 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12218 		error = 0;
   12219 	} else {
   12220 		/*
		 * Otherwise poll for a while so the current cycle has a
		 * chance to end before giving up.
   12223 		 */
   12224 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12225 			if (sc->sc_type >= WM_T_PCH_SPT)
   12226 				hsfsts = ICH8_FLASH_READ32(sc,
   12227 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12228 			else
   12229 				hsfsts = ICH8_FLASH_READ16(sc,
   12230 				    ICH_FLASH_HSFSTS);
   12231 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12232 				error = 0;
   12233 				break;
   12234 			}
   12235 			delay(1);
   12236 		}
   12237 		if (error == 0) {
   12238 			/*
			 * The previous cycle completed while we were
			 * polling; now set the Flash Cycle Done bit.
   12241 			 */
   12242 			hsfsts |= HSFSTS_DONE;
   12243 			if (sc->sc_type >= WM_T_PCH_SPT)
   12244 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12245 				    hsfsts & 0xffffUL);
   12246 			else
   12247 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12248 				    hsfsts);
   12249 		}
   12250 	}
   12251 	return error;
   12252 }
   12253 
   12254 /******************************************************************************
   12255  * This function starts a flash cycle and waits for its completion
   12256  *
   12257  * sc - The pointer to the hw structure
   12258  ****************************************************************************/
   12259 static int32_t
   12260 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12261 {
   12262 	uint16_t hsflctl;
   12263 	uint16_t hsfsts;
   12264 	int32_t error = 1;
   12265 	uint32_t i = 0;
   12266 
   12267 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12268 	if (sc->sc_type >= WM_T_PCH_SPT)
   12269 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12270 	else
   12271 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12272 	hsflctl |= HSFCTL_GO;
   12273 	if (sc->sc_type >= WM_T_PCH_SPT)
   12274 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12275 		    (uint32_t)hsflctl << 16);
   12276 	else
   12277 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12278 
   12279 	/* Wait till FDONE bit is set to 1 */
   12280 	do {
   12281 		if (sc->sc_type >= WM_T_PCH_SPT)
   12282 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12283 			    & 0xffffUL;
   12284 		else
   12285 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12286 		if (hsfsts & HSFSTS_DONE)
   12287 			break;
   12288 		delay(1);
   12289 		i++;
   12290 	} while (i < timeout);
   12291 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   12292 		error = 0;
   12293 
   12294 	return error;
   12295 }
   12296 
   12297 /******************************************************************************
   12298  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12299  *
   12300  * sc - The pointer to the hw structure
   12301  * index - The index of the byte or word to read.
   12302  * size - Size of data to read, 1=byte 2=word, 4=dword
   12303  * data - Pointer to the word to store the value read.
   12304  *****************************************************************************/
   12305 static int32_t
   12306 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12307     uint32_t size, uint32_t *data)
   12308 {
   12309 	uint16_t hsfsts;
   12310 	uint16_t hsflctl;
   12311 	uint32_t flash_linear_address;
   12312 	uint32_t flash_data = 0;
   12313 	int32_t error = 1;
   12314 	int32_t count = 0;
   12315 
	if (size < 1 || size > 4 || data == NULL ||
   12317 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12318 		return error;
   12319 
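	/* Map the NVM byte index into the device's linear flash window */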
   12320 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12321 	    sc->sc_ich8_flash_base;
   12322 
   12323 	do {
   12324 		delay(1);
   12325 		/* Steps */
   12326 		error = wm_ich8_cycle_init(sc);
   12327 		if (error)
   12328 			break;
   12329 
   12330 		if (sc->sc_type >= WM_T_PCH_SPT)
   12331 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12332 			    >> 16;
   12333 		else
   12334 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds size - 1 (0 means one byte) */
   12336 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12337 		    & HSFCTL_BCOUNT_MASK;
   12338 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12339 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12340 			/*
			 * On SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32-bit access is supported.
   12343 			 */
   12344 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12345 			    (uint32_t)hsflctl << 16);
   12346 		} else
   12347 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12348 
   12349 		/*
   12350 		 * Write the last 24 bits of index into Flash Linear address
   12351 		 * field in Flash Address
   12352 		 */
		/* TODO: maybe check the index against the size of flash */
   12354 
   12355 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12356 
   12357 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12358 
   12359 		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read the data out of Flash
		 * Data0, least significant byte first.
   12364 		 */
   12365 		if (error == 0) {
   12366 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12367 			if (size == 1)
   12368 				*data = (uint8_t)(flash_data & 0x000000FF);
   12369 			else if (size == 2)
   12370 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12371 			else if (size == 4)
   12372 				*data = (uint32_t)flash_data;
   12373 			break;
   12374 		} else {
   12375 			/*
   12376 			 * If we've gotten here, then things are probably
   12377 			 * completely hosed, but if the error condition is
   12378 			 * detected, it won't hurt to give it another try...
   12379 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12380 			 */
   12381 			if (sc->sc_type >= WM_T_PCH_SPT)
   12382 				hsfsts = ICH8_FLASH_READ32(sc,
   12383 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12384 			else
   12385 				hsfsts = ICH8_FLASH_READ16(sc,
   12386 				    ICH_FLASH_HSFSTS);
   12387 
   12388 			if (hsfsts & HSFSTS_ERR) {
   12389 				/* Repeat for some time before giving up. */
   12390 				continue;
   12391 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12392 				break;
   12393 		}
   12394 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12395 
   12396 	return error;
   12397 }
   12398 
   12399 /******************************************************************************
   12400  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12401  *
   12402  * sc - pointer to wm_hw structure
   12403  * index - The index of the byte to read.
   12404  * data - Pointer to a byte to store the value read.
   12405  *****************************************************************************/
   12406 static int32_t
   12407 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12408 {
   12409 	int32_t status;
   12410 	uint32_t word = 0;
   12411 
   12412 	status = wm_read_ich8_data(sc, index, 1, &word);
   12413 	if (status == 0)
   12414 		*data = (uint8_t)word;
   12415 	else
   12416 		*data = 0;
   12417 
   12418 	return status;
   12419 }
   12420 
   12421 /******************************************************************************
   12422  * Reads a word from the NVM using the ICH8 flash access registers.
   12423  *
   12424  * sc - pointer to wm_hw structure
   12425  * index - The starting byte index of the word to read.
   12426  * data - Pointer to a word to store the value read.
   12427  *****************************************************************************/
   12428 static int32_t
   12429 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12430 {
   12431 	int32_t status;
   12432 	uint32_t word = 0;
   12433 
   12434 	status = wm_read_ich8_data(sc, index, 2, &word);
   12435 	if (status == 0)
   12436 		*data = (uint16_t)word;
   12437 	else
   12438 		*data = 0;
   12439 
   12440 	return status;
   12441 }
   12442 
   12443 /******************************************************************************
   12444  * Reads a dword from the NVM using the ICH8 flash access registers.
   12445  *
   12446  * sc - pointer to wm_hw structure
   12447  * index - The starting byte index of the word to read.
   12448  * data - Pointer to a word to store the value read.
   12449  *****************************************************************************/
   12450 static int32_t
   12451 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12452 {
   12453 	int32_t status;
   12454 
   12455 	status = wm_read_ich8_data(sc, index, 4, data);
   12456 	return status;
   12457 }
   12458 
   12459 /******************************************************************************
   12460  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12461  * register.
   12462  *
   12463  * sc - Struct containing variables accessed by shared code
   12464  * offset - offset of word in the EEPROM to read
   12465  * data - word read from the EEPROM
   12466  * words - number of words to read
   12467  *****************************************************************************/
   12468 static int
   12469 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12470 {
   12471 	int32_t	 rv = 0;
   12472 	uint32_t flash_bank = 0;
   12473 	uint32_t act_offset = 0;
   12474 	uint32_t bank_offset = 0;
   12475 	uint16_t word = 0;
   12476 	uint16_t i = 0;
   12477 
   12478 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12479 		device_xname(sc->sc_dev), __func__));
   12480 
   12481 	if (sc->nvm.acquire(sc) != 0)
   12482 		return -1;
   12483 
   12484 	/*
   12485 	 * We need to know which is the valid flash bank.  In the event
   12486 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12487 	 * managing flash_bank. So it cannot be trusted and needs
   12488 	 * to be updated with each read.
   12489 	 */
   12490 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12491 	if (rv) {
   12492 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12493 			device_xname(sc->sc_dev)));
   12494 		flash_bank = 0;
   12495 	}
   12496 
   12497 	/*
   12498 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12499 	 * size
   12500 	 */
   12501 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12502 
   12503 	for (i = 0; i < words; i++) {
   12504 		/* The NVM part needs a byte offset, hence * 2 */
   12505 		act_offset = bank_offset + ((offset + i) * 2);
   12506 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12507 		if (rv) {
   12508 			aprint_error_dev(sc->sc_dev,
   12509 			    "%s: failed to read NVM\n", __func__);
   12510 			break;
   12511 		}
   12512 		data[i] = word;
   12513 	}
   12514 
   12515 	sc->nvm.release(sc);
   12516 	return rv;
   12517 }
   12518 
   12519 /******************************************************************************
   12520  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12521  * register.
   12522  *
   12523  * sc - Struct containing variables accessed by shared code
   12524  * offset - offset of word in the EEPROM to read
   12525  * data - word read from the EEPROM
   12526  * words - number of words to read
   12527  *****************************************************************************/
   12528 static int
   12529 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12530 {
   12531 	int32_t	 rv = 0;
   12532 	uint32_t flash_bank = 0;
   12533 	uint32_t act_offset = 0;
   12534 	uint32_t bank_offset = 0;
   12535 	uint32_t dword = 0;
   12536 	uint16_t i = 0;
   12537 
   12538 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12539 		device_xname(sc->sc_dev), __func__));
   12540 
   12541 	if (sc->nvm.acquire(sc) != 0)
   12542 		return -1;
   12543 
   12544 	/*
   12545 	 * We need to know which is the valid flash bank.  In the event
   12546 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12547 	 * managing flash_bank. So it cannot be trusted and needs
   12548 	 * to be updated with each read.
   12549 	 */
   12550 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12551 	if (rv) {
   12552 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12553 			device_xname(sc->sc_dev)));
   12554 		flash_bank = 0;
   12555 	}
   12556 
   12557 	/*
   12558 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12559 	 * size
   12560 	 */
   12561 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12562 
   12563 	for (i = 0; i < words; i++) {
   12564 		/* The NVM part needs a byte offset, hence * 2 */
   12565 		act_offset = bank_offset + ((offset + i) * 2);
   12566 		/* but we must read dword aligned, so mask ... */
   12567 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12568 		if (rv) {
   12569 			aprint_error_dev(sc->sc_dev,
   12570 			    "%s: failed to read NVM\n", __func__);
   12571 			break;
   12572 		}
   12573 		/* ... and pick out low or high word */
   12574 		if ((act_offset & 0x2) == 0)
   12575 			data[i] = (uint16_t)(dword & 0xFFFF);
   12576 		else
   12577 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12578 	}
   12579 
   12580 	sc->nvm.release(sc);
   12581 	return rv;
   12582 }
   12583 
   12584 /* iNVM */
   12585 
   12586 static int
   12587 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12588 {
	int32_t	 rv = -1;	/* Assume failure until the word is found */
   12590 	uint32_t invm_dword;
   12591 	uint16_t i;
   12592 	uint8_t record_type, word_address;
   12593 
   12594 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12595 		device_xname(sc->sc_dev), __func__));
   12596 
   12597 	for (i = 0; i < INVM_SIZE; i++) {
   12598 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12599 		/* Get record type */
   12600 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12601 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12602 			break;
   12603 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12604 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12605 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12606 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12607 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12608 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12609 			if (word_address == address) {
   12610 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12611 				rv = 0;
   12612 				break;
   12613 			}
   12614 		}
   12615 	}
   12616 
   12617 	return rv;
   12618 }
   12619 
   12620 static int
   12621 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12622 {
   12623 	int rv = 0;
   12624 	int i;
   12625 
   12626 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12627 		device_xname(sc->sc_dev), __func__));
   12628 
   12629 	if (sc->nvm.acquire(sc) != 0)
   12630 		return -1;
   12631 
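	/*
	 * Only a handful of words are mapped in iNVM; for mapped words
	 * that were never programmed, fall back to the documented I211
	 * defaults.
	 */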
   12632 	for (i = 0; i < words; i++) {
   12633 		switch (offset + i) {
   12634 		case NVM_OFF_MACADDR:
   12635 		case NVM_OFF_MACADDR1:
   12636 		case NVM_OFF_MACADDR2:
   12637 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12638 			if (rv != 0) {
   12639 				data[i] = 0xffff;
   12640 				rv = -1;
   12641 			}
   12642 			break;
   12643 		case NVM_OFF_CFG2:
   12644 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12645 			if (rv != 0) {
   12646 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12647 				rv = 0;
   12648 			}
   12649 			break;
   12650 		case NVM_OFF_CFG4:
   12651 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12652 			if (rv != 0) {
   12653 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12654 				rv = 0;
   12655 			}
   12656 			break;
   12657 		case NVM_OFF_LED_1_CFG:
   12658 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12659 			if (rv != 0) {
   12660 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12661 				rv = 0;
   12662 			}
   12663 			break;
   12664 		case NVM_OFF_LED_0_2_CFG:
   12665 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12666 			if (rv != 0) {
   12667 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12668 				rv = 0;
   12669 			}
   12670 			break;
   12671 		case NVM_OFF_ID_LED_SETTINGS:
   12672 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12673 			if (rv != 0) {
   12674 				*data = ID_LED_RESERVED_FFFF;
   12675 				rv = 0;
   12676 			}
   12677 			break;
   12678 		default:
   12679 			DPRINTF(WM_DEBUG_NVM,
   12680 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12681 			*data = NVM_RESERVED_WORD;
   12682 			break;
   12683 		}
   12684 	}
   12685 
   12686 	sc->nvm.release(sc);
   12687 	return rv;
   12688 }
   12689 
/* Locking, NVM type detection, checksum validation, version and read */
   12691 
   12692 static int
   12693 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12694 {
   12695 	uint32_t eecd = 0;
   12696 
   12697 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12698 	    || sc->sc_type == WM_T_82583) {
   12699 		eecd = CSR_READ(sc, WMREG_EECD);
   12700 
   12701 		/* Isolate bits 15 & 16 */
   12702 		eecd = ((eecd >> 15) & 0x03);
   12703 
   12704 		/* If both bits are set, device is Flash type */
   12705 		if (eecd == 0x03)
   12706 			return 0;
   12707 	}
   12708 	return 1;
   12709 }
   12710 
   12711 static int
   12712 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12713 {
   12714 	uint32_t eec;
   12715 
   12716 	eec = CSR_READ(sc, WMREG_EEC);
   12717 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12718 		return 1;
   12719 
   12720 	return 0;
   12721 }
   12722 
   12723 /*
 * wm_nvm_validate_checksum:
 *
 * The NVM is valid when the sum of the first 64 16-bit words equals
 * NVM_CHECKSUM.
   12727  */
   12728 static int
   12729 wm_nvm_validate_checksum(struct wm_softc *sc)
   12730 {
   12731 	uint16_t checksum;
   12732 	uint16_t eeprom_data;
   12733 #ifdef WM_DEBUG
   12734 	uint16_t csum_wordaddr, valid_checksum;
   12735 #endif
   12736 	int i;
   12737 
   12738 	checksum = 0;
   12739 
   12740 	/* Don't check for I211 */
   12741 	if (sc->sc_type == WM_T_I211)
   12742 		return 0;
   12743 
   12744 #ifdef WM_DEBUG
   12745 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12746 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12747 		csum_wordaddr = NVM_OFF_COMPAT;
   12748 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12749 	} else {
   12750 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12751 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12752 	}
   12753 
   12754 	/* Dump EEPROM image for debug */
   12755 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12756 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12757 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12758 		/* XXX PCH_SPT? */
   12759 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12760 		if ((eeprom_data & valid_checksum) == 0) {
   12761 			DPRINTF(WM_DEBUG_NVM,
   12762 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12763 				device_xname(sc->sc_dev), eeprom_data,
   12764 				    valid_checksum));
   12765 		}
   12766 	}
   12767 
   12768 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12769 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12770 		for (i = 0; i < NVM_SIZE; i++) {
   12771 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12772 				printf("XXXX ");
   12773 			else
   12774 				printf("%04hx ", eeprom_data);
   12775 			if (i % 8 == 7)
   12776 				printf("\n");
   12777 		}
   12778 	}
   12779 
   12780 #endif /* WM_DEBUG */
   12781 
   12782 	for (i = 0; i < NVM_SIZE; i++) {
   12783 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12784 			return 1;
   12785 		checksum += eeprom_data;
   12786 	}
   12787 
   12788 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12789 #ifdef WM_DEBUG
   12790 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12791 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12792 #endif
   12793 	}
   12794 
   12795 	return 0;
   12796 }
   12797 
   12798 static void
   12799 wm_nvm_version_invm(struct wm_softc *sc)
   12800 {
   12801 	uint32_t dword;
   12802 
   12803 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the datasheet
	 * describes.  Perhaps it's not perfect though...
   12807 	 *
   12808 	 * Example:
   12809 	 *
   12810 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12811 	 */
   12812 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12813 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12814 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12815 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12816 }
   12817 
   12818 static void
   12819 wm_nvm_version(struct wm_softc *sc)
   12820 {
   12821 	uint16_t major, minor, build, patch;
   12822 	uint16_t uid0, uid1;
   12823 	uint16_t nvm_data;
   12824 	uint16_t off;
   12825 	bool check_version = false;
   12826 	bool check_optionrom = false;
   12827 	bool have_build = false;
   12828 	bool have_uid = true;
   12829 
   12830 	/*
   12831 	 * Version format:
   12832 	 *
   12833 	 * XYYZ
   12834 	 * X0YZ
   12835 	 * X0YY
   12836 	 *
   12837 	 * Example:
   12838 	 *
   12839 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12840 	 *	82571	0x50a6	5.10.6?
   12841 	 *	82572	0x506a	5.6.10?
   12842 	 *	82572EI	0x5069	5.6.9?
   12843 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12844 	 *		0x2013	2.1.3?
   12845 	 *	82583	0x10a0	1.10.0? (document says it's default vaule)
   12846 	 */
   12847 
   12848 	/*
   12849 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12852 	 */
   12853 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12854 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12855 		have_uid = false;
   12856 
   12857 	switch (sc->sc_type) {
   12858 	case WM_T_82571:
   12859 	case WM_T_82572:
   12860 	case WM_T_82574:
   12861 	case WM_T_82583:
   12862 		check_version = true;
   12863 		check_optionrom = true;
   12864 		have_build = true;
   12865 		break;
   12866 	case WM_T_82575:
   12867 	case WM_T_82576:
   12868 	case WM_T_82580:
   12869 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12870 			check_version = true;
   12871 		break;
   12872 	case WM_T_I211:
   12873 		wm_nvm_version_invm(sc);
   12874 		have_uid = false;
   12875 		goto printver;
   12876 	case WM_T_I210:
   12877 		if (!wm_nvm_flash_presence_i210(sc)) {
   12878 			wm_nvm_version_invm(sc);
   12879 			have_uid = false;
   12880 			goto printver;
   12881 		}
   12882 		/* FALLTHROUGH */
   12883 	case WM_T_I350:
   12884 	case WM_T_I354:
   12885 		check_version = true;
   12886 		check_optionrom = true;
   12887 		break;
   12888 	default:
   12889 		return;
   12890 	}
   12891 	if (check_version
   12892 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12893 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12894 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12895 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12896 			build = nvm_data & NVM_BUILD_MASK;
   12897 			have_build = true;
   12898 		} else
   12899 			minor = nvm_data & 0x00ff;
   12900 
		/* The minor number is BCD-encoded (e.g. 0x10 means 10) */
   12902 		minor = (minor / 16) * 10 + (minor % 16);
   12903 		sc->sc_nvm_ver_major = major;
   12904 		sc->sc_nvm_ver_minor = minor;
   12905 
   12906 printver:
   12907 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12908 		    sc->sc_nvm_ver_minor);
   12909 		if (have_build) {
   12910 			sc->sc_nvm_ver_build = build;
   12911 			aprint_verbose(".%d", build);
   12912 		}
   12913 	}
   12914 
   12915 	/* Assume the Option ROM area is above NVM_SIZE */
   12916 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12917 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12918 		/* Option ROM Version */
   12919 		if ((off != 0x0000) && (off != 0xffff)) {
   12920 			int rv;
   12921 
   12922 			off += NVM_COMBO_VER_OFF;
   12923 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12924 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12925 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12926 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12927 				/* 8-bit major, 16-bit build, 8-bit patch */
   12928 				major = uid0 >> 8;
   12929 				build = (uid0 << 8) | (uid1 >> 8);
   12930 				patch = uid1 & 0x00ff;
   12931 				aprint_verbose(", option ROM Version %d.%d.%d",
   12932 				    major, build, patch);
   12933 			}
   12934 		}
   12935 	}
   12936 
   12937 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12938 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12939 }
   12940 
   12941 /*
   12942  * wm_nvm_read:
   12943  *
   12944  *	Read data from the serial EEPROM.
   12945  */
   12946 static int
   12947 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12948 {
   12949 	int rv;
   12950 
   12951 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12952 		device_xname(sc->sc_dev), __func__));
   12953 
   12954 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12955 		return -1;
   12956 
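          	/*
          	 * Dispatch to the chip-specific backend (Microwire, SPI, EERD,
          	 * ICH8 flash, iNVM, ...) that was selected at attach time.
          	 */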
   12957 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12958 
   12959 	return rv;
   12960 }
   12961 
   12962 /*
   12963  * Hardware semaphores.
   12964  * Very complex...
   12965  */
   12966 
   12967 static int
   12968 wm_get_null(struct wm_softc *sc)
   12969 {
   12970 
   12971 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12972 		device_xname(sc->sc_dev), __func__));
   12973 	return 0;
   12974 }
   12975 
   12976 static void
   12977 wm_put_null(struct wm_softc *sc)
   12978 {
   12979 
   12980 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12981 		device_xname(sc->sc_dev), __func__));
   12982 	return;
   12983 }
   12984 
   12985 static int
   12986 wm_get_eecd(struct wm_softc *sc)
   12987 {
   12988 	uint32_t reg;
   12989 	int x;
   12990 
   12991 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12992 		device_xname(sc->sc_dev), __func__));
   12993 
   12994 	reg = CSR_READ(sc, WMREG_EECD);
   12995 
   12996 	/* Request EEPROM access. */
   12997 	reg |= EECD_EE_REQ;
   12998 	CSR_WRITE(sc, WMREG_EECD, reg);
   12999 
   13000 	/* ... and wait for it to be granted. */
   13001 	for (x = 0; x < 1000; x++) {
   13002 		reg = CSR_READ(sc, WMREG_EECD);
   13003 		if (reg & EECD_EE_GNT)
   13004 			break;
   13005 		delay(5);
   13006 	}
   13007 	if ((reg & EECD_EE_GNT) == 0) {
   13008 		aprint_error_dev(sc->sc_dev,
   13009 		    "could not acquire EEPROM GNT\n");
   13010 		reg &= ~EECD_EE_REQ;
   13011 		CSR_WRITE(sc, WMREG_EECD, reg);
   13012 		return -1;
   13013 	}
   13014 
   13015 	return 0;
   13016 }
   13017 
   13018 static void
   13019 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13020 {
   13021 
   13022 	*eecd |= EECD_SK;
   13023 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13024 	CSR_WRITE_FLUSH(sc);
   13025 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13026 		delay(1);
   13027 	else
   13028 		delay(50);
   13029 }
   13030 
   13031 static void
   13032 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13033 {
   13034 
   13035 	*eecd &= ~EECD_SK;
   13036 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13037 	CSR_WRITE_FLUSH(sc);
   13038 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13039 		delay(1);
   13040 	else
   13041 		delay(50);
   13042 }
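
          /*
           * A sketch of how the two clock helpers above are used elsewhere
           * in this file: a data bit is shifted out to the EEPROM by
           * presenting it on EECD_DI and pulsing SK (illustrative only):
           *
           *	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
           *	if (bit)
           *		reg |= EECD_DI;
           *	CSR_WRITE(sc, WMREG_EECD, reg);
           *	CSR_WRITE_FLUSH(sc);
           *	wm_nvm_eec_clock_raise(sc, &reg);
           *	wm_nvm_eec_clock_lower(sc, &reg);
           */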
   13043 
   13044 static void
   13045 wm_put_eecd(struct wm_softc *sc)
   13046 {
   13047 	uint32_t reg;
   13048 
   13049 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13050 		device_xname(sc->sc_dev), __func__));
   13051 
   13052 	/* Stop nvm */
   13053 	reg = CSR_READ(sc, WMREG_EECD);
   13054 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13055 		/* Pull CS high */
   13056 		reg |= EECD_CS;
   13057 		wm_nvm_eec_clock_lower(sc, &reg);
   13058 	} else {
   13059 		/* CS on Microwire is active-high */
   13060 		reg &= ~(EECD_CS | EECD_DI);
   13061 		CSR_WRITE(sc, WMREG_EECD, reg);
   13062 		wm_nvm_eec_clock_raise(sc, &reg);
   13063 		wm_nvm_eec_clock_lower(sc, &reg);
   13064 	}
   13065 
   13066 	reg = CSR_READ(sc, WMREG_EECD);
   13067 	reg &= ~EECD_EE_REQ;
   13068 	CSR_WRITE(sc, WMREG_EECD, reg);
   13069 
   13070 	return;
   13071 }
   13072 
   13073 /*
   13074  * Get hardware semaphore.
   13075  * Same as e1000_get_hw_semaphore_generic()
   13076  */
   13077 static int
   13078 wm_get_swsm_semaphore(struct wm_softc *sc)
   13079 {
   13080 	int32_t timeout;
   13081 	uint32_t swsm;
   13082 
   13083 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13084 		device_xname(sc->sc_dev), __func__));
   13085 	KASSERT(sc->sc_nvm_wordsize > 0);
   13086 
   13087 retry:
   13088 	/* Get the SW semaphore. */
   13089 	timeout = sc->sc_nvm_wordsize + 1;
   13090 	while (timeout) {
   13091 		swsm = CSR_READ(sc, WMREG_SWSM);
   13092 
   13093 		if ((swsm & SWSM_SMBI) == 0)
   13094 			break;
   13095 
   13096 		delay(50);
   13097 		timeout--;
   13098 	}
   13099 
   13100 	if (timeout == 0) {
   13101 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13102 			/*
   13103 			 * In rare circumstances, the SW semaphore may already
   13104 			 * be held unintentionally. Clear the semaphore once
   13105 			 * before giving up.
   13106 			 */
   13107 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13108 			wm_put_swsm_semaphore(sc);
   13109 			goto retry;
   13110 		}
   13111 		aprint_error_dev(sc->sc_dev,
   13112 		    "could not acquire SWSM SMBI\n");
   13113 		return 1;
   13114 	}
   13115 
   13116 	/* Get the FW semaphore. */
   13117 	timeout = sc->sc_nvm_wordsize + 1;
   13118 	while (timeout) {
   13119 		swsm = CSR_READ(sc, WMREG_SWSM);
   13120 		swsm |= SWSM_SWESMBI;
   13121 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13122 		/* If we managed to set the bit we got the semaphore. */
   13123 		swsm = CSR_READ(sc, WMREG_SWSM);
   13124 		if (swsm & SWSM_SWESMBI)
   13125 			break;
   13126 
   13127 		delay(50);
   13128 		timeout--;
   13129 	}
   13130 
   13131 	if (timeout == 0) {
   13132 		aprint_error_dev(sc->sc_dev,
   13133 		    "could not acquire SWSM SWESMBI\n");
   13134 		/* Release semaphores */
   13135 		wm_put_swsm_semaphore(sc);
   13136 		return 1;
   13137 	}
   13138 	return 0;
   13139 }
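
          /*
           * Acquiring SWSM is a two-step handshake: wait for the
           * hardware-owned SMBI bit to clear, then claim SWESMBI by writing
           * it and reading it back.  A typical caller (sketch) pairs it with
           * the put routine:
           *
           *	if (wm_get_swsm_semaphore(sc) == 0) {
           *		... touch the resource shared with firmware ...
           *		wm_put_swsm_semaphore(sc);
           *	}
           */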
   13140 
   13141 /*
   13142  * Put hardware semaphore.
   13143  * Same as e1000_put_hw_semaphore_generic()
   13144  */
   13145 static void
   13146 wm_put_swsm_semaphore(struct wm_softc *sc)
   13147 {
   13148 	uint32_t swsm;
   13149 
   13150 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13151 		device_xname(sc->sc_dev), __func__));
   13152 
   13153 	swsm = CSR_READ(sc, WMREG_SWSM);
   13154 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13155 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13156 }
   13157 
   13158 /*
   13159  * Get SW/FW semaphore.
   13160  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13161  */
   13162 static int
   13163 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13164 {
   13165 	uint32_t swfw_sync;
   13166 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13167 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13168 	int timeout;
   13169 
   13170 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13171 		device_xname(sc->sc_dev), __func__));
   13172 
   13173 	if (sc->sc_type == WM_T_80003)
   13174 		timeout = 50;
   13175 	else
   13176 		timeout = 200;
   13177 
   13178 	while (timeout) {
   13179 		if (wm_get_swsm_semaphore(sc)) {
   13180 			aprint_error_dev(sc->sc_dev,
   13181 			    "%s: failed to get semaphore\n",
   13182 			    __func__);
   13183 			return 1;
   13184 		}
   13185 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13186 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13187 			swfw_sync |= swmask;
   13188 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13189 			wm_put_swsm_semaphore(sc);
   13190 			return 0;
   13191 		}
   13192 		wm_put_swsm_semaphore(sc);
   13193 		delay(5000);
   13194 		timeout--;
   13195 	}
   13196 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13197 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13198 	return 1;
   13199 }
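
          /*
           * In SW_FW_SYNC each resource has a software-owned bit and a
           * firmware-owned copy of the same bit: for a given mask the code
           * above checks both (mask << SWFW_SOFT_SHIFT) and
           * (mask << SWFW_FIRM_SHIFT), and sets only its own copy once
           * neither side holds the resource.
           */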
   13200 
   13201 static void
   13202 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13203 {
   13204 	uint32_t swfw_sync;
   13205 
   13206 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13207 		device_xname(sc->sc_dev), __func__));
   13208 
   13209 	while (wm_get_swsm_semaphore(sc) != 0)
   13210 		continue;
   13211 
   13212 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13213 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13214 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13215 
   13216 	wm_put_swsm_semaphore(sc);
   13217 }
   13218 
   13219 static int
   13220 wm_get_nvm_80003(struct wm_softc *sc)
   13221 {
   13222 	int rv;
   13223 
   13224 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13225 		device_xname(sc->sc_dev), __func__));
   13226 
   13227 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13228 		aprint_error_dev(sc->sc_dev,
   13229 		    "%s: failed to get semaphore(SWFW)\n",
   13230 		    __func__);
   13231 		return rv;
   13232 	}
   13233 
   13234 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13235 	    && (rv = wm_get_eecd(sc)) != 0) {
   13236 		aprint_error_dev(sc->sc_dev,
   13237 		    "%s: failed to get semaphore(EECD)\n",
   13238 		    __func__);
   13239 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13240 		return rv;
   13241 	}
   13242 
   13243 	return 0;
   13244 }
   13245 
   13246 static void
   13247 wm_put_nvm_80003(struct wm_softc *sc)
   13248 {
   13249 
   13250 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13251 		device_xname(sc->sc_dev), __func__));
   13252 
   13253 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13254 		wm_put_eecd(sc);
   13255 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13256 }
   13257 
   13258 static int
   13259 wm_get_nvm_82571(struct wm_softc *sc)
   13260 {
   13261 	int rv;
   13262 
   13263 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13264 		device_xname(sc->sc_dev), __func__));
   13265 
   13266 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13267 		return rv;
   13268 
   13269 	switch (sc->sc_type) {
   13270 	case WM_T_82573:
   13271 		break;
   13272 	default:
   13273 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13274 			rv = wm_get_eecd(sc);
   13275 		break;
   13276 	}
   13277 
   13278 	if (rv != 0) {
   13279 		aprint_error_dev(sc->sc_dev,
   13280 		    "%s: failed to get semaphore\n",
   13281 		    __func__);
   13282 		wm_put_swsm_semaphore(sc);
   13283 	}
   13284 
   13285 	return rv;
   13286 }
   13287 
   13288 static void
   13289 wm_put_nvm_82571(struct wm_softc *sc)
   13290 {
   13291 
   13292 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13293 		device_xname(sc->sc_dev), __func__));
   13294 
   13295 	switch (sc->sc_type) {
   13296 	case WM_T_82573:
   13297 		break;
   13298 	default:
   13299 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13300 			wm_put_eecd(sc);
   13301 		break;
   13302 	}
   13303 
   13304 	wm_put_swsm_semaphore(sc);
   13305 }
   13306 
   13307 static int
   13308 wm_get_phy_82575(struct wm_softc *sc)
   13309 {
   13310 
   13311 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13312 		device_xname(sc->sc_dev), __func__));
   13313 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13314 }
   13315 
   13316 static void
   13317 wm_put_phy_82575(struct wm_softc *sc)
   13318 {
   13319 
   13320 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13321 		device_xname(sc->sc_dev), __func__));
   13322 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13323 }
   13324 
   13325 static int
   13326 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13327 {
   13328 	uint32_t ext_ctrl;
   13329 	int timeout;
   13330 
   13331 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13332 		device_xname(sc->sc_dev), __func__));
   13333 
   13334 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
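          	/*
          	 * Ownership handshake: set the SW ownership bit and read it
          	 * back.  The bit only sticks once firmware has released the
          	 * resource, so retry with a delay until it does.
          	 */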
   13335 	for (timeout = 0; timeout < 200; timeout++) {
   13336 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13337 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13338 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13339 
   13340 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13341 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13342 			return 0;
   13343 		delay(5000);
   13344 	}
   13345 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13346 	    device_xname(sc->sc_dev), ext_ctrl);
   13347 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13348 	return 1;
   13349 }
   13350 
   13351 static void
   13352 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13353 {
   13354 	uint32_t ext_ctrl;
   13355 
   13356 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13357 		device_xname(sc->sc_dev), __func__));
   13358 
   13359 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13360 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13361 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13362 
   13363 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13364 }
   13365 
   13366 static int
   13367 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13368 {
   13369 	uint32_t ext_ctrl;
   13370 	int timeout;
   13371 
   13372 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13373 		device_xname(sc->sc_dev), __func__));
   13374 	mutex_enter(sc->sc_ich_phymtx);
   13375 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13376 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13377 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13378 			break;
   13379 		delay(1000);
   13380 	}
   13381 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13382 		printf("%s: SW has already locked the resource\n",
   13383 		    device_xname(sc->sc_dev));
   13384 		goto out;
   13385 	}
   13386 
   13387 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13388 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13389 	for (timeout = 0; timeout < 1000; timeout++) {
   13390 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13391 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13392 			break;
   13393 		delay(1000);
   13394 	}
   13395 	if (timeout >= 1000) {
   13396 		printf("%s: failed to acquire semaphore\n",
   13397 		    device_xname(sc->sc_dev));
   13398 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13399 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13400 		goto out;
   13401 	}
   13402 	return 0;
   13403 
   13404 out:
   13405 	mutex_exit(sc->sc_ich_phymtx);
   13406 	return 1;
   13407 }
   13408 
   13409 static void
   13410 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13411 {
   13412 	uint32_t ext_ctrl;
   13413 
   13414 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13415 		device_xname(sc->sc_dev), __func__));
   13416 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13417 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13418 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13419 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13420 	} else {
   13421 		printf("%s: Semaphore unexpectedly released\n",
   13422 		    device_xname(sc->sc_dev));
   13423 	}
   13424 
   13425 	mutex_exit(sc->sc_ich_phymtx);
   13426 }
   13427 
   13428 static int
   13429 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13430 {
   13431 
   13432 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13433 		device_xname(sc->sc_dev), __func__));
   13434 	mutex_enter(sc->sc_ich_nvmmtx);
   13435 
   13436 	return 0;
   13437 }
   13438 
   13439 static void
   13440 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13441 {
   13442 
   13443 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13444 		device_xname(sc->sc_dev), __func__));
   13445 	mutex_exit(sc->sc_ich_nvmmtx);
   13446 }
   13447 
   13448 static int
   13449 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13450 {
   13451 	int i = 0;
   13452 	uint32_t reg;
   13453 
   13454 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13455 		device_xname(sc->sc_dev), __func__));
   13456 
   13457 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13458 	do {
   13459 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13460 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13461 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13462 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13463 			break;
   13464 		delay(2*1000);
   13465 		i++;
   13466 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13467 
   13468 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13469 		wm_put_hw_semaphore_82573(sc);
   13470 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13471 		    device_xname(sc->sc_dev));
   13472 		return -1;
   13473 	}
   13474 
   13475 	return 0;
   13476 }
   13477 
   13478 static void
   13479 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13480 {
   13481 	uint32_t reg;
   13482 
   13483 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13484 		device_xname(sc->sc_dev), __func__));
   13485 
   13486 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13487 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13488 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13489 }
   13490 
   13491 /*
   13492  * Management mode and power management related subroutines.
   13493  * BMC, AMT, suspend/resume and EEE.
   13494  */
   13495 
   13496 #ifdef WM_WOL
   13497 static int
   13498 wm_check_mng_mode(struct wm_softc *sc)
   13499 {
   13500 	int rv;
   13501 
   13502 	switch (sc->sc_type) {
   13503 	case WM_T_ICH8:
   13504 	case WM_T_ICH9:
   13505 	case WM_T_ICH10:
   13506 	case WM_T_PCH:
   13507 	case WM_T_PCH2:
   13508 	case WM_T_PCH_LPT:
   13509 	case WM_T_PCH_SPT:
   13510 	case WM_T_PCH_CNP:
   13511 		rv = wm_check_mng_mode_ich8lan(sc);
   13512 		break;
   13513 	case WM_T_82574:
   13514 	case WM_T_82583:
   13515 		rv = wm_check_mng_mode_82574(sc);
   13516 		break;
   13517 	case WM_T_82571:
   13518 	case WM_T_82572:
   13519 	case WM_T_82573:
   13520 	case WM_T_80003:
   13521 		rv = wm_check_mng_mode_generic(sc);
   13522 		break;
   13523 	default:
   13524 		/* nothing to do */
   13525 		rv = 0;
   13526 		break;
   13527 	}
   13528 
   13529 	return rv;
   13530 }
   13531 
   13532 static int
   13533 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13534 {
   13535 	uint32_t fwsm;
   13536 
   13537 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13538 
   13539 	if (((fwsm & FWSM_FW_VALID) != 0)
   13540 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13541 		return 1;
   13542 
   13543 	return 0;
   13544 }
   13545 
   13546 static int
   13547 wm_check_mng_mode_82574(struct wm_softc *sc)
   13548 {
   13549 	uint16_t data;
   13550 
   13551 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13552 
   13553 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13554 		return 1;
   13555 
   13556 	return 0;
   13557 }
   13558 
   13559 static int
   13560 wm_check_mng_mode_generic(struct wm_softc *sc)
   13561 {
   13562 	uint32_t fwsm;
   13563 
   13564 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13565 
   13566 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13567 		return 1;
   13568 
   13569 	return 0;
   13570 }
   13571 #endif /* WM_WOL */
   13572 
   13573 static int
   13574 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13575 {
   13576 	uint32_t manc, fwsm, factps;
   13577 
   13578 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13579 		return 0;
   13580 
   13581 	manc = CSR_READ(sc, WMREG_MANC);
   13582 
   13583 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13584 		device_xname(sc->sc_dev), manc));
   13585 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13586 		return 0;
   13587 
   13588 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13589 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13590 		factps = CSR_READ(sc, WMREG_FACTPS);
   13591 		if (((factps & FACTPS_MNGCG) == 0)
   13592 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13593 			return 1;
   13594 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13595 		uint16_t data;
   13596 
   13597 		factps = CSR_READ(sc, WMREG_FACTPS);
   13598 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13599 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13600 			device_xname(sc->sc_dev), factps, data));
   13601 		if (((factps & FACTPS_MNGCG) == 0)
   13602 		    && ((data & NVM_CFG2_MNGM_MASK)
   13603 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13604 			return 1;
   13605 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13606 	    && ((manc & MANC_ASF_EN) == 0))
   13607 		return 1;
   13608 
   13609 	return 0;
   13610 }
   13611 
   13612 static bool
   13613 wm_phy_resetisblocked(struct wm_softc *sc)
   13614 {
   13615 	bool blocked = false;
   13616 	uint32_t reg;
   13617 	int i = 0;
   13618 
   13619 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13620 		device_xname(sc->sc_dev), __func__));
   13621 
   13622 	switch (sc->sc_type) {
   13623 	case WM_T_ICH8:
   13624 	case WM_T_ICH9:
   13625 	case WM_T_ICH10:
   13626 	case WM_T_PCH:
   13627 	case WM_T_PCH2:
   13628 	case WM_T_PCH_LPT:
   13629 	case WM_T_PCH_SPT:
   13630 	case WM_T_PCH_CNP:
   13631 		do {
   13632 			reg = CSR_READ(sc, WMREG_FWSM);
   13633 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13634 				blocked = true;
   13635 				delay(10*1000);
   13636 				continue;
   13637 			}
   13638 			blocked = false;
   13639 		} while (blocked && (i++ < 30));
   13640 		return blocked;
   13642 	case WM_T_82571:
   13643 	case WM_T_82572:
   13644 	case WM_T_82573:
   13645 	case WM_T_82574:
   13646 	case WM_T_82583:
   13647 	case WM_T_80003:
   13648 		reg = CSR_READ(sc, WMREG_MANC);
   13649 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13650 			return true;
   13651 		else
   13652 			return false;
   13654 	default:
   13655 		/* no problem */
   13656 		break;
   13657 	}
   13658 
   13659 	return false;
   13660 }
   13661 
   13662 static void
   13663 wm_get_hw_control(struct wm_softc *sc)
   13664 {
   13665 	uint32_t reg;
   13666 
   13667 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13668 		device_xname(sc->sc_dev), __func__));
   13669 
   13670 	if (sc->sc_type == WM_T_82573) {
   13671 		reg = CSR_READ(sc, WMREG_SWSM);
   13672 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13673 	} else if (sc->sc_type >= WM_T_82571) {
   13674 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13675 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13676 	}
   13677 }
   13678 
   13679 static void
   13680 wm_release_hw_control(struct wm_softc *sc)
   13681 {
   13682 	uint32_t reg;
   13683 
   13684 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13685 		device_xname(sc->sc_dev), __func__));
   13686 
   13687 	if (sc->sc_type == WM_T_82573) {
   13688 		reg = CSR_READ(sc, WMREG_SWSM);
   13689 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13690 	} else if (sc->sc_type >= WM_T_82571) {
   13691 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13692 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13693 	}
   13694 }
   13695 
   13696 static void
   13697 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13698 {
   13699 	uint32_t reg;
   13700 
   13701 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13702 		device_xname(sc->sc_dev), __func__));
   13703 
   13704 	if (sc->sc_type < WM_T_PCH2)
   13705 		return;
   13706 
   13707 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13708 
   13709 	if (gate)
   13710 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13711 	else
   13712 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13713 
   13714 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13715 }
   13716 
   13717 static void
   13718 wm_smbustopci(struct wm_softc *sc)
   13719 {
   13720 	uint32_t fwsm, reg;
   13721 	int rv = 0;
   13722 
   13723 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13724 		device_xname(sc->sc_dev), __func__));
   13725 
   13726 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13727 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13728 
   13729 	/* Disable ULP */
   13730 	wm_ulp_disable(sc);
   13731 
   13732 	/* Acquire PHY semaphore */
   13733 	sc->phy.acquire(sc);
   13734 
   13735 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13736 	switch (sc->sc_type) {
   13737 	case WM_T_PCH_LPT:
   13738 	case WM_T_PCH_SPT:
   13739 	case WM_T_PCH_CNP:
   13740 		if (wm_phy_is_accessible_pchlan(sc))
   13741 			break;
   13742 
   13743 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13744 		reg |= CTRL_EXT_FORCE_SMBUS;
   13745 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13746 #if 0
   13747 		/* XXX Isn't this required??? */
   13748 		CSR_WRITE_FLUSH(sc);
   13749 #endif
   13750 		delay(50 * 1000);
   13751 		/* FALLTHROUGH */
   13752 	case WM_T_PCH2:
   13753 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13754 			break;
   13755 		/* FALLTHROUGH */
   13756 	case WM_T_PCH:
   13757 		if (sc->sc_type == WM_T_PCH)
   13758 			if ((fwsm & FWSM_FW_VALID) != 0)
   13759 				break;
   13760 
   13761 		if (wm_phy_resetisblocked(sc) == true) {
   13762 			printf("XXX reset is blocked(3)\n");
   13763 			break;
   13764 		}
   13765 
   13766 		wm_toggle_lanphypc_pch_lpt(sc);
   13767 
   13768 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13769 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13770 				break;
   13771 
   13772 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13773 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13774 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13775 
   13776 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13777 				break;
   13778 			rv = -1;
   13779 		}
   13780 		break;
   13781 	default:
   13782 		break;
   13783 	}
   13784 
   13785 	/* Release semaphore */
   13786 	sc->phy.release(sc);
   13787 
   13788 	if (rv == 0) {
   13789 		if (wm_phy_resetisblocked(sc)) {
   13790 			printf("XXX reset is blocked(4)\n");
   13791 			goto out;
   13792 		}
   13793 		wm_reset_phy(sc);
   13794 		if (wm_phy_resetisblocked(sc))
   13795 			printf("XXX reset is blocked(4)\n");
   13796 	}
   13797 
   13798 out:
   13799 	/*
   13800 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13801 	 */
   13802 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13803 		delay(10*1000);
   13804 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13805 	}
   13806 }
   13807 
   13808 static void
   13809 wm_init_manageability(struct wm_softc *sc)
   13810 {
   13811 
   13812 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13813 		device_xname(sc->sc_dev), __func__));
   13814 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13815 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13816 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13817 
   13818 		/* Disable hardware interception of ARP */
   13819 		manc &= ~MANC_ARP_EN;
   13820 
   13821 		/* Enable receiving management packets to the host */
   13822 		if (sc->sc_type >= WM_T_82571) {
   13823 			manc |= MANC_EN_MNG2HOST;
   13824 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13825 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13826 		}
   13827 
   13828 		CSR_WRITE(sc, WMREG_MANC, manc);
   13829 	}
   13830 }
   13831 
   13832 static void
   13833 wm_release_manageability(struct wm_softc *sc)
   13834 {
   13835 
   13836 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13837 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13838 
   13839 		manc |= MANC_ARP_EN;
   13840 		if (sc->sc_type >= WM_T_82571)
   13841 			manc &= ~MANC_EN_MNG2HOST;
   13842 
   13843 		CSR_WRITE(sc, WMREG_MANC, manc);
   13844 	}
   13845 }
   13846 
   13847 static void
   13848 wm_get_wakeup(struct wm_softc *sc)
   13849 {
   13850 
   13851 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13852 	switch (sc->sc_type) {
   13853 	case WM_T_82573:
   13854 	case WM_T_82583:
   13855 		sc->sc_flags |= WM_F_HAS_AMT;
   13856 		/* FALLTHROUGH */
   13857 	case WM_T_80003:
   13858 	case WM_T_82575:
   13859 	case WM_T_82576:
   13860 	case WM_T_82580:
   13861 	case WM_T_I350:
   13862 	case WM_T_I354:
   13863 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13864 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13865 		/* FALLTHROUGH */
   13866 	case WM_T_82541:
   13867 	case WM_T_82541_2:
   13868 	case WM_T_82547:
   13869 	case WM_T_82547_2:
   13870 	case WM_T_82571:
   13871 	case WM_T_82572:
   13872 	case WM_T_82574:
   13873 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13874 		break;
   13875 	case WM_T_ICH8:
   13876 	case WM_T_ICH9:
   13877 	case WM_T_ICH10:
   13878 	case WM_T_PCH:
   13879 	case WM_T_PCH2:
   13880 	case WM_T_PCH_LPT:
   13881 	case WM_T_PCH_SPT:
   13882 	case WM_T_PCH_CNP:
   13883 		sc->sc_flags |= WM_F_HAS_AMT;
   13884 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13885 		break;
   13886 	default:
   13887 		break;
   13888 	}
   13889 
   13890 	/* 1: HAS_MANAGE */
   13891 	if (wm_enable_mng_pass_thru(sc) != 0)
   13892 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13893 
   13894 	/*
   13895 	 * Note that the WOL flag is set after the EEPROM-related settings
   13896 	 * have been reset.
   13897 	 */
   13898 }
   13899 
   13900 /*
   13901  * Unconfigure Ultra Low Power mode.
   13902  * Only for I217 and newer (see below).
   13903  */
   13904 static void
   13905 wm_ulp_disable(struct wm_softc *sc)
   13906 {
   13907 	uint32_t reg;
   13908 	int i = 0;
   13909 
   13910 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13911 		device_xname(sc->sc_dev), __func__));
   13912 	/* Exclude old devices */
   13913 	if ((sc->sc_type < WM_T_PCH_LPT)
   13914 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13915 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13916 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13917 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13918 		return;
   13919 
   13920 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13921 		/* Request ME un-configure ULP mode in the PHY */
   13922 		reg = CSR_READ(sc, WMREG_H2ME);
   13923 		reg &= ~H2ME_ULP;
   13924 		reg |= H2ME_ENFORCE_SETTINGS;
   13925 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13926 
   13927 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13928 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13929 			if (i++ == 30) {
   13930 				printf("%s timed out\n", __func__);
   13931 				return;
   13932 			}
   13933 			delay(10 * 1000);
   13934 		}
   13935 		reg = CSR_READ(sc, WMREG_H2ME);
   13936 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13937 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13938 
   13939 		return;
   13940 	}
   13941 
   13942 	/* Acquire semaphore */
   13943 	sc->phy.acquire(sc);
   13944 
   13945 	/* Toggle LANPHYPC */
   13946 	wm_toggle_lanphypc_pch_lpt(sc);
   13947 
   13948 	/* Unforce SMBus mode in PHY */
   13949 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13950 	if (reg == 0x0000 || reg == 0xffff) {
   13951 		uint32_t reg2;
   13952 
   13953 		printf("%s: Force SMBus first.\n", __func__);
   13954 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13955 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13956 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13957 		delay(50 * 1000);
   13958 
   13959 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13960 	}
   13961 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13962 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13963 
   13964 	/* Unforce SMBus mode in MAC */
   13965 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13966 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13967 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13968 
   13969 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13970 	reg |= HV_PM_CTRL_K1_ENA;
   13971 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13972 
   13973 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13974 	reg &= ~(I218_ULP_CONFIG1_IND
   13975 	    | I218_ULP_CONFIG1_STICKY_ULP
   13976 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13977 	    | I218_ULP_CONFIG1_WOL_HOST
   13978 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13979 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13980 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13981 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13982 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13983 	reg |= I218_ULP_CONFIG1_START;
   13984 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13985 
   13986 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13987 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13988 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13989 
   13990 	/* Release semaphore */
   13991 	sc->phy.release(sc);
   13992 	wm_gmii_reset(sc);
   13993 	delay(50 * 1000);
   13994 }
   13995 
   13996 /* WOL in the newer chipset interfaces (pchlan) */
   13997 static void
   13998 wm_enable_phy_wakeup(struct wm_softc *sc)
   13999 {
   14000 #if 0
   14001 	uint16_t preg;
   14002 
   14003 	/* Copy MAC RARs to PHY RARs */
   14004 
   14005 	/* Copy MAC MTA to PHY MTA */
   14006 
   14007 	/* Configure PHY Rx Control register */
   14008 
   14009 	/* Enable PHY wakeup in MAC register */
   14010 
   14011 	/* Configure and enable PHY wakeup in PHY registers */
   14012 
   14013 	/* Activate PHY wakeup */
   14014 
   14015 	/* XXX */
   14016 #endif
   14017 }
   14018 
   14019 /* Power down workaround on D3 */
   14020 static void
   14021 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14022 {
   14023 	uint32_t reg;
   14024 	int i;
   14025 
   14026 	for (i = 0; i < 2; i++) {
   14027 		/* Disable link */
   14028 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14029 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14030 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14031 
   14032 		/*
   14033 		 * Call gig speed drop workaround on Gig disable before
   14034 		 * accessing any PHY registers
   14035 		 */
   14036 		if (sc->sc_type == WM_T_ICH8)
   14037 			wm_gig_downshift_workaround_ich8lan(sc);
   14038 
   14039 		/* Write VR power-down enable */
   14040 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14041 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14042 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14043 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   14044 
   14045 		/* Read it back and test */
   14046 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14047 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14048 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14049 			break;
   14050 
   14051 		/* Issue PHY reset and repeat at most one more time */
   14052 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14053 	}
   14054 }
   14055 
   14056 static void
   14057 wm_enable_wakeup(struct wm_softc *sc)
   14058 {
   14059 	uint32_t reg, pmreg;
   14060 	pcireg_t pmode;
   14061 
   14062 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14063 		device_xname(sc->sc_dev), __func__));
   14064 
   14065 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14066 		&pmreg, NULL) == 0)
   14067 		return;
   14068 
   14069 	/* Advertise the wakeup capability */
   14070 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14071 	    | CTRL_SWDPIN(3));
   14072 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   14073 
   14074 	/* ICH workaround */
   14075 	switch (sc->sc_type) {
   14076 	case WM_T_ICH8:
   14077 	case WM_T_ICH9:
   14078 	case WM_T_ICH10:
   14079 	case WM_T_PCH:
   14080 	case WM_T_PCH2:
   14081 	case WM_T_PCH_LPT:
   14082 	case WM_T_PCH_SPT:
   14083 	case WM_T_PCH_CNP:
   14084 		/* Disable gig during WOL */
   14085 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14086 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   14087 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14088 		if (sc->sc_type == WM_T_PCH)
   14089 			wm_gmii_reset(sc);
   14090 
   14091 		/* Power down workaround */
   14092 		if (sc->sc_phytype == WMPHY_82577) {
   14093 			struct mii_softc *child;
   14094 
   14095 			/* Assume that the PHY is copper */
   14096 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14097 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   14098 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   14099 				    (768 << 5) | 25, 0x0444); /* magic num */
   14100 		}
   14101 		break;
   14102 	default:
   14103 		break;
   14104 	}
   14105 
   14106 	/* Keep the laser running on fiber adapters */
   14107 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14108 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14109 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14110 		reg |= CTRL_EXT_SWDPIN(3);
   14111 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14112 	}
   14113 
   14114 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14115 #if 0	/* for the multicast packet */
   14116 	reg |= WUFC_MC;
   14117 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14118 #endif
   14119 
   14120 	if (sc->sc_type >= WM_T_PCH)
   14121 		wm_enable_phy_wakeup(sc);
   14122 	else {
   14123 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14124 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14125 	}
   14126 
   14127 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14128 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14129 		|| (sc->sc_type == WM_T_PCH2))
   14130 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14131 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14132 
   14133 	/* Request PME */
   14134 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14135 #if 0
   14136 	/* Disable WOL */
   14137 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14138 #else
   14139 	/* For WOL */
   14140 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14141 #endif
   14142 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14143 }
   14144 
   14145 /* Disable ASPM L0s and/or L1 for workaround */
   14146 static void
   14147 wm_disable_aspm(struct wm_softc *sc)
   14148 {
   14149 	pcireg_t reg, mask = 0;
   14150 	const char *str = "";
   14151 
   14152 	/*
   14153 	 * Only for PCIe devices which have the PCIe capability in the PCI
   14154 	 * config space.
   14155 	 */
   14156 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14157 		return;
   14158 
   14159 	switch (sc->sc_type) {
   14160 	case WM_T_82571:
   14161 	case WM_T_82572:
   14162 		/*
   14163 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14164 		 * State Power management L1 State (ASPM L1).
   14165 		 */
   14166 		mask = PCIE_LCSR_ASPM_L1;
   14167 		str = "L1 is";
   14168 		break;
   14169 	case WM_T_82573:
   14170 	case WM_T_82574:
   14171 	case WM_T_82583:
   14172 		/*
   14173 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14174 		 *
   14175 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
   14176 		 * some chipsets.  The documents for the 82574 and 82583 say
   14177 		 * that disabling L0s only on those specific chipsets is
   14178 		 * sufficient, but we follow what the Intel em driver does.
   14179 		 *
   14180 		 * References:
   14181 		 * Errata 8 of the Specification Update of i82573.
   14182 		 * Errata 20 of the Specification Update of i82574.
   14183 		 * Errata 9 of the Specification Update of i82583.
   14184 		 */
   14185 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14186 		str = "L0s and L1 are";
   14187 		break;
   14188 	default:
   14189 		return;
   14190 	}
   14191 
   14192 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14193 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14194 	reg &= ~mask;
   14195 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14196 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14197 
   14198 	/* Print only in wm_attach() */
   14199 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14200 		aprint_verbose_dev(sc->sc_dev,
   14201 		    "ASPM %s disabled to workaround the errata.\n", str);
   14202 }
   14203 
   14204 /* LPLU (Low Power Link Up) */
   14205 
   14206 static void
   14207 wm_lplu_d0_disable(struct wm_softc *sc)
   14208 {
   14209 	struct mii_data *mii = &sc->sc_mii;
   14210 	uint32_t reg;
   14211 
   14212 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14213 		device_xname(sc->sc_dev), __func__));
   14214 
   14215 	if (sc->sc_phytype == WMPHY_IFE)
   14216 		return;
   14217 
   14218 	switch (sc->sc_type) {
   14219 	case WM_T_82571:
   14220 	case WM_T_82572:
   14221 	case WM_T_82573:
   14222 	case WM_T_82575:
   14223 	case WM_T_82576:
   14224 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14225 		reg &= ~PMR_D0_LPLU;
   14226 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14227 		break;
   14228 	case WM_T_82580:
   14229 	case WM_T_I350:
   14230 	case WM_T_I210:
   14231 	case WM_T_I211:
   14232 		reg = CSR_READ(sc, WMREG_PHPM);
   14233 		reg &= ~PHPM_D0A_LPLU;
   14234 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14235 		break;
   14236 	case WM_T_82574:
   14237 	case WM_T_82583:
   14238 	case WM_T_ICH8:
   14239 	case WM_T_ICH9:
   14240 	case WM_T_ICH10:
   14241 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14242 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14243 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14244 		CSR_WRITE_FLUSH(sc);
   14245 		break;
   14246 	case WM_T_PCH:
   14247 	case WM_T_PCH2:
   14248 	case WM_T_PCH_LPT:
   14249 	case WM_T_PCH_SPT:
   14250 	case WM_T_PCH_CNP:
   14251 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14252 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14253 		if (wm_phy_resetisblocked(sc) == false)
   14254 			reg |= HV_OEM_BITS_ANEGNOW;
   14255 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14256 		break;
   14257 	default:
   14258 		break;
   14259 	}
   14260 }
   14261 
   14262 /* EEE (Energy Efficient Ethernet) */
   14263 
   14264 static void
   14265 wm_set_eee_i350(struct wm_softc *sc)
   14266 {
   14267 	uint32_t ipcnfg, eeer;
   14268 
   14269 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14270 	eeer = CSR_READ(sc, WMREG_EEER);
   14271 
   14272 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14273 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14274 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14275 		    | EEER_LPI_FC);
   14276 	} else {
   14277 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14278 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14279 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14280 		    | EEER_LPI_FC);
   14281 	}
   14282 
   14283 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14284 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14285 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14286 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14287 }
   14288 
   14289 /*
   14290  * Workarounds (mainly PHY related).
   14291  * Basically, PHY's workarounds are in the PHY drivers.
   14292  */
   14293 
   14294 /* Work-around for 82566 Kumeran PCS lock loss */
   14295 static void
   14296 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14297 {
   14298 	struct mii_data *mii = &sc->sc_mii;
   14299 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14300 	int i;
   14301 	int reg;
   14302 
   14303 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14304 		device_xname(sc->sc_dev), __func__));
   14305 
   14306 	/* If the link is not up, do nothing */
   14307 	if ((status & STATUS_LU) == 0)
   14308 		return;
   14309 
   14310 	/* Nothing to do if the link is other than 1Gbps */
   14311 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14312 		return;
   14313 
   14314 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14315 	for (i = 0; i < 10; i++) {
   14316 		/* read twice */
   14317 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14318 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14319 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14320 			goto out;	/* GOOD! */
   14321 
   14322 		/* Reset the PHY */
   14323 		wm_reset_phy(sc);
   14324 		delay(5*1000);
   14325 	}
   14326 
   14327 	/* Disable GigE link negotiation */
   14328 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14329 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14330 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14331 
   14332 	/*
   14333 	 * Call gig speed drop workaround on Gig disable before accessing
   14334 	 * any PHY registers.
   14335 	 */
   14336 	wm_gig_downshift_workaround_ich8lan(sc);
   14337 
   14338 out:
   14339 	return;
   14340 }
   14341 
   14342 /* WOL from S5 stops working */
   14343 static void
   14344 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14345 {
   14346 	uint16_t kmreg;
   14347 
   14348 	/* Only for igp3 */
   14349 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14350 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14351 			return;
   14352 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14353 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14354 			return;
   14355 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14356 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14357 	}
   14358 }
   14359 
   14360 /*
   14361  * Workaround for pch's PHYs
   14362  * XXX should be moved to new PHY driver?
   14363  */
   14364 static void
   14365 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14366 {
   14367 
   14368 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14369 		device_xname(sc->sc_dev), __func__));
   14370 	KASSERT(sc->sc_type == WM_T_PCH);
   14371 
   14372 	if (sc->sc_phytype == WMPHY_82577)
   14373 		wm_set_mdio_slow_mode_hv(sc);
   14374 
   14375 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14376 
   14377 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14378 
   14379 	/* 82578 */
   14380 	if (sc->sc_phytype == WMPHY_82578) {
   14381 		struct mii_softc *child;
   14382 
   14383 		/*
   14384 		 * Return registers to default by doing a soft reset then
   14385 		 * writing 0x3140 to the control register
   14386 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14387 		 */
   14388 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14389 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14390 			PHY_RESET(child);
   14391 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14392 			    0x3140);
   14393 		}
   14394 	}
   14395 
   14396 	/* Select page 0 */
   14397 	sc->phy.acquire(sc);
   14398 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14399 	sc->phy.release(sc);
   14400 
   14401 	/*
   14402 	 * Configure the K1 Si workaround during phy reset assuming there is
   14403 	 * link so that it disables K1 if link is in 1Gbps.
   14404 	 */
   14405 	wm_k1_gig_workaround_hv(sc, 1);
   14406 }
   14407 
   14408 static void
   14409 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14410 {
   14411 
   14412 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14413 		device_xname(sc->sc_dev), __func__));
   14414 	KASSERT(sc->sc_type == WM_T_PCH2);
   14415 
   14416 	wm_set_mdio_slow_mode_hv(sc);
   14417 }
   14418 
   14419 /**
   14420  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   14421  *  @link: link up bool flag
   14422  *
   14423  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   14424  *  preventing further DMA write requests.  Work around the issue by
   14425  *  disabling the de-assertion of the clock request when in 1Gbps mode.
   14426  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   14427  *  speeds in order to avoid Tx hangs.
   14428  **/
   14429 static int
   14430 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   14431 {
   14432 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   14433 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14434 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   14435 	uint16_t phyreg;
   14436 	int rv = 0;	/* Also the value returned on the !link path */
   14437 
   14438 	if (link && (speed == STATUS_SPEED_1000)) {
   14439 		sc->phy.acquire(sc);
   14440 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14441 		    &phyreg);
   14442 		if (rv != 0)
   14443 			goto release;
   14444 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14445 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   14446 		if (rv != 0)
   14447 			goto release;
   14448 		delay(20);
   14449 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   14450 
   14451 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14452 		    &phyreg);
   14453 release:
   14454 		sc->phy.release(sc);
   14455 	} else {
   14456 		struct mii_softc *child;
   14457 
   14458 		fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   14459 
   14460 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14461 		if (((child != NULL) && (child->mii_mpd_rev > 5))
   14462 		    || !link
   14463 		    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   14464 			goto update_fextnvm6;
   14465 
   14466 		phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL);
   14467 
   14468 		/* Clear link status transmit timeout */
   14469 		phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   14470 		if (speed == STATUS_SPEED_100) {
   14471 			/* Set inband Tx timeout to 5x10us for 100Half */
   14472 			phyreg |=
   14473 			    5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14474 
   14475 			/* Do not extend the K1 entry latency for 100Half */
   14476 			fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14477 		} else {
   14478 			/* Set inband Tx timeout to 50x10us for 10Full/Half */
   14479 			phyreg |=
   14480 			    50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14481 
   14482 			/* Extend the K1 entry latency for 10 Mbps */
   14483 			fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14484 		}
   14485 
   14486 		wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   14487 
   14488 update_fextnvm6:
   14489 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   14490 	}
   14491 
   14492 	return rv;
   14493 }
   14494 
   14495 static int
   14496 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14497 {
   14498 	int k1_enable = sc->sc_nvm_k1_enabled;
   14499 
   14500 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14501 		device_xname(sc->sc_dev), __func__));
   14502 
   14503 	if (sc->phy.acquire(sc) != 0)
   14504 		return -1;
   14505 
   14506 	if (link) {
   14507 		k1_enable = 0;
   14508 
   14509 		/* Link stall fix for link up */
   14510 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14511 		    0x0100);
   14512 	} else {
   14513 		/* Link stall fix for link down */
   14514 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14515 		    0x4100);
   14516 	}
   14517 
   14518 	wm_configure_k1_ich8lan(sc, k1_enable);
   14519 	sc->phy.release(sc);
   14520 
   14521 	return 0;
   14522 }
   14523 
   14524 static void
   14525 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14526 {
   14527 	uint32_t reg;
   14528 
   14529 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14530 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14531 	    reg | HV_KMRN_MDIO_SLOW);
   14532 }
   14533 
   14534 static void
   14535 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14536 {
   14537 	uint32_t ctrl, ctrl_ext, tmp;
   14538 	uint16_t kmreg;
   14539 	int rv;
   14540 
   14541 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14542 	if (rv != 0)
   14543 		return;
   14544 
   14545 	if (k1_enable)
   14546 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14547 	else
   14548 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14549 
   14550 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14551 	if (rv != 0)
   14552 		return;
   14553 
   14554 	delay(20);
   14555 
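          	/*
          	 * Briefly force the MAC speed configuration (CTRL_FRCSPD plus
          	 * the CTRL_EXT speed-bypass bit) and then restore the original
          	 * CTRL/CTRL_EXT values, presumably so that the MAC latches the
          	 * new K1 setting.
          	 */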
   14556 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14557 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14558 
   14559 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14560 	tmp |= CTRL_FRCSPD;
   14561 
   14562 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14563 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14564 	CSR_WRITE_FLUSH(sc);
   14565 	delay(20);
   14566 
   14567 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14568 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14569 	CSR_WRITE_FLUSH(sc);
   14570 	delay(20);
   14571 
   14572 	return;
   14573 }
   14574 
   14575 /* special case - for 82575 - need to do manual init ... */
   14576 static void
   14577 wm_reset_init_script_82575(struct wm_softc *sc)
   14578 {
   14579 	/*
   14580 	 * Remark: this is untested code - we have no board without EEPROM.
   14581 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   14582 	 */
   14583 
   14584 	/* SerDes configuration via SERDESCTRL */
   14585 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14586 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14587 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14588 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14589 
   14590 	/* CCM configuration via CCMCTL register */
   14591 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14592 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14593 
   14594 	/* PCIe lanes configuration */
   14595 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14596 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14597 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14598 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14599 
   14600 	/* PCIe PLL Configuration */
   14601 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14602 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14603 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14604 }
   14605 
   14606 static void
   14607 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14608 {
   14609 	uint32_t reg;
   14610 	uint16_t nvmword;
   14611 	int rv;
   14612 
   14613 	if (sc->sc_type != WM_T_82580)
   14614 		return;
   14615 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14616 		return;
   14617 
   14618 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14619 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14620 	if (rv != 0) {
   14621 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14622 		    __func__);
   14623 		return;
   14624 	}
   14625 
   14626 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14627 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14628 		reg |= MDICNFG_DEST;
   14629 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14630 		reg |= MDICNFG_COM_MDIO;
   14631 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14632 }
   14633 
   14634 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14635 
   14636 static bool
   14637 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14638 {
   14639 	int i;
   14640 	uint32_t reg;
   14641 	uint16_t id1, id2;
   14642 
   14643 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14644 		device_xname(sc->sc_dev), __func__));
   14645 	id1 = id2 = 0xffff;
   14646 	for (i = 0; i < 2; i++) {
   14647 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14648 		if (MII_INVALIDID(id1))
   14649 			continue;
   14650 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14651 		if (MII_INVALIDID(id2))
   14652 			continue;
   14653 		break;
   14654 	}
   14655 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   14656 		goto out;
   14657 	}
   14658 
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
		sc->phy.acquire(sc);
	}
	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			/* Unforce SMBus mode in PHY */
			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL);
			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, reg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

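/*
 * wm_toggle_lanphypc_pch_lpt:
 *
 *	Toggle the LANPHYPC pin value via the CTRL register.  Per
 *	Intel's reference code, this power-cycles the PHY and is used
 *	to recover a PHY whose registers have become inaccessible
 *	over MDIO.
 */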
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

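/*
 * wm_platform_pm_pch_lpt:
 *
 *	Set the Latency Tolerance Reporting (LTR) values and enable
 *	Optimized Buffer Flush/Fill (OBFF).  With link up, the
 *	tolerated latency is the time to drain the Rx packet buffer
 *	at the current link speed, bounded by the platform's maximum
 *	from the LTR capability; with link down, zero latency is
 *	reported.
 */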
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
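		/*
		 * The tolerated latency is, in effect, the time to drain
		 * the usable Rx buffer (the allocation minus room for
		 * roughly two maximum-sized frames) at the link speed:
		 * bytes * 8 * 1000 / speed-in-Mb/s yields nanoseconds.
		 */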
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

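		/*
		 * For illustration (example numbers, not hardware
		 * values): lat_ns = 40000 exceeds the 10-bit value
		 * field (max 1023), so divide by 2^5: scale 1,
		 * value 1250.  Still too large, so divide again:
		 * scale 2, value 40, which encodes 40 * 2^10 ns,
		 * i.e. roughly 41 microseconds.
		 */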
		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

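		/*
		 * Convert the (possibly capped) latency back into an
		 * amount of buffer, in units of roughly 1 KB, and use
		 * what remains of the Rx allocation as the OBFF high
		 * water mark.
		 */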
		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
			    "(rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Use the same latency for Snoop and No-Snoop */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	int pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break;	/* OK */

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

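		/*
		 * Cycle the device through D3hot and back to D0.  Per
		 * the errata workaround, this makes the PHY reload the
		 * patched autoload word written above.
		 */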
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}

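/*
 * wm_legacy_irq_quirk_spt:
 *
 *	Ungate the PCH side clock and disable IOSF sideband clock
 *	gating.  Apparently required for reliable interrupt delivery
 *	when SPT/CNP chips are used with legacy INTx interrupts.
 */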
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}