/*	$NetBSD: if_wm.c,v 1.596 2018/11/03 21:39:10 christos Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.596 2018/11/03 21:39:10 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

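/*
 * Illustrative sketch (not part of the original driver): DPRINTF() takes a
 * debug category mask and a parenthesized printf() argument list, so calls
 * compile away entirely when WM_DEBUG is not defined.  A hypothetical call
 * site would look like:
 */
#if 0
	/* Printed only when WM_DEBUG_LINK is set in wm_debug. */
	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
		device_xname(sc->sc_dev)));
#endif
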
#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

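/*
 * Illustrative sketch (not part of the original driver): because
 * WM_NTXDESC() and WM_TXQUEUELEN() are powers of two, the WM_NEXT*()
 * macros advance a ring index with a cheap AND mask instead of a modulo:
 */
#if 0
	/* Advance to the next Tx descriptor, wrapping at the ring end. */
	next = WM_NEXTTX(txq, cur);	/* == (cur + 1) % WM_NTXDESC(txq) */
	/* Same idea for the software Tx job array. */
	snext = WM_NEXTTXS(txq, scur);
#endif
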
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

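/*
 * Illustrative arithmetic for the comment above (a sketch, assuming a
 * 9018 byte jumbo frame): each Rx buffer holds MCLBYTES (2048) bytes, so
 * a full-sized jumbo packet spans howmany(9018, 2048) == 5 buffers, and a
 * 256-descriptor ring has room for 256 / 5 ~= 51 such packets.
 */
#if 0
	const int bufs_per_jumbo = howmany(9018, MCLBYTES);	 /* == 5 */
	const int jumbo_capacity = WM_NRXDESC / bufs_per_jumbo; /* ~= 51 */
#endif
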
#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

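/*
 * Illustrative sketch (not part of the original driver):
 * WM_Q_EVCNT_DEFINE(txq, ipsum) drops a name buffer and a counter into the
 * enclosing structure; note the array is sized by the literal placeholder
 * string "qname##XX##evname" (## is not expanded inside a string literal),
 * which is long enough for generated names like "txq00ipsum".  The
 * expansion is roughly:
 */
#if 0
	char txq_ipsum_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_ipsum;

	/* At attach time, format "txq00ipsum" and register the counter. */
	WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, 0, device_xname(sc->sc_dev));
#endif
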
struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

    406 
    407 struct wm_rxqueue {
    408 	kmutex_t *rxq_lock;		/* lock for rx operations */
    409 
    410 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    411 
    412 	/* Software state for the receive descriptors. */
    413 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    414 
    415 	/* RX control data structures. */
    416 	int rxq_ndesc;			/* must be a power of two */
    417 	size_t rxq_descsize;		/* a rx descriptor size */
    418 	rxdescs_t *rxq_descs_u;
    419 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    420 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    421 	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

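/*
 * Illustrative sketch (not part of the original driver): callers go
 * through the op vectors with an acquire/access/release pattern, keeping
 * chip-specific locking and access methods behind one interface, e.g.:
 */
#if 0
	uint16_t word;

	if (sc->nvm.acquire(sc) == 0) {
		/* Read one 16-bit word from NVM offset 0. */
		(void)sc->nvm.read(sc, 0, 1, &word);
		sc->nvm.release(sc);
	}
#endif
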
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

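/*
 * Illustrative sketch (not part of the original driver): the WM_CORE_*
 * macros are no-ops while sc_core_lock is still NULL, so the same call
 * sites work both before and after the mutex is created, e.g.:
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify softc state ... */
	WM_CORE_UNLOCK(sc);
#endif
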
#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

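/*
 * Illustrative sketch (not part of the original driver): rxq_tailp always
 * points at the m_next slot of the last mbuf, so appending to the Rx chain
 * is O(1) while assembling a multi-buffer (jumbo) packet, e.g.:
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* start a fresh, empty chain */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head == m1, tail == m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2, tail == m2 */
#endif
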
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

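/*
 * Illustrative sketch (not part of the original driver): CSR_WRITE_FLUSH()
 * forces posted PCI writes to reach the device by reading back an
 * unrelated register (STATUS), the usual ordering idiom, e.g.:
 */
#if 0
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);	/* mask all interrupts */
	CSR_WRITE_FLUSH(sc);			/* push the write to the chip */
#endif
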
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

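/*
 * Illustrative sketch (not part of the original driver; register macros
 * assumed from if_wmreg.h): descriptor base addresses are programmed as
 * two 32-bit halves, and on platforms with a 32-bit bus_addr_t the high
 * half is simply 0, e.g. when loading a Tx descriptor base:
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAL(0), WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAH(0), WM_CDTXADDR_HI(txq, 0));
#endif
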
/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1334 	  WM_T_82575,		WMP_F_SERDES },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1336 	  "82575GB quad-1000baseT Ethernet",
   1337 	  WM_T_82575,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1339 	  "82575GB quad-1000baseT Ethernet (PM)",
   1340 	  WM_T_82575,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1342 	  "82576 1000BaseT Ethernet",
   1343 	  WM_T_82576,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1345 	  "82576 1000BaseX Ethernet",
   1346 	  WM_T_82576,		WMP_F_FIBER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1349 	  "82576 gigabit Ethernet (SERDES)",
   1350 	  WM_T_82576,		WMP_F_SERDES },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1353 	  "82576 quad-1000BaseT Ethernet",
   1354 	  WM_T_82576,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1357 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1358 	  WM_T_82576,		WMP_F_COPPER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1361 	  "82576 gigabit Ethernet",
   1362 	  WM_T_82576,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1365 	  "82576 gigabit Ethernet (SERDES)",
   1366 	  WM_T_82576,		WMP_F_SERDES },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1368 	  "82576 quad-gigabit Ethernet (SERDES)",
   1369 	  WM_T_82576,		WMP_F_SERDES },
   1370 
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1372 	  "82580 1000BaseT Ethernet",
   1373 	  WM_T_82580,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1375 	  "82580 1000BaseX Ethernet",
   1376 	  WM_T_82580,		WMP_F_FIBER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1379 	  "82580 1000BaseT Ethernet (SERDES)",
   1380 	  WM_T_82580,		WMP_F_SERDES },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1383 	  "82580 gigabit Ethernet (SGMII)",
   1384 	  WM_T_82580,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1386 	  "82580 dual-1000BaseT Ethernet",
   1387 	  WM_T_82580,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1390 	  "82580 quad-1000BaseX Ethernet",
   1391 	  WM_T_82580,		WMP_F_FIBER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1394 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1395 	  WM_T_82580,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1398 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1399 	  WM_T_82580,		WMP_F_SERDES },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1402 	  "DH89XXCC 1000BASE-KX Ethernet",
   1403 	  WM_T_82580,		WMP_F_SERDES },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1406 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1407 	  WM_T_82580,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1410 	  "I350 Gigabit Network Connection",
   1411 	  WM_T_I350,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1414 	  "I350 Gigabit Fiber Network Connection",
   1415 	  WM_T_I350,		WMP_F_FIBER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1418 	  "I350 Gigabit Backplane Connection",
   1419 	  WM_T_I350,		WMP_F_SERDES },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1422 	  "I350 Quad Port Gigabit Ethernet",
   1423 	  WM_T_I350,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1426 	  "I350 Gigabit Connection",
   1427 	  WM_T_I350,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1430 	  "I354 Gigabit Ethernet (KX)",
   1431 	  WM_T_I354,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1434 	  "I354 Gigabit Ethernet (SGMII)",
   1435 	  WM_T_I354,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1438 	  "I354 Gigabit Ethernet (2.5G)",
   1439 	  WM_T_I354,		WMP_F_COPPER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1442 	  "I210-T1 Ethernet Server Adapter",
   1443 	  WM_T_I210,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1446 	  "I210 Ethernet (Copper OEM)",
   1447 	  WM_T_I210,		WMP_F_COPPER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1450 	  "I210 Ethernet (Copper IT)",
   1451 	  WM_T_I210,		WMP_F_COPPER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1454 	  "I210 Ethernet (FLASH less)",
   1455 	  WM_T_I210,		WMP_F_COPPER },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1458 	  "I210 Gigabit Ethernet (Fiber)",
   1459 	  WM_T_I210,		WMP_F_FIBER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1462 	  "I210 Gigabit Ethernet (SERDES)",
   1463 	  WM_T_I210,		WMP_F_SERDES },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1466 	  "I210 Gigabit Ethernet (FLASH less)",
   1467 	  WM_T_I210,		WMP_F_SERDES },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1470 	  "I210 Gigabit Ethernet (SGMII)",
   1471 	  WM_T_I210,		WMP_F_COPPER },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1474 	  "I211 Ethernet (COPPER)",
   1475 	  WM_T_I211,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1477 	  "I217 V Ethernet Connection",
   1478 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1480 	  "I217 LM Ethernet Connection",
   1481 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1483 	  "I218 V Ethernet Connection",
   1484 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1486 	  "I218 V Ethernet Connection",
   1487 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1489 	  "I218 V Ethernet Connection",
   1490 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1492 	  "I218 LM Ethernet Connection",
   1493 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1495 	  "I218 LM Ethernet Connection",
   1496 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1498 	  "I218 LM Ethernet Connection",
   1499 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1501 	  "I219 V Ethernet Connection",
   1502 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1504 	  "I219 V Ethernet Connection",
   1505 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1507 	  "I219 V Ethernet Connection",
   1508 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1510 	  "I219 V Ethernet Connection",
   1511 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1513 	  "I219 LM Ethernet Connection",
   1514 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1516 	  "I219 LM Ethernet Connection",
   1517 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1519 	  "I219 LM Ethernet Connection",
   1520 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1522 	  "I219 LM Ethernet Connection",
   1523 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1525 	  "I219 LM Ethernet Connection",
   1526 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1528 	  "I219 V Ethernet Connection",
   1529 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1531 	  "I219 V Ethernet Connection",
   1532 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1534 	  "I219 LM Ethernet Connection",
   1535 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1537 	  "I219 LM Ethernet Connection",
   1538 	  WM_T_PCH_CNP,		WMP_F_COPPER },
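	/* Sentinel: wm_lookup() stops at the first entry with a NULL name. */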
   1539 	{ 0,			0,
   1540 	  NULL,
   1541 	  0,			0 },
   1542 };
   1543 
/*
 * Register read/write functions, other than CSR_{READ|WRITE}().
 */
   1548 
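/*
 * wm_io_read()/wm_io_write() use the indirect access window in the I/O
 * BAR: the register offset is written at I/O offset 0 and the data is
 * then transferred at I/O offset 4.
 */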
   1549 #if 0 /* Not currently used */
   1550 static inline uint32_t
   1551 wm_io_read(struct wm_softc *sc, int reg)
   1552 {
   1553 
   1554 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1555 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1556 }
   1557 #endif
   1558 
   1559 static inline void
   1560 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1561 {
   1562 
   1563 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1564 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1565 }
   1566 
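/*
 * Write an 8-bit value to an indirect 82575 controller register: the
 * data and the target offset are packed into a single CSR write, then
 * the ready bit is polled for up to SCTL_CTL_POLL_TIMEOUT iterations,
 * 5us apart.
 */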
   1567 static inline void
   1568 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1569     uint32_t data)
   1570 {
   1571 	uint32_t regval;
   1572 	int i;
   1573 
   1574 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1575 
   1576 	CSR_WRITE(sc, reg, regval);
   1577 
   1578 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1579 		delay(5);
   1580 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1581 			break;
   1582 	}
   1583 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1584 		aprint_error("%s: WARNING:"
   1585 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1586 		    device_xname(sc->sc_dev), reg);
   1587 	}
   1588 }
   1589 
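/*
 * Load a DMA address into the two little-endian words of a wiseman
 * (descriptor) address; with a 32-bit bus_addr_t the high word is zero.
 */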
   1590 static inline void
   1591 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1592 {
   1593 	wa->wa_low = htole32(v & 0xffffffffU);
   1594 	if (sizeof(bus_addr_t) == 8)
   1595 		wa->wa_high = htole32((uint64_t) v >> 32);
   1596 	else
   1597 		wa->wa_high = 0;
   1598 }
   1599 
   1600 /*
   1601  * Descriptor sync/init functions.
   1602  */
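/*
 * wm_cdtxsync() copes with ring wrap-around: for example, on a
 * 256-descriptor Tx ring, a sync of start=250, num=10 covers
 * descriptors 250..255 first and then 0..3 in a second
 * bus_dmamap_sync() call.
 */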
   1603 static inline void
   1604 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1605 {
   1606 	struct wm_softc *sc = txq->txq_sc;
   1607 
   1608 	/* If it will wrap around, sync to the end of the ring. */
   1609 	if ((start + num) > WM_NTXDESC(txq)) {
   1610 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1611 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1612 		    (WM_NTXDESC(txq) - start), ops);
   1613 		num -= (WM_NTXDESC(txq) - start);
   1614 		start = 0;
   1615 	}
   1616 
   1617 	/* Now sync whatever is left. */
   1618 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1619 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1620 }
   1621 
   1622 static inline void
   1623 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1624 {
   1625 	struct wm_softc *sc = rxq->rxq_sc;
   1626 
   1627 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1628 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1629 }
   1630 
   1631 static inline void
   1632 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1633 {
   1634 	struct wm_softc *sc = rxq->rxq_sc;
   1635 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1636 	struct mbuf *m = rxs->rxs_mbuf;
   1637 
   1638 	/*
   1639 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1640 	 * so that the payload after the Ethernet header is aligned
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
   1644 	 * The stupid chip uses the same size for every buffer, which
   1645 	 * is set in the Receive Control register.  We are using the 2K
   1646 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1647 	 * reason, we can't "scoot" packets longer than the standard
   1648 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1649 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1650 	 * the upper layer copy the headers.
   1651 	 */
   1652 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1653 
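	/*
	 * Three RX descriptor formats are in use below: the 82574 extended
	 * format, the advanced ("nq") format on WM_F_NEWQUEUE chips (82575
	 * and later), and the legacy wiseman format everywhere else.
	 */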
   1654 	if (sc->sc_type == WM_T_82574) {
   1655 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1656 		rxd->erx_data.erxd_addr =
   1657 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1658 		rxd->erx_data.erxd_dd = 0;
   1659 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1660 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1661 
   1662 		rxd->nqrx_data.nrxd_paddr =
   1663 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1664 		/* Currently, split header is not supported. */
   1665 		rxd->nqrx_data.nrxd_haddr = 0;
   1666 	} else {
   1667 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1668 
   1669 		wm_set_dma_addr(&rxd->wrx_addr,
   1670 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1671 		rxd->wrx_len = 0;
   1672 		rxd->wrx_cksum = 0;
   1673 		rxd->wrx_status = 0;
   1674 		rxd->wrx_errors = 0;
   1675 		rxd->wrx_special = 0;
   1676 	}
   1677 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1678 
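	/* Advancing the tail hands the descriptor back to the hardware. */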
   1679 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1680 }
   1681 
   1682 /*
   1683  * Device driver interface functions and commonly used functions.
   1684  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1685  */
   1686 
   1687 /* Lookup supported device table */
   1688 static const struct wm_product *
   1689 wm_lookup(const struct pci_attach_args *pa)
   1690 {
   1691 	const struct wm_product *wmp;
   1692 
   1693 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1694 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1695 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1696 			return wmp;
   1697 	}
   1698 	return NULL;
   1699 }
   1700 
   1701 /* The match function (ca_match) */
   1702 static int
   1703 wm_match(device_t parent, cfdata_t cf, void *aux)
   1704 {
   1705 	struct pci_attach_args *pa = aux;
   1706 
   1707 	if (wm_lookup(pa) != NULL)
   1708 		return 1;
   1709 
   1710 	return 0;
   1711 }
   1712 
   1713 /* The attach function (ca_attach) */
   1714 static void
   1715 wm_attach(device_t parent, device_t self, void *aux)
   1716 {
   1717 	struct wm_softc *sc = device_private(self);
   1718 	struct pci_attach_args *pa = aux;
   1719 	prop_dictionary_t dict;
   1720 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1721 	pci_chipset_tag_t pc = pa->pa_pc;
   1722 	int counts[PCI_INTR_TYPE_SIZE];
   1723 	pci_intr_type_t max_type;
   1724 	const char *eetype, *xname;
   1725 	bus_space_tag_t memt;
   1726 	bus_space_handle_t memh;
   1727 	bus_size_t memsize;
   1728 	int memh_valid;
   1729 	int i, error;
   1730 	const struct wm_product *wmp;
   1731 	prop_data_t ea;
   1732 	prop_number_t pn;
   1733 	uint8_t enaddr[ETHER_ADDR_LEN];
   1734 	char buf[256];
   1735 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1736 	pcireg_t preg, memtype;
   1737 	uint16_t eeprom_data, apme_mask;
   1738 	bool force_clear_smbi;
   1739 	uint32_t link_mode;
   1740 	uint32_t reg;
   1741 
   1742 	sc->sc_dev = self;
   1743 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1744 	sc->sc_core_stopping = false;
   1745 
   1746 	wmp = wm_lookup(pa);
   1747 #ifdef DIAGNOSTIC
   1748 	if (wmp == NULL) {
   1749 		printf("\n");
   1750 		panic("wm_attach: impossible");
   1751 	}
   1752 #endif
   1753 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1754 
   1755 	sc->sc_pc = pa->pa_pc;
   1756 	sc->sc_pcitag = pa->pa_tag;
   1757 
   1758 	if (pci_dma64_available(pa))
   1759 		sc->sc_dmat = pa->pa_dmat64;
   1760 	else
   1761 		sc->sc_dmat = pa->pa_dmat;
   1762 
   1763 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1764 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1765 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1766 
   1767 	sc->sc_type = wmp->wmp_type;
   1768 
   1769 	/* Set default function pointers */
   1770 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1771 	sc->phy.release = sc->nvm.release = wm_put_null;
   1772 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1773 
   1774 	if (sc->sc_type < WM_T_82543) {
   1775 		if (sc->sc_rev < 2) {
   1776 			aprint_error_dev(sc->sc_dev,
   1777 			    "i82542 must be at least rev. 2\n");
   1778 			return;
   1779 		}
   1780 		if (sc->sc_rev < 3)
   1781 			sc->sc_type = WM_T_82542_2_0;
   1782 	}
   1783 
   1784 	/*
   1785 	 * Disable MSI for Errata:
   1786 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1787 	 *
   1788 	 *  82544: Errata 25
   1789 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1790 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1791 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1792 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1793 	 *
   1794 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1795 	 *
   1796 	 *  82571 & 82572: Errata 63
   1797 	 */
   1798 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1799 	    || (sc->sc_type == WM_T_82572))
   1800 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1801 
   1802 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1803 	    || (sc->sc_type == WM_T_82580)
   1804 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1805 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1806 		sc->sc_flags |= WM_F_NEWQUEUE;
   1807 
   1808 	/* Set device properties (mactype) */
   1809 	dict = device_properties(sc->sc_dev);
   1810 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1811 
   1812 	/*
	 * Map the device.  All devices support memory-mapped access,
   1814 	 * and it is really required for normal operation.
   1815 	 */
   1816 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1817 	switch (memtype) {
   1818 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1819 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1820 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1821 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1822 		break;
   1823 	default:
   1824 		memh_valid = 0;
   1825 		break;
   1826 	}
   1827 
   1828 	if (memh_valid) {
   1829 		sc->sc_st = memt;
   1830 		sc->sc_sh = memh;
   1831 		sc->sc_ss = memsize;
   1832 	} else {
   1833 		aprint_error_dev(sc->sc_dev,
   1834 		    "unable to map device registers\n");
   1835 		return;
   1836 	}
   1837 
   1838 	/*
   1839 	 * In addition, i82544 and later support I/O mapped indirect
   1840 	 * register access.  It is not desirable (nor supported in
   1841 	 * this driver) to use it for normal operation, though it is
   1842 	 * required to work around bugs in some chip versions.
   1843 	 */
   1844 	if (sc->sc_type >= WM_T_82544) {
   1845 		/* First we have to find the I/O BAR. */
   1846 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1847 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1848 			if (memtype == PCI_MAPREG_TYPE_IO)
   1849 				break;
   1850 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1851 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1852 				i += 4;	/* skip high bits, too */
   1853 		}
   1854 		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's no problem, because newer chips don't have
			 * this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
   1865 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1866 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1867 				aprint_error_dev(sc->sc_dev,
   1868 				    "WARNING: I/O BAR at zero.\n");
   1869 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1870 					0, &sc->sc_iot, &sc->sc_ioh,
   1871 					NULL, &sc->sc_ios) == 0) {
   1872 				sc->sc_flags |= WM_F_IOH_VALID;
   1873 			} else
   1874 				aprint_error_dev(sc->sc_dev,
   1875 				    "WARNING: unable to map I/O space\n");
   1876 		}
   1877 
   1878 	}
   1879 
   1880 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1881 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1882 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1883 	if (sc->sc_type < WM_T_82542_2_1)
   1884 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1885 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1886 
   1887 	/* power up chip */
   1888 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1889 	    && error != EOPNOTSUPP) {
   1890 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1891 		return;
   1892 	}
   1893 
   1894 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
	/*
	 * To save interrupt resources, don't use MSI-X if we can use only
	 * one queue.
	 */
   1899 	if (sc->sc_nqueues > 1) {
   1900 		max_type = PCI_INTR_TYPE_MSIX;
		/*
		 * The 82583 has an MSI-X capability in the PCI configuration
		 * space, but it doesn't actually support it; at least the
		 * documentation doesn't say anything about MSI-X.
		 */
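		/* One vector per queue, plus one for link interrupts. */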
   1906 		counts[PCI_INTR_TYPE_MSIX]
   1907 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1908 	} else {
   1909 		max_type = PCI_INTR_TYPE_MSI;
   1910 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1911 	}
   1912 
   1913 	/* Allocation settings */
   1914 	counts[PCI_INTR_TYPE_MSI] = 1;
   1915 	counts[PCI_INTR_TYPE_INTX] = 1;
   1916 	/* overridden by disable flags */
   1917 	if (wm_disable_msi != 0) {
   1918 		counts[PCI_INTR_TYPE_MSI] = 0;
   1919 		if (wm_disable_msix != 0) {
   1920 			max_type = PCI_INTR_TYPE_INTX;
   1921 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1922 		}
   1923 	} else if (wm_disable_msix != 0) {
   1924 		max_type = PCI_INTR_TYPE_MSI;
   1925 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1926 	}
   1927 
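	/*
	 * Interrupt allocation fallback: if MSI-X setup fails we release
	 * it and retry with MSI; if MSI setup fails we retry with INTx.
	 */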
   1928 alloc_retry:
   1929 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1930 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1931 		return;
   1932 	}
   1933 
   1934 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1935 		error = wm_setup_msix(sc);
   1936 		if (error) {
   1937 			pci_intr_release(pc, sc->sc_intrs,
   1938 			    counts[PCI_INTR_TYPE_MSIX]);
   1939 
   1940 			/* Setup for MSI: Disable MSI-X */
   1941 			max_type = PCI_INTR_TYPE_MSI;
   1942 			counts[PCI_INTR_TYPE_MSI] = 1;
   1943 			counts[PCI_INTR_TYPE_INTX] = 1;
   1944 			goto alloc_retry;
   1945 		}
   1946 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1947 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1948 		error = wm_setup_legacy(sc);
   1949 		if (error) {
   1950 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1951 			    counts[PCI_INTR_TYPE_MSI]);
   1952 
   1953 			/* The next try is for INTx: Disable MSI */
   1954 			max_type = PCI_INTR_TYPE_INTX;
   1955 			counts[PCI_INTR_TYPE_INTX] = 1;
   1956 			goto alloc_retry;
   1957 		}
   1958 	} else {
   1959 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1960 		error = wm_setup_legacy(sc);
   1961 		if (error) {
   1962 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1963 			    counts[PCI_INTR_TYPE_INTX]);
   1964 			return;
   1965 		}
   1966 	}
   1967 
   1968 	/*
   1969 	 * Check the function ID (unit number of the chip).
   1970 	 */
   1971 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1972 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1973 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1974 	    || (sc->sc_type == WM_T_82580)
   1975 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1976 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1977 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1978 	else
   1979 		sc->sc_funcid = 0;
   1980 
   1981 	/*
   1982 	 * Determine a few things about the bus we're connected to.
   1983 	 */
   1984 	if (sc->sc_type < WM_T_82543) {
   1985 		/* We don't really know the bus characteristics here. */
   1986 		sc->sc_bus_speed = 33;
   1987 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
   1992 		sc->sc_flags |= WM_F_CSA;
   1993 		sc->sc_bus_speed = 66;
   1994 		aprint_verbose_dev(sc->sc_dev,
   1995 		    "Communication Streaming Architecture\n");
   1996 		if (sc->sc_type == WM_T_82547) {
   1997 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1998 			callout_setfunc(&sc->sc_txfifo_ch,
   1999 			    wm_82547_txfifo_stall, sc);
   2000 			aprint_verbose_dev(sc->sc_dev,
   2001 			    "using 82547 Tx FIFO stall work-around\n");
   2002 		}
   2003 	} else if (sc->sc_type >= WM_T_82571) {
   2004 		sc->sc_flags |= WM_F_PCIE;
   2005 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2006 		    && (sc->sc_type != WM_T_ICH10)
   2007 		    && (sc->sc_type != WM_T_PCH)
   2008 		    && (sc->sc_type != WM_T_PCH2)
   2009 		    && (sc->sc_type != WM_T_PCH_LPT)
   2010 		    && (sc->sc_type != WM_T_PCH_SPT)
   2011 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2012 			/* ICH* and PCH* have no PCIe capability registers */
   2013 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2014 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2015 				NULL) == 0)
   2016 				aprint_error_dev(sc->sc_dev,
   2017 				    "unable to find PCIe capability\n");
   2018 		}
   2019 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2020 	} else {
   2021 		reg = CSR_READ(sc, WMREG_STATUS);
   2022 		if (reg & STATUS_BUS64)
   2023 			sc->sc_flags |= WM_F_BUS64;
   2024 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2025 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2026 
   2027 			sc->sc_flags |= WM_F_PCIX;
   2028 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2029 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2030 				aprint_error_dev(sc->sc_dev,
   2031 				    "unable to find PCIX capability\n");
   2032 			else if (sc->sc_type != WM_T_82545_3 &&
   2033 				 sc->sc_type != WM_T_82546_3) {
   2034 				/*
   2035 				 * Work around a problem caused by the BIOS
   2036 				 * setting the max memory read byte count
   2037 				 * incorrectly.
   2038 				 */
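				/*
				 * MMRBC is encoded as (512 << n), so e.g.
				 * bytecnt 3 means 4096 bytes.
				 */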
   2039 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2040 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2041 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2042 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2043 
   2044 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2045 				    PCIX_CMD_BYTECNT_SHIFT;
   2046 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2047 				    PCIX_STATUS_MAXB_SHIFT;
   2048 				if (bytecnt > maxb) {
   2049 					aprint_verbose_dev(sc->sc_dev,
   2050 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2051 					    512 << bytecnt, 512 << maxb);
   2052 					pcix_cmd = (pcix_cmd &
   2053 					    ~PCIX_CMD_BYTECNT_MASK) |
   2054 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2055 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2056 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2057 					    pcix_cmd);
   2058 				}
   2059 			}
   2060 		}
   2061 		/*
   2062 		 * The quad port adapter is special; it has a PCIX-PCIX
   2063 		 * bridge on the board, and can run the secondary bus at
   2064 		 * a higher speed.
   2065 		 */
   2066 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2067 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2068 								      : 66;
   2069 		} else if (sc->sc_flags & WM_F_PCIX) {
   2070 			switch (reg & STATUS_PCIXSPD_MASK) {
   2071 			case STATUS_PCIXSPD_50_66:
   2072 				sc->sc_bus_speed = 66;
   2073 				break;
   2074 			case STATUS_PCIXSPD_66_100:
   2075 				sc->sc_bus_speed = 100;
   2076 				break;
   2077 			case STATUS_PCIXSPD_100_133:
   2078 				sc->sc_bus_speed = 133;
   2079 				break;
   2080 			default:
   2081 				aprint_error_dev(sc->sc_dev,
   2082 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2083 				    reg & STATUS_PCIXSPD_MASK);
   2084 				sc->sc_bus_speed = 66;
   2085 				break;
   2086 			}
   2087 		} else
   2088 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2089 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2090 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2091 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2092 	}
   2093 
   2094 	/* Disable ASPM L0s and/or L1 for workaround */
   2095 	wm_disable_aspm(sc);
   2096 
   2097 	/* clear interesting stat counters */
   2098 	CSR_READ(sc, WMREG_COLC);
   2099 	CSR_READ(sc, WMREG_RXERRC);
   2100 
   2101 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2102 	    || (sc->sc_type >= WM_T_ICH8))
   2103 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2104 	if (sc->sc_type >= WM_T_ICH8)
   2105 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2106 
   2107 	/* Set PHY, NVM mutex related stuff */
   2108 	switch (sc->sc_type) {
   2109 	case WM_T_82542_2_0:
   2110 	case WM_T_82542_2_1:
   2111 	case WM_T_82543:
   2112 	case WM_T_82544:
   2113 		/* Microwire */
   2114 		sc->nvm.read = wm_nvm_read_uwire;
   2115 		sc->sc_nvm_wordsize = 64;
   2116 		sc->sc_nvm_addrbits = 6;
   2117 		break;
   2118 	case WM_T_82540:
   2119 	case WM_T_82545:
   2120 	case WM_T_82545_3:
   2121 	case WM_T_82546:
   2122 	case WM_T_82546_3:
   2123 		/* Microwire */
   2124 		sc->nvm.read = wm_nvm_read_uwire;
   2125 		reg = CSR_READ(sc, WMREG_EECD);
   2126 		if (reg & EECD_EE_SIZE) {
   2127 			sc->sc_nvm_wordsize = 256;
   2128 			sc->sc_nvm_addrbits = 8;
   2129 		} else {
   2130 			sc->sc_nvm_wordsize = 64;
   2131 			sc->sc_nvm_addrbits = 6;
   2132 		}
   2133 		sc->sc_flags |= WM_F_LOCK_EECD;
   2134 		sc->nvm.acquire = wm_get_eecd;
   2135 		sc->nvm.release = wm_put_eecd;
   2136 		break;
   2137 	case WM_T_82541:
   2138 	case WM_T_82541_2:
   2139 	case WM_T_82547:
   2140 	case WM_T_82547_2:
   2141 		reg = CSR_READ(sc, WMREG_EECD);
		/*
		 * wm_nvm_set_addrbits_size_eecd() itself accesses the SPI
		 * only on 8254[17], so set the flags and functions before
		 * calling it.
		 */
   2146 		sc->sc_flags |= WM_F_LOCK_EECD;
   2147 		sc->nvm.acquire = wm_get_eecd;
   2148 		sc->nvm.release = wm_put_eecd;
   2149 		if (reg & EECD_EE_TYPE) {
   2150 			/* SPI */
   2151 			sc->nvm.read = wm_nvm_read_spi;
   2152 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2153 			wm_nvm_set_addrbits_size_eecd(sc);
   2154 		} else {
   2155 			/* Microwire */
   2156 			sc->nvm.read = wm_nvm_read_uwire;
   2157 			if ((reg & EECD_EE_ABITS) != 0) {
   2158 				sc->sc_nvm_wordsize = 256;
   2159 				sc->sc_nvm_addrbits = 8;
   2160 			} else {
   2161 				sc->sc_nvm_wordsize = 64;
   2162 				sc->sc_nvm_addrbits = 6;
   2163 			}
   2164 		}
   2165 		break;
   2166 	case WM_T_82571:
   2167 	case WM_T_82572:
   2168 		/* SPI */
   2169 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2171 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2172 		wm_nvm_set_addrbits_size_eecd(sc);
   2173 		sc->phy.acquire = wm_get_swsm_semaphore;
   2174 		sc->phy.release = wm_put_swsm_semaphore;
   2175 		sc->nvm.acquire = wm_get_nvm_82571;
   2176 		sc->nvm.release = wm_put_nvm_82571;
   2177 		break;
   2178 	case WM_T_82573:
   2179 	case WM_T_82574:
   2180 	case WM_T_82583:
   2181 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2183 		if (sc->sc_type == WM_T_82573) {
   2184 			sc->phy.acquire = wm_get_swsm_semaphore;
   2185 			sc->phy.release = wm_put_swsm_semaphore;
   2186 			sc->nvm.acquire = wm_get_nvm_82571;
   2187 			sc->nvm.release = wm_put_nvm_82571;
   2188 		} else {
   2189 			/* Both PHY and NVM use the same semaphore. */
   2190 			sc->phy.acquire = sc->nvm.acquire
   2191 			    = wm_get_swfwhw_semaphore;
   2192 			sc->phy.release = sc->nvm.release
   2193 			    = wm_put_swfwhw_semaphore;
   2194 		}
   2195 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2196 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2197 			sc->sc_nvm_wordsize = 2048;
   2198 		} else {
   2199 			/* SPI */
   2200 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2201 			wm_nvm_set_addrbits_size_eecd(sc);
   2202 		}
   2203 		break;
   2204 	case WM_T_82575:
   2205 	case WM_T_82576:
   2206 	case WM_T_82580:
   2207 	case WM_T_I350:
   2208 	case WM_T_I354:
   2209 	case WM_T_80003:
   2210 		/* SPI */
   2211 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2212 		wm_nvm_set_addrbits_size_eecd(sc);
   2213 		if ((sc->sc_type == WM_T_80003)
   2214 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2215 			sc->nvm.read = wm_nvm_read_eerd;
   2216 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2217 		} else {
   2218 			sc->nvm.read = wm_nvm_read_spi;
   2219 			sc->sc_flags |= WM_F_LOCK_EECD;
   2220 		}
   2221 		sc->phy.acquire = wm_get_phy_82575;
   2222 		sc->phy.release = wm_put_phy_82575;
   2223 		sc->nvm.acquire = wm_get_nvm_80003;
   2224 		sc->nvm.release = wm_put_nvm_80003;
   2225 		break;
   2226 	case WM_T_ICH8:
   2227 	case WM_T_ICH9:
   2228 	case WM_T_ICH10:
   2229 	case WM_T_PCH:
   2230 	case WM_T_PCH2:
   2231 	case WM_T_PCH_LPT:
   2232 		sc->nvm.read = wm_nvm_read_ich8;
   2233 		/* FLASH */
   2234 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2235 		sc->sc_nvm_wordsize = 2048;
   2236 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2237 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2238 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2239 			aprint_error_dev(sc->sc_dev,
   2240 			    "can't map FLASH registers\n");
   2241 			goto out;
   2242 		}
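		/*
		 * GFPREG holds the first and last sectors of the flash's
		 * gigabit region (base in the low bits, limit in bits 16
		 * and up).  Compute the region size in sectors, convert it
		 * to bytes, and split it into two banks of 16-bit words.
		 */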
   2243 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2244 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2245 		    ICH_FLASH_SECTOR_SIZE;
   2246 		sc->sc_ich8_flash_bank_size =
   2247 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2248 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2249 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2250 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2251 		sc->sc_flashreg_offset = 0;
   2252 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2253 		sc->phy.release = wm_put_swflag_ich8lan;
   2254 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2255 		sc->nvm.release = wm_put_nvm_ich8lan;
   2256 		break;
   2257 	case WM_T_PCH_SPT:
   2258 	case WM_T_PCH_CNP:
   2259 		sc->nvm.read = wm_nvm_read_spt;
   2260 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2261 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2262 		sc->sc_flasht = sc->sc_st;
   2263 		sc->sc_flashh = sc->sc_sh;
   2264 		sc->sc_ich8_flash_base = 0;
   2265 		sc->sc_nvm_wordsize =
   2266 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2267 		    * NVM_SIZE_MULTIPLIER;
		/* That is the size in bytes; we want words */
   2269 		sc->sc_nvm_wordsize /= 2;
   2270 		/* assume 2 banks */
   2271 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2272 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2273 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2274 		sc->phy.release = wm_put_swflag_ich8lan;
   2275 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2276 		sc->nvm.release = wm_put_nvm_ich8lan;
   2277 		break;
   2278 	case WM_T_I210:
   2279 	case WM_T_I211:
		/* Allow a single clear of the SW semaphore on I210 and newer */
   2281 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2282 		if (wm_nvm_flash_presence_i210(sc)) {
   2283 			sc->nvm.read = wm_nvm_read_eerd;
   2284 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2285 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2286 			wm_nvm_set_addrbits_size_eecd(sc);
   2287 		} else {
   2288 			sc->nvm.read = wm_nvm_read_invm;
   2289 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2290 			sc->sc_nvm_wordsize = INVM_SIZE;
   2291 		}
   2292 		sc->phy.acquire = wm_get_phy_82575;
   2293 		sc->phy.release = wm_put_phy_82575;
   2294 		sc->nvm.acquire = wm_get_nvm_80003;
   2295 		sc->nvm.release = wm_put_nvm_80003;
   2296 		break;
   2297 	default:
   2298 		break;
   2299 	}
   2300 
   2301 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2302 	switch (sc->sc_type) {
   2303 	case WM_T_82571:
   2304 	case WM_T_82572:
   2305 		reg = CSR_READ(sc, WMREG_SWSM2);
   2306 		if ((reg & SWSM2_LOCK) == 0) {
   2307 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2308 			force_clear_smbi = true;
   2309 		} else
   2310 			force_clear_smbi = false;
   2311 		break;
   2312 	case WM_T_82573:
   2313 	case WM_T_82574:
   2314 	case WM_T_82583:
   2315 		force_clear_smbi = true;
   2316 		break;
   2317 	default:
   2318 		force_clear_smbi = false;
   2319 		break;
   2320 	}
   2321 	if (force_clear_smbi) {
   2322 		reg = CSR_READ(sc, WMREG_SWSM);
   2323 		if ((reg & SWSM_SMBI) != 0)
   2324 			aprint_error_dev(sc->sc_dev,
   2325 			    "Please update the Bootagent\n");
   2326 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2327 	}
   2328 
	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
   2334 	/*
   2335 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2336 	 * this for later, so we can fail future reads from the EEPROM.
   2337 	 */
   2338 	if (wm_nvm_validate_checksum(sc)) {
		/*
		 * Validate once more, because some PCIe parts fail the
		 * first check due to the link being in a sleep state.
		 */
   2343 		if (wm_nvm_validate_checksum(sc))
   2344 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2345 	}
   2346 
   2347 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2348 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2349 	else {
   2350 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2351 		    sc->sc_nvm_wordsize);
   2352 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2353 			aprint_verbose("iNVM");
   2354 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2355 			aprint_verbose("FLASH(HW)");
   2356 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2357 			aprint_verbose("FLASH");
   2358 		else {
   2359 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2360 				eetype = "SPI";
   2361 			else
   2362 				eetype = "MicroWire";
   2363 			aprint_verbose("(%d address bits) %s EEPROM",
   2364 			    sc->sc_nvm_addrbits, eetype);
   2365 		}
   2366 	}
   2367 	wm_nvm_version(sc);
   2368 	aprint_verbose("\n");
   2369 
	/*
	 * XXX This is the first call of wm_gmii_setup_phytype. The result
	 * might be incorrect.
	 */
   2374 	wm_gmii_setup_phytype(sc, 0, 0);
   2375 
   2376 	/* Reset the chip to a known state. */
   2377 	wm_reset(sc);
   2378 
   2379 	/*
   2380 	 * Check for I21[01] PLL workaround.
   2381 	 *
   2382 	 * Three cases:
   2383 	 * a) Chip is I211.
   2384 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2385 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2386 	 */
   2387 	if (sc->sc_type == WM_T_I211)
   2388 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2389 	if (sc->sc_type == WM_T_I210) {
   2390 		if (!wm_nvm_flash_presence_i210(sc))
   2391 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2392 		else if ((sc->sc_nvm_ver_major < 3)
   2393 		    || ((sc->sc_nvm_ver_major == 3)
   2394 			&& (sc->sc_nvm_ver_minor < 25))) {
   2395 			aprint_verbose_dev(sc->sc_dev,
   2396 			    "ROM image version %d.%d is older than 3.25\n",
   2397 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2398 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2399 		}
   2400 	}
   2401 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2402 		wm_pll_workaround_i210(sc);
   2403 
   2404 	wm_get_wakeup(sc);
   2405 
   2406 	/* Non-AMT based hardware can now take control from firmware */
   2407 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2408 		wm_get_hw_control(sc);
   2409 
	/*
	 * Read the Ethernet address from the EEPROM if it isn't found
	 * first in the device properties.
	 */
   2414 	ea = prop_dictionary_get(dict, "mac-address");
   2415 	if (ea != NULL) {
   2416 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2417 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2418 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2419 	} else {
   2420 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2421 			aprint_error_dev(sc->sc_dev,
   2422 			    "unable to read Ethernet address\n");
   2423 			goto out;
   2424 		}
   2425 	}
   2426 
   2427 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2428 	    ether_sprintf(enaddr));
   2429 
   2430 	/*
   2431 	 * Read the config info from the EEPROM, and set up various
   2432 	 * bits in the control registers based on their contents.
   2433 	 */
   2434 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2435 	if (pn != NULL) {
   2436 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2437 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2438 	} else {
   2439 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2440 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2441 			goto out;
   2442 		}
   2443 	}
   2444 
   2445 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2446 	if (pn != NULL) {
   2447 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2448 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2449 	} else {
   2450 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2451 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2452 			goto out;
   2453 		}
   2454 	}
   2455 
   2456 	/* check for WM_F_WOL */
   2457 	switch (sc->sc_type) {
   2458 	case WM_T_82542_2_0:
   2459 	case WM_T_82542_2_1:
   2460 	case WM_T_82543:
   2461 		/* dummy? */
   2462 		eeprom_data = 0;
   2463 		apme_mask = NVM_CFG3_APME;
   2464 		break;
   2465 	case WM_T_82544:
   2466 		apme_mask = NVM_CFG2_82544_APM_EN;
   2467 		eeprom_data = cfg2;
   2468 		break;
   2469 	case WM_T_82546:
   2470 	case WM_T_82546_3:
   2471 	case WM_T_82571:
   2472 	case WM_T_82572:
   2473 	case WM_T_82573:
   2474 	case WM_T_82574:
   2475 	case WM_T_82583:
   2476 	case WM_T_80003:
   2477 	default:
   2478 		apme_mask = NVM_CFG3_APME;
   2479 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2480 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2481 		break;
   2482 	case WM_T_82575:
   2483 	case WM_T_82576:
   2484 	case WM_T_82580:
   2485 	case WM_T_I350:
   2486 	case WM_T_I354: /* XXX ok? */
   2487 	case WM_T_ICH8:
   2488 	case WM_T_ICH9:
   2489 	case WM_T_ICH10:
   2490 	case WM_T_PCH:
   2491 	case WM_T_PCH2:
   2492 	case WM_T_PCH_LPT:
   2493 	case WM_T_PCH_SPT:
   2494 	case WM_T_PCH_CNP:
   2495 		/* XXX The funcid should be checked on some devices */
   2496 		apme_mask = WUC_APME;
   2497 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2498 		break;
   2499 	}
   2500 
   2501 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2502 	if ((eeprom_data & apme_mask) != 0)
   2503 		sc->sc_flags |= WM_F_WOL;
   2504 
   2505 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2506 		/* Check NVM for autonegotiation */
   2507 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2508 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2509 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2510 		}
   2511 	}
   2512 
	/*
	 * XXX Some multiple-port cards need special handling to disable
	 * a particular port.
	 */
   2517 
   2518 	if (sc->sc_type >= WM_T_82544) {
   2519 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2520 		if (pn != NULL) {
   2521 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2522 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2523 		} else {
   2524 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2525 				aprint_error_dev(sc->sc_dev,
   2526 				    "unable to read SWDPIN\n");
   2527 				goto out;
   2528 			}
   2529 		}
   2530 	}
   2531 
   2532 	if (cfg1 & NVM_CFG1_ILOS)
   2533 		sc->sc_ctrl |= CTRL_ILOS;
   2534 
	/*
	 * XXX
	 * This code isn't correct because pins 2 and 3 are located in
	 * different positions on newer chips. Check all the datasheets.
	 *
	 * Until this problem is resolved, only do this on chips up to
	 * the 82580.
	 */
   2542 	if (sc->sc_type <= WM_T_82580) {
   2543 		if (sc->sc_type >= WM_T_82544) {
   2544 			sc->sc_ctrl |=
   2545 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2546 			    CTRL_SWDPIO_SHIFT;
   2547 			sc->sc_ctrl |=
   2548 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2549 			    CTRL_SWDPINS_SHIFT;
   2550 		} else {
   2551 			sc->sc_ctrl |=
   2552 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2553 			    CTRL_SWDPIO_SHIFT;
   2554 		}
   2555 	}
   2556 
	/* XXX Is this needed for chips other than the 82580, too? */
   2558 	if (sc->sc_type == WM_T_82580) {
   2559 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2560 		if (nvmword & __BIT(13))
   2561 			sc->sc_ctrl |= CTRL_ILOS;
   2562 	}
   2563 
   2564 #if 0
   2565 	if (sc->sc_type >= WM_T_82544) {
   2566 		if (cfg1 & NVM_CFG1_IPS0)
   2567 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2568 		if (cfg1 & NVM_CFG1_IPS1)
   2569 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2570 		sc->sc_ctrl_ext |=
   2571 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2572 		    CTRL_EXT_SWDPIO_SHIFT;
   2573 		sc->sc_ctrl_ext |=
   2574 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2575 		    CTRL_EXT_SWDPINS_SHIFT;
   2576 	} else {
   2577 		sc->sc_ctrl_ext |=
   2578 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2579 		    CTRL_EXT_SWDPIO_SHIFT;
   2580 	}
   2581 #endif
   2582 
   2583 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2584 #if 0
   2585 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2586 #endif
   2587 
   2588 	if (sc->sc_type == WM_T_PCH) {
   2589 		uint16_t val;
   2590 
   2591 		/* Save the NVM K1 bit setting */
   2592 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2593 
   2594 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2595 			sc->sc_nvm_k1_enabled = 1;
   2596 		else
   2597 			sc->sc_nvm_k1_enabled = 0;
   2598 	}
   2599 
	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2601 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2602 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2603 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2604 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2605 	    || sc->sc_type == WM_T_82573
   2606 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2607 		/* Copper only */
	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2612 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2613 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2614 		switch (link_mode) {
   2615 		case CTRL_EXT_LINK_MODE_1000KX:
   2616 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2617 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2618 			break;
   2619 		case CTRL_EXT_LINK_MODE_SGMII:
   2620 			if (wm_sgmii_uses_mdio(sc)) {
   2621 				aprint_verbose_dev(sc->sc_dev,
   2622 				    "SGMII(MDIO)\n");
   2623 				sc->sc_flags |= WM_F_SGMII;
   2624 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2625 				break;
   2626 			}
   2627 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2628 			/*FALLTHROUGH*/
   2629 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2630 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2631 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2632 				if (link_mode
   2633 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2634 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2635 					sc->sc_flags |= WM_F_SGMII;
   2636 				} else {
   2637 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2638 					aprint_verbose_dev(sc->sc_dev,
   2639 					    "SERDES\n");
   2640 				}
   2641 				break;
   2642 			}
   2643 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2644 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2645 
   2646 			/* Change current link mode setting */
   2647 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2648 			switch (sc->sc_mediatype) {
   2649 			case WM_MEDIATYPE_COPPER:
   2650 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2651 				break;
   2652 			case WM_MEDIATYPE_SERDES:
   2653 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2654 				break;
   2655 			default:
   2656 				break;
   2657 			}
   2658 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2659 			break;
   2660 		case CTRL_EXT_LINK_MODE_GMII:
   2661 		default:
   2662 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2663 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2664 			break;
   2665 		}
   2666 
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
   2672 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2673 	} else if (sc->sc_type < WM_T_82543 ||
   2674 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2675 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2676 			aprint_error_dev(sc->sc_dev,
   2677 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2678 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2679 		}
   2680 	} else {
   2681 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2682 			aprint_error_dev(sc->sc_dev,
   2683 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2684 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2685 		}
   2686 	}
   2687 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2688 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2689 
   2690 	/* Set device properties (macflags) */
   2691 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2692 
   2693 	/* Initialize the media structures accordingly. */
   2694 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2695 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2696 	else
   2697 		wm_tbi_mediainit(sc); /* All others */
   2698 
   2699 	ifp = &sc->sc_ethercom.ec_if;
   2700 	xname = device_xname(sc->sc_dev);
   2701 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2702 	ifp->if_softc = sc;
   2703 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2704 #ifdef WM_MPSAFE
   2705 	ifp->if_extflags = IFEF_MPSAFE;
   2706 #endif
   2707 	ifp->if_ioctl = wm_ioctl;
   2708 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2709 		ifp->if_start = wm_nq_start;
		/*
		 * When the number of CPUs is one and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
		 * other for link status changes.
		 * In this situation, wm_nq_transmit() is disadvantageous
		 * because of the wm_select_txqueue() and pcq(9) overhead.
		 */
   2718 		if (wm_is_using_multiqueue(sc))
   2719 			ifp->if_transmit = wm_nq_transmit;
   2720 	} else {
   2721 		ifp->if_start = wm_start;
		/*
		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
		 */
   2725 		if (wm_is_using_multiqueue(sc))
   2726 			ifp->if_transmit = wm_transmit;
   2727 	}
	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2729 	ifp->if_init = wm_init;
   2730 	ifp->if_stop = wm_stop;
   2731 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2732 	IFQ_SET_READY(&ifp->if_snd);
   2733 
   2734 	/* Check for jumbo frame */
   2735 	switch (sc->sc_type) {
   2736 	case WM_T_82573:
   2737 		/* XXX limited to 9234 if ASPM is disabled */
   2738 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2739 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2740 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2741 		break;
   2742 	case WM_T_82571:
   2743 	case WM_T_82572:
   2744 	case WM_T_82574:
   2745 	case WM_T_82583:
   2746 	case WM_T_82575:
   2747 	case WM_T_82576:
   2748 	case WM_T_82580:
   2749 	case WM_T_I350:
   2750 	case WM_T_I354:
   2751 	case WM_T_I210:
   2752 	case WM_T_I211:
   2753 	case WM_T_80003:
   2754 	case WM_T_ICH9:
   2755 	case WM_T_ICH10:
   2756 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2757 	case WM_T_PCH_LPT:
   2758 	case WM_T_PCH_SPT:
   2759 	case WM_T_PCH_CNP:
   2760 		/* XXX limited to 9234 */
   2761 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2762 		break;
   2763 	case WM_T_PCH:
   2764 		/* XXX limited to 4096 */
   2765 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2766 		break;
   2767 	case WM_T_82542_2_0:
   2768 	case WM_T_82542_2_1:
   2769 	case WM_T_ICH8:
   2770 		/* No support for jumbo frame */
   2771 		break;
   2772 	default:
   2773 		/* ETHER_MAX_LEN_JUMBO */
   2774 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2775 		break;
   2776 	}
   2777 
   2778 	/* If we're an i82543 or greater, we can support VLANs. */
   2779 	if (sc->sc_type >= WM_T_82543)
   2780 		sc->sc_ethercom.ec_capabilities |=
   2781 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2782 
   2783 	/*
   2784 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2785 	 * on i82543 and later.
   2786 	 */
   2787 	if (sc->sc_type >= WM_T_82543) {
   2788 		ifp->if_capabilities |=
   2789 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2790 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2791 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2792 		    IFCAP_CSUM_TCPv6_Tx |
   2793 		    IFCAP_CSUM_UDPv6_Tx;
   2794 	}
   2795 
   2796 	/*
   2797 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2798 	 *
   2799 	 *	82541GI (8086:1076) ... no
   2800 	 *	82572EI (8086:10b9) ... yes
   2801 	 */
   2802 	if (sc->sc_type >= WM_T_82571) {
   2803 		ifp->if_capabilities |=
   2804 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2805 	}
   2806 
   2807 	/*
   2808 	 * If we're an i82544 or greater (except i82547), we can do
   2809 	 * TCP segmentation offload.
   2810 	 */
   2811 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2812 		ifp->if_capabilities |= IFCAP_TSOv4;
   2813 	}
   2814 
   2815 	if (sc->sc_type >= WM_T_82571) {
   2816 		ifp->if_capabilities |= IFCAP_TSOv6;
   2817 	}
   2818 
   2819 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2820 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2821 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2822 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2823 
   2824 #ifdef WM_MPSAFE
   2825 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2826 #else
   2827 	sc->sc_core_lock = NULL;
   2828 #endif
   2829 
   2830 	/* Attach the interface. */
   2831 	error = if_initialize(ifp);
   2832 	if (error != 0) {
   2833 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2834 		    error);
   2835 		return; /* Error */
   2836 	}
   2837 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2838 	ether_ifattach(ifp, enaddr);
   2839 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2840 	if_register(ifp);
   2841 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2842 	    RND_FLAG_DEFAULT);
   2843 
   2844 #ifdef WM_EVENT_COUNTERS
   2845 	/* Attach event counters. */
   2846 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2847 	    NULL, xname, "linkintr");
   2848 
   2849 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2850 	    NULL, xname, "tx_xoff");
   2851 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2852 	    NULL, xname, "tx_xon");
   2853 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2854 	    NULL, xname, "rx_xoff");
   2855 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2856 	    NULL, xname, "rx_xon");
   2857 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2858 	    NULL, xname, "rx_macctl");
   2859 #endif /* WM_EVENT_COUNTERS */
   2860 
   2861 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2862 		pmf_class_network_register(self, ifp);
   2863 	else
   2864 		aprint_error_dev(self, "couldn't establish power handler\n");
   2865 
   2866 	sc->sc_flags |= WM_F_ATTACHED;
   2867  out:
   2868 	return;
   2869 }
   2870 
   2871 /* The detach function (ca_detach) */
   2872 static int
   2873 wm_detach(device_t self, int flags __unused)
   2874 {
   2875 	struct wm_softc *sc = device_private(self);
   2876 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2877 	int i;
   2878 
   2879 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2880 		return 0;
   2881 
   2882 	/* Stop the interface.  Callouts are stopped in wm_stop(). */
   2883 	wm_stop(ifp, 1);
   2884 
   2885 	pmf_device_deregister(self);
   2886 
   2887 #ifdef WM_EVENT_COUNTERS
   2888 	evcnt_detach(&sc->sc_ev_linkintr);
   2889 
   2890 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2891 	evcnt_detach(&sc->sc_ev_tx_xon);
   2892 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2893 	evcnt_detach(&sc->sc_ev_rx_xon);
   2894 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2895 #endif /* WM_EVENT_COUNTERS */
   2896 
   2897 	/* Tell the firmware about the release */
   2898 	WM_CORE_LOCK(sc);
   2899 	wm_release_manageability(sc);
   2900 	wm_release_hw_control(sc);
   2901 	wm_enable_wakeup(sc);
   2902 	WM_CORE_UNLOCK(sc);
   2903 
   2904 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2905 
   2906 	/* Delete all remaining media. */
   2907 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2908 
   2909 	ether_ifdetach(ifp);
   2910 	if_detach(ifp);
   2911 	if_percpuq_destroy(sc->sc_ipq);
   2912 
   2913 	/* Unload RX dmamaps and free mbufs */
   2914 	for (i = 0; i < sc->sc_nqueues; i++) {
   2915 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2916 		mutex_enter(rxq->rxq_lock);
   2917 		wm_rxdrain(rxq);
   2918 		mutex_exit(rxq->rxq_lock);
   2919 	}
   2920 	/* Must unlock here */
   2921 
   2922 	/* Disestablish the interrupt handler */
   2923 	for (i = 0; i < sc->sc_nintrs; i++) {
   2924 		if (sc->sc_ihs[i] != NULL) {
   2925 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2926 			sc->sc_ihs[i] = NULL;
   2927 		}
   2928 	}
   2929 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2930 
   2931 	wm_free_txrx_queues(sc);
   2932 
   2933 	/* Unmap the registers */
   2934 	if (sc->sc_ss) {
   2935 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2936 		sc->sc_ss = 0;
   2937 	}
   2938 	if (sc->sc_ios) {
   2939 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2940 		sc->sc_ios = 0;
   2941 	}
   2942 	if (sc->sc_flashs) {
   2943 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2944 		sc->sc_flashs = 0;
   2945 	}
   2946 
   2947 	if (sc->sc_core_lock)
   2948 		mutex_obj_free(sc->sc_core_lock);
   2949 	if (sc->sc_ich_phymtx)
   2950 		mutex_obj_free(sc->sc_ich_phymtx);
   2951 	if (sc->sc_ich_nvmmtx)
   2952 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2953 
   2954 	return 0;
   2955 }
   2956 
   2957 static bool
   2958 wm_suspend(device_t self, const pmf_qual_t *qual)
   2959 {
   2960 	struct wm_softc *sc = device_private(self);
   2961 
   2962 	wm_release_manageability(sc);
   2963 	wm_release_hw_control(sc);
   2964 	wm_enable_wakeup(sc);
   2965 
   2966 	return true;
   2967 }
   2968 
   2969 static bool
   2970 wm_resume(device_t self, const pmf_qual_t *qual)
   2971 {
   2972 	struct wm_softc *sc = device_private(self);
   2973 
   2974 	/* Disable ASPM L0s and/or L1 for workaround */
   2975 	wm_disable_aspm(sc);
   2976 	wm_init_manageability(sc);
   2977 
   2978 	return true;
   2979 }
   2980 
   2981 /*
   2982  * wm_watchdog:		[ifnet interface function]
   2983  *
   2984  *	Watchdog timer handler.
   2985  */
   2986 static void
   2987 wm_watchdog(struct ifnet *ifp)
   2988 {
   2989 	int qid;
   2990 	struct wm_softc *sc = ifp->if_softc;
   2991 	uint16_t hang_queue = 0; /* 16 bits suffice; wm(4)'s max queue count is 16 (82576). */
   2992 
   2993 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2994 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2995 
   2996 		wm_watchdog_txq(ifp, txq, &hang_queue);
   2997 	}
   2998 
   2999 	/*
   3000 	 * If any queue hung up, reset the interface.
   3001 	 */
   3002 	if (hang_queue != 0) {
   3003 		(void) wm_init(ifp);
   3004 
   3005 		/*
   3006 		 * There is still some upper layer processing which calls
   3007 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3008 		 */
   3009 		/* Try to get more packets going. */
   3010 		ifp->if_start(ifp);
   3011 	}
   3012 }
   3013 
   3014 
   3015 static void
   3016 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3017 {
   3018 
   3019 	mutex_enter(txq->txq_lock);
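	/*
	 * Probe further only if transmission has been pending longer
	 * than the watchdog timeout.
	 */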
   3020 	if (txq->txq_sending &&
   3021 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3022 		wm_watchdog_txq_locked(ifp, txq, hang);
   3023 	}
   3024 	mutex_exit(txq->txq_lock);
   3025 }
   3026 
   3027 static void
   3028 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3029     uint16_t *hang)
   3030 {
   3031 	struct wm_softc *sc = ifp->if_softc;
   3032 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3033 
   3034 	KASSERT(mutex_owned(txq->txq_lock));
   3035 
   3036 	/*
   3037 	 * Since we're using delayed interrupts, sweep up
   3038 	 * before we report an error.
   3039 	 */
   3040 	wm_txeof(txq, UINT_MAX);
   3041 
   3042 	if (txq->txq_sending)
   3043 		*hang |= __BIT(wmq->wmq_id);
   3044 
   3045 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3046 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3047 		    device_xname(sc->sc_dev));
   3048 	} else {
   3049 #ifdef WM_DEBUG
   3050 		int i, j;
   3051 		struct wm_txsoft *txs;
   3052 #endif
   3053 		log(LOG_ERR,
   3054 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3055 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3056 		    txq->txq_next);
   3057 		ifp->if_oerrors++;
   3058 #ifdef WM_DEBUG
   3059 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3060 		    i = WM_NEXTTXS(txq, i)) {
   3061 		    txs = &txq->txq_soft[i];
   3062 		    printf("txs %d tx %d -> %d\n",
   3063 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3064 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3065 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3066 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3067 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3068 				    printf("\t %#08x%08x\n",
   3069 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3070 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3071 			    } else {
   3072 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3073 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3074 					txq->txq_descs[j].wtx_addr.wa_low);
   3075 				    printf("\t %#04x%02x%02x%08x\n",
   3076 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3077 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3078 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3079 					txq->txq_descs[j].wtx_cmdlen);
   3080 			    }
   3081 			if (j == txs->txs_lastdesc)
   3082 				break;
   3083 			}
   3084 		}
   3085 #endif
   3086 	}
   3087 }
   3088 
   3089 /*
   3090  * wm_tick:
   3091  *
   3092  *	One second timer, used to check link status, sweep up
   3093  *	completed transmit jobs, etc.
   3094  */
   3095 static void
   3096 wm_tick(void *arg)
   3097 {
   3098 	struct wm_softc *sc = arg;
   3099 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3100 #ifndef WM_MPSAFE
   3101 	int s = splnet();
   3102 #endif
   3103 
   3104 	WM_CORE_LOCK(sc);
   3105 
   3106 	if (sc->sc_core_stopping) {
   3107 		WM_CORE_UNLOCK(sc);
   3108 #ifndef WM_MPSAFE
   3109 		splx(s);
   3110 #endif
   3111 		return;
   3112 	}
   3113 
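	/*
	 * The XON/XOFF and flow control statistics registers are not
	 * present on the oldest (82542 rev 2.0) chip.
	 */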
   3114 	if (sc->sc_type >= WM_T_82542_2_1) {
   3115 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3116 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3117 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3118 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3119 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3120 	}
   3121 
   3122 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3123 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3124 	    + CSR_READ(sc, WMREG_CRCERRS)
   3125 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3126 	    + CSR_READ(sc, WMREG_SYMERRC)
   3127 	    + CSR_READ(sc, WMREG_RXERRC)
   3128 	    + CSR_READ(sc, WMREG_SEC)
   3129 	    + CSR_READ(sc, WMREG_CEXTERR)
   3130 	    + CSR_READ(sc, WMREG_RLEC);
   3131 	/*
   3132 	 * WMREG_RNBC is incremented when there are no available buffers in
   3133 	 * host memory. It does not count dropped packets, because the
   3134 	 * ethernet controller can still receive packets in that case as
   3135 	 * long as there is space in the PHY's FIFO.
   3136 	 *
   3137 	 * If you want to track WMREG_RNBC, use your own EVCNT instead of
   3138 	 * if_iqdrops.
   3139 	 */
   3140 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3141 
   3142 	if (sc->sc_flags & WM_F_HAS_MII)
   3143 		mii_tick(&sc->sc_mii);
   3144 	else if ((sc->sc_type >= WM_T_82575)
   3145 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3146 		wm_serdes_tick(sc);
   3147 	else
   3148 		wm_tbi_tick(sc);
   3149 
   3150 	WM_CORE_UNLOCK(sc);
   3151 
   3152 	wm_watchdog(ifp);
   3153 
   3154 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3155 }
   3156 
   3157 static int
   3158 wm_ifflags_cb(struct ethercom *ec)
   3159 {
   3160 	struct ifnet *ifp = &ec->ec_if;
   3161 	struct wm_softc *sc = ifp->if_softc;
   3162 	int rc = 0;
   3163 
   3164 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3165 		device_xname(sc->sc_dev), __func__));
   3166 
   3167 	WM_CORE_LOCK(sc);
   3168 
   3169 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3170 	sc->sc_if_flags = ifp->if_flags;
   3171 
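	/*
	 * If any flag other than the trivially-changeable ones changed,
	 * return ENETRESET so the caller fully reinitializes the
	 * interface.
	 */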
   3172 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3173 		rc = ENETRESET;
   3174 		goto out;
   3175 	}
   3176 
   3177 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3178 		wm_set_filter(sc);
   3179 
   3180 	wm_set_vlan(sc);
   3181 
   3182 out:
   3183 	WM_CORE_UNLOCK(sc);
   3184 
   3185 	return rc;
   3186 }
   3187 
   3188 /*
   3189  * wm_ioctl:		[ifnet interface function]
   3190  *
   3191  *	Handle control requests from the operator.
   3192  */
   3193 static int
   3194 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3195 {
   3196 	struct wm_softc *sc = ifp->if_softc;
   3197 	struct ifreq *ifr = (struct ifreq *) data;
   3198 	struct ifaddr *ifa = (struct ifaddr *)data;
   3199 	struct sockaddr_dl *sdl;
   3200 	int s, error;
   3201 
   3202 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3203 		device_xname(sc->sc_dev), __func__));
   3204 
   3205 #ifndef WM_MPSAFE
   3206 	s = splnet();
   3207 #endif
   3208 	switch (cmd) {
   3209 	case SIOCSIFMEDIA:
   3210 	case SIOCGIFMEDIA:
   3211 		WM_CORE_LOCK(sc);
   3212 		/* Flow control requires full-duplex mode. */
   3213 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3214 		    (ifr->ifr_media & IFM_FDX) == 0)
   3215 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3216 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3217 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3218 				/* We can do both TXPAUSE and RXPAUSE. */
   3219 				ifr->ifr_media |=
   3220 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3221 			}
   3222 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3223 		}
   3224 		WM_CORE_UNLOCK(sc);
   3225 #ifdef WM_MPSAFE
   3226 		s = splnet();
   3227 #endif
   3228 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3229 #ifdef WM_MPSAFE
   3230 		splx(s);
   3231 #endif
   3232 		break;
   3233 	case SIOCINITIFADDR:
   3234 		WM_CORE_LOCK(sc);
   3235 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3236 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3237 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3238 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3239 			/* unicast address is first multicast entry */
   3240 			wm_set_filter(sc);
   3241 			error = 0;
   3242 			WM_CORE_UNLOCK(sc);
   3243 			break;
   3244 		}
   3245 		WM_CORE_UNLOCK(sc);
   3246 		/*FALLTHROUGH*/
   3247 	default:
   3248 #ifdef WM_MPSAFE
   3249 		s = splnet();
   3250 #endif
   3251 		/* It may call wm_start, so unlock here */
   3252 		error = ether_ioctl(ifp, cmd, data);
   3253 #ifdef WM_MPSAFE
   3254 		splx(s);
   3255 #endif
   3256 		if (error != ENETRESET)
   3257 			break;
   3258 
   3259 		error = 0;
   3260 
   3261 		if (cmd == SIOCSIFCAP)
   3262 			error = (*ifp->if_init)(ifp);
   3263 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3264 			;
   3265 		else if (ifp->if_flags & IFF_RUNNING) {
   3266 			/*
   3267 			 * Multicast list has changed; set the hardware filter
   3268 			 * accordingly.
   3269 			 */
   3270 			WM_CORE_LOCK(sc);
   3271 			wm_set_filter(sc);
   3272 			WM_CORE_UNLOCK(sc);
   3273 		}
   3274 		break;
   3275 	}
   3276 
   3277 #ifndef WM_MPSAFE
   3278 	splx(s);
   3279 #endif
   3280 	return error;
   3281 }
   3282 
   3283 /* MAC address related */
   3284 
   3285 /*
   3286  * Get the offset of the MAC address and return it.
   3287  * If an error occurred, use offset 0.
   3288  */
   3289 static uint16_t
   3290 wm_check_alt_mac_addr(struct wm_softc *sc)
   3291 {
   3292 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3293 	uint16_t offset = NVM_OFF_MACADDR;
   3294 
   3295 	/* Try to read alternative MAC address pointer */
   3296 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3297 		return 0;
   3298 
   3299 	/* Check whether the pointer is valid. */
   3300 	if ((offset == 0x0000) || (offset == 0xffff))
   3301 		return 0;
   3302 
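	/* Each LAN function has its own slot within the alternative
	 * MAC address area. */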
   3303 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3304 	/*
   3305 	 * Check whether the alternative MAC address is valid.
   3306 	 * Some cards have a non-0xffff pointer but don't actually use
   3307 	 * an alternative MAC address.
   3308 	 *
   3309 	 * Do this by checking that the multicast (group) bit is clear.
   3310 	 */
   3311 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3312 		if ((myea[0] & 0x01) == 0)
   3313 			return offset; /* Found */
   3314 
   3315 	/* Not found */
   3316 	return 0;
   3317 }
   3318 
   3319 static int
   3320 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3321 {
   3322 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3323 	uint16_t offset = NVM_OFF_MACADDR;
   3324 	int do_invert = 0;
   3325 
   3326 	switch (sc->sc_type) {
   3327 	case WM_T_82580:
   3328 	case WM_T_I350:
   3329 	case WM_T_I354:
   3330 		/* EEPROM Top Level Partitioning */
   3331 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3332 		break;
   3333 	case WM_T_82571:
   3334 	case WM_T_82575:
   3335 	case WM_T_82576:
   3336 	case WM_T_80003:
   3337 	case WM_T_I210:
   3338 	case WM_T_I211:
   3339 		offset = wm_check_alt_mac_addr(sc);
   3340 		if (offset == 0)
   3341 			if ((sc->sc_funcid & 0x01) == 1)
   3342 				do_invert = 1;
   3343 		break;
   3344 	default:
   3345 		if ((sc->sc_funcid & 0x01) == 1)
   3346 			do_invert = 1;
   3347 		break;
   3348 	}
   3349 
   3350 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3351 		goto bad;
   3352 
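	/* NVM words are little-endian; unpack them into the byte array. */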
   3353 	enaddr[0] = myea[0] & 0xff;
   3354 	enaddr[1] = myea[0] >> 8;
   3355 	enaddr[2] = myea[1] & 0xff;
   3356 	enaddr[3] = myea[1] >> 8;
   3357 	enaddr[4] = myea[2] & 0xff;
   3358 	enaddr[5] = myea[2] >> 8;
   3359 
   3360 	/*
   3361 	 * Toggle the LSB of the MAC address on the second port
   3362 	 * of some dual port cards.
   3363 	 */
   3364 	if (do_invert != 0)
   3365 		enaddr[5] ^= 1;
   3366 
   3367 	return 0;
   3368 
   3369  bad:
   3370 	return -1;
   3371 }
   3372 
   3373 /*
   3374  * wm_set_ral:
   3375  *
   3376  *	Set an entry in the receive address list.
   3377  */
   3378 static void
   3379 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3380 {
   3381 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3382 	uint32_t wlock_mac;
   3383 	int rv;
   3384 
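	/*
	 * Pack the MAC address into the RAL (low)/RAH (high) pair;
	 * RAL_AV marks the entry valid.  A NULL enaddr clears the slot.
	 */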
   3385 	if (enaddr != NULL) {
   3386 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3387 		    (enaddr[3] << 24);
   3388 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3389 		ral_hi |= RAL_AV;
   3390 	} else {
   3391 		ral_lo = 0;
   3392 		ral_hi = 0;
   3393 	}
   3394 
   3395 	switch (sc->sc_type) {
   3396 	case WM_T_82542_2_0:
   3397 	case WM_T_82542_2_1:
   3398 	case WM_T_82543:
   3399 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3400 		CSR_WRITE_FLUSH(sc);
   3401 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3402 		CSR_WRITE_FLUSH(sc);
   3403 		break;
   3404 	case WM_T_PCH2:
   3405 	case WM_T_PCH_LPT:
   3406 	case WM_T_PCH_SPT:
   3407 	case WM_T_PCH_CNP:
   3408 		if (idx == 0) {
   3409 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3410 			CSR_WRITE_FLUSH(sc);
   3411 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3412 			CSR_WRITE_FLUSH(sc);
   3413 			return;
   3414 		}
   3415 		if (sc->sc_type != WM_T_PCH2) {
   3416 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3417 			    FWSM_WLOCK_MAC);
   3418 			addrl = WMREG_SHRAL(idx - 1);
   3419 			addrh = WMREG_SHRAH(idx - 1);
   3420 		} else {
   3421 			wlock_mac = 0;
   3422 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3423 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3424 		}
   3425 
   3426 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3427 			rv = wm_get_swflag_ich8lan(sc);
   3428 			if (rv != 0)
   3429 				return;
   3430 			CSR_WRITE(sc, addrl, ral_lo);
   3431 			CSR_WRITE_FLUSH(sc);
   3432 			CSR_WRITE(sc, addrh, ral_hi);
   3433 			CSR_WRITE_FLUSH(sc);
   3434 			wm_put_swflag_ich8lan(sc);
   3435 		}
   3436 
   3437 		break;
   3438 	default:
   3439 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3440 		CSR_WRITE_FLUSH(sc);
   3441 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3442 		CSR_WRITE_FLUSH(sc);
   3443 		break;
   3444 	}
   3445 }
   3446 
   3447 /*
   3448  * wm_mchash:
   3449  *
   3450  *	Compute the hash of the multicast address for the 4096-bit
   3451  *	multicast filter.
   3452  */
   3453 static uint32_t
   3454 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3455 {
   3456 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3457 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3458 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3459 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3460 	uint32_t hash;
   3461 
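	/*
	 * The hash is formed from bits of the last two octets of the
	 * address; sc_mchash_type selects one of four bit alignments,
	 * giving a 10-bit hash on ICH/PCH devices and a 12-bit hash
	 * elsewhere.
	 */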
   3462 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3463 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3464 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3465 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3466 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3467 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3468 		return (hash & 0x3ff);
   3469 	}
   3470 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3471 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3472 
   3473 	return (hash & 0xfff);
   3474 }
   3475 
   3476 /*
   3477  * wm_set_filter:
   3478  *
   3479  *	Set up the receive filter.
   3480  */
   3481 static void
   3482 wm_set_filter(struct wm_softc *sc)
   3483 {
   3484 	struct ethercom *ec = &sc->sc_ethercom;
   3485 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3486 	struct ether_multi *enm;
   3487 	struct ether_multistep step;
   3488 	bus_addr_t mta_reg;
   3489 	uint32_t hash, reg, bit;
   3490 	int i, size, ralmax;
   3491 
   3492 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3493 		device_xname(sc->sc_dev), __func__));
   3494 
   3495 	if (sc->sc_type >= WM_T_82544)
   3496 		mta_reg = WMREG_CORDOVA_MTA;
   3497 	else
   3498 		mta_reg = WMREG_MTA;
   3499 
   3500 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3501 
   3502 	if (ifp->if_flags & IFF_BROADCAST)
   3503 		sc->sc_rctl |= RCTL_BAM;
   3504 	if (ifp->if_flags & IFF_PROMISC) {
   3505 		sc->sc_rctl |= RCTL_UPE;
   3506 		goto allmulti;
   3507 	}
   3508 
   3509 	/*
   3510 	 * Set the station address in the first RAL slot, and
   3511 	 * clear the remaining slots.
   3512 	 */
   3513 	if (sc->sc_type == WM_T_ICH8)
   3514 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3515 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3516 	    || (sc->sc_type == WM_T_PCH))
   3517 		size = WM_RAL_TABSIZE_ICH8;
   3518 	else if (sc->sc_type == WM_T_PCH2)
   3519 		size = WM_RAL_TABSIZE_PCH2;
   3520 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3521 	    || (sc->sc_type == WM_T_PCH_CNP))
   3522 		size = WM_RAL_TABSIZE_PCH_LPT;
   3523 	else if (sc->sc_type == WM_T_82575)
   3524 		size = WM_RAL_TABSIZE_82575;
   3525 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3526 		size = WM_RAL_TABSIZE_82576;
   3527 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3528 		size = WM_RAL_TABSIZE_I350;
   3529 	else
   3530 		size = WM_RAL_TABSIZE;
   3531 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3532 
   3533 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3534 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3535 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3536 		switch (i) {
   3537 		case 0:
   3538 			/* We can use all entries */
   3539 			ralmax = size;
   3540 			break;
   3541 		case 1:
   3542 			/* Only RAR[0] */
   3543 			ralmax = 1;
   3544 			break;
   3545 		default:
   3546 			/* available SHRA + RAR[0] */
   3547 			ralmax = i + 1;
   3548 		}
   3549 	} else
   3550 		ralmax = size;
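	/* Clear the remaining slots, but only up to the writable limit. */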
   3551 	for (i = 1; i < size; i++) {
   3552 		if (i < ralmax)
   3553 			wm_set_ral(sc, NULL, i);
   3554 	}
   3555 
   3556 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3557 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3558 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3559 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3560 		size = WM_ICH8_MC_TABSIZE;
   3561 	else
   3562 		size = WM_MC_TABSIZE;
   3563 	/* Clear out the multicast table. */
   3564 	for (i = 0; i < size; i++) {
   3565 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3566 		CSR_WRITE_FLUSH(sc);
   3567 	}
   3568 
   3569 	ETHER_LOCK(ec);
   3570 	ETHER_FIRST_MULTI(step, ec, enm);
   3571 	while (enm != NULL) {
   3572 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3573 			ETHER_UNLOCK(ec);
   3574 			/*
   3575 			 * We must listen to a range of multicast addresses.
   3576 			 * For now, just accept all multicasts, rather than
   3577 			 * trying to set only those filter bits needed to match
   3578 			 * the range.  (At this time, the only use of address
   3579 			 * ranges is for IP multicast routing, for which the
   3580 			 * range is big enough to require all bits set.)
   3581 			 */
   3582 			goto allmulti;
   3583 		}
   3584 
   3585 		hash = wm_mchash(sc, enm->enm_addrlo);
   3586 
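		/*
		 * The upper bits of the hash select the MTA word (32 words
		 * on ICH/PCH, 128 elsewhere); the low 5 bits select the bit
		 * within that word.
		 */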
   3587 		reg = (hash >> 5);
   3588 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3589 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3590 		    || (sc->sc_type == WM_T_PCH2)
   3591 		    || (sc->sc_type == WM_T_PCH_LPT)
   3592 		    || (sc->sc_type == WM_T_PCH_SPT)
   3593 		    || (sc->sc_type == WM_T_PCH_CNP))
   3594 			reg &= 0x1f;
   3595 		else
   3596 			reg &= 0x7f;
   3597 		bit = hash & 0x1f;
   3598 
   3599 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3600 		hash |= 1U << bit;
   3601 
   3602 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3603 			/*
   3604 			 * 82544 Errata 9: Certain registers cannot be written
   3605 			 * with particular alignments in PCI-X bus operation
   3606 			 * (FCAH, MTA and VFTA).
   3607 			 */
   3608 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3609 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3610 			CSR_WRITE_FLUSH(sc);
   3611 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3612 			CSR_WRITE_FLUSH(sc);
   3613 		} else {
   3614 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3615 			CSR_WRITE_FLUSH(sc);
   3616 		}
   3617 
   3618 		ETHER_NEXT_MULTI(step, enm);
   3619 	}
   3620 	ETHER_UNLOCK(ec);
   3621 
   3622 	ifp->if_flags &= ~IFF_ALLMULTI;
   3623 	goto setit;
   3624 
   3625  allmulti:
   3626 	ifp->if_flags |= IFF_ALLMULTI;
   3627 	sc->sc_rctl |= RCTL_MPE;
   3628 
   3629  setit:
   3630 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3631 }
   3632 
   3633 /* Reset and init related */
   3634 
   3635 static void
   3636 wm_set_vlan(struct wm_softc *sc)
   3637 {
   3638 
   3639 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3640 		device_xname(sc->sc_dev), __func__));
   3641 
   3642 	/* Deal with VLAN enables. */
   3643 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3644 		sc->sc_ctrl |= CTRL_VME;
   3645 	else
   3646 		sc->sc_ctrl &= ~CTRL_VME;
   3647 
   3648 	/* Write the control registers. */
   3649 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3650 }
   3651 
   3652 static void
   3653 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3654 {
   3655 	uint32_t gcr;
   3656 	pcireg_t ctrl2;
   3657 
   3658 	gcr = CSR_READ(sc, WMREG_GCR);
   3659 
   3660 	/* Only take action if timeout value is defaulted to 0 */
   3661 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3662 		goto out;
   3663 
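	/*
	 * Capability-version-1 devices can only take the 10ms timeout
	 * directly in GCR; version-2 devices set a 16ms timeout through
	 * the PCIe Device Control 2 register instead.
	 */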
   3664 	if ((gcr & GCR_CAP_VER2) == 0) {
   3665 		gcr |= GCR_CMPL_TMOUT_10MS;
   3666 		goto out;
   3667 	}
   3668 
   3669 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3670 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3671 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3672 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3673 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3674 
   3675 out:
   3676 	/* Disable completion timeout resend */
   3677 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3678 
   3679 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3680 }
   3681 
   3682 void
   3683 wm_get_auto_rd_done(struct wm_softc *sc)
   3684 {
   3685 	int i;
   3686 
   3687 	/* Wait for eeprom to reload */
   3688 	switch (sc->sc_type) {
   3689 	case WM_T_82571:
   3690 	case WM_T_82572:
   3691 	case WM_T_82573:
   3692 	case WM_T_82574:
   3693 	case WM_T_82583:
   3694 	case WM_T_82575:
   3695 	case WM_T_82576:
   3696 	case WM_T_82580:
   3697 	case WM_T_I350:
   3698 	case WM_T_I354:
   3699 	case WM_T_I210:
   3700 	case WM_T_I211:
   3701 	case WM_T_80003:
   3702 	case WM_T_ICH8:
   3703 	case WM_T_ICH9:
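		/* Poll the auto-read-done bit for up to 10ms. */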
   3704 		for (i = 0; i < 10; i++) {
   3705 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3706 				break;
   3707 			delay(1000);
   3708 		}
   3709 		if (i == 10) {
   3710 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3711 			    "complete\n", device_xname(sc->sc_dev));
   3712 		}
   3713 		break;
   3714 	default:
   3715 		break;
   3716 	}
   3717 }
   3718 
   3719 void
   3720 wm_lan_init_done(struct wm_softc *sc)
   3721 {
   3722 	uint32_t reg = 0;
   3723 	int i;
   3724 
   3725 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3726 		device_xname(sc->sc_dev), __func__));
   3727 
   3728 	/* Wait for eeprom to reload */
   3729 	switch (sc->sc_type) {
   3730 	case WM_T_ICH10:
   3731 	case WM_T_PCH:
   3732 	case WM_T_PCH2:
   3733 	case WM_T_PCH_LPT:
   3734 	case WM_T_PCH_SPT:
   3735 	case WM_T_PCH_CNP:
   3736 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3737 			reg = CSR_READ(sc, WMREG_STATUS);
   3738 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3739 				break;
   3740 			delay(100);
   3741 		}
   3742 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3743 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3744 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3745 		}
   3746 		break;
   3747 	default:
   3748 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3749 		    __func__);
   3750 		break;
   3751 	}
   3752 
   3753 	reg &= ~STATUS_LAN_INIT_DONE;
   3754 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3755 }
   3756 
   3757 void
   3758 wm_get_cfg_done(struct wm_softc *sc)
   3759 {
   3760 	int mask;
   3761 	uint32_t reg;
   3762 	int i;
   3763 
   3764 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3765 		device_xname(sc->sc_dev), __func__));
   3766 
   3767 	/* Wait for eeprom to reload */
   3768 	switch (sc->sc_type) {
   3769 	case WM_T_82542_2_0:
   3770 	case WM_T_82542_2_1:
   3771 		/* null */
   3772 		break;
   3773 	case WM_T_82543:
   3774 	case WM_T_82544:
   3775 	case WM_T_82540:
   3776 	case WM_T_82545:
   3777 	case WM_T_82545_3:
   3778 	case WM_T_82546:
   3779 	case WM_T_82546_3:
   3780 	case WM_T_82541:
   3781 	case WM_T_82541_2:
   3782 	case WM_T_82547:
   3783 	case WM_T_82547_2:
   3784 	case WM_T_82573:
   3785 	case WM_T_82574:
   3786 	case WM_T_82583:
   3787 		/* generic */
   3788 		delay(10*1000);
   3789 		break;
   3790 	case WM_T_80003:
   3791 	case WM_T_82571:
   3792 	case WM_T_82572:
   3793 	case WM_T_82575:
   3794 	case WM_T_82576:
   3795 	case WM_T_82580:
   3796 	case WM_T_I350:
   3797 	case WM_T_I354:
   3798 	case WM_T_I210:
   3799 	case WM_T_I211:
   3800 		if (sc->sc_type == WM_T_82571) {
   3801 			/* Only 82571 shares port 0 */
   3802 			mask = EEMNGCTL_CFGDONE_0;
   3803 		} else
   3804 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3805 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3806 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3807 				break;
   3808 			delay(1000);
   3809 		}
   3810 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3811 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3812 				device_xname(sc->sc_dev), __func__));
   3813 		}
   3814 		break;
   3815 	case WM_T_ICH8:
   3816 	case WM_T_ICH9:
   3817 	case WM_T_ICH10:
   3818 	case WM_T_PCH:
   3819 	case WM_T_PCH2:
   3820 	case WM_T_PCH_LPT:
   3821 	case WM_T_PCH_SPT:
   3822 	case WM_T_PCH_CNP:
   3823 		delay(10*1000);
   3824 		if (sc->sc_type >= WM_T_ICH10)
   3825 			wm_lan_init_done(sc);
   3826 		else
   3827 			wm_get_auto_rd_done(sc);
   3828 
   3829 		reg = CSR_READ(sc, WMREG_STATUS);
   3830 		if ((reg & STATUS_PHYRA) != 0)
   3831 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3832 		break;
   3833 	default:
   3834 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3835 		    __func__);
   3836 		break;
   3837 	}
   3838 }
   3839 
   3840 void
   3841 wm_phy_post_reset(struct wm_softc *sc)
   3842 {
   3843 	uint32_t reg;
   3844 
   3845 	/* This function is only for ICH8 and newer. */
   3846 	if (sc->sc_type < WM_T_ICH8)
   3847 		return;
   3848 
   3849 	if (wm_phy_resetisblocked(sc)) {
   3850 		/* XXX */
   3851 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3852 		return;
   3853 	}
   3854 
   3855 	/* Allow time for h/w to get to quiescent state after reset */
   3856 	delay(10*1000);
   3857 
   3858 	/* Perform any necessary post-reset workarounds */
   3859 	if (sc->sc_type == WM_T_PCH)
   3860 		wm_hv_phy_workaround_ich8lan(sc);
   3861 	else if (sc->sc_type == WM_T_PCH2)
   3862 		wm_lv_phy_workaround_ich8lan(sc);
   3863 
   3864 	/* Clear the host wakeup bit after lcd reset */
   3865 	if (sc->sc_type >= WM_T_PCH) {
   3866 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3867 		    BM_PORT_GEN_CFG);
   3868 		reg &= ~BM_WUC_HOST_WU_BIT;
   3869 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3870 		    BM_PORT_GEN_CFG, reg);
   3871 	}
   3872 
   3873 	/* Configure the LCD with the extended configuration region in NVM */
   3874 	wm_init_lcd_from_nvm(sc);
   3875 
   3876 	/* XXX Configure the LCD with the OEM bits in NVM */
   3877 
   3878 	if (sc->sc_type == WM_T_PCH2) {
   3879 		/* Ungate automatic PHY configuration on non-managed 82579 */
   3880 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   3881 			delay(10 * 1000);
   3882 			wm_gate_hw_phy_config_ich8lan(sc, false);
   3883 		}
   3884 		/* XXX Set EEE LPI Update Timer to 200usec */
   3885 	}
   3886 }
   3887 
   3888 /* Only for PCH and newer */
   3889 static void
   3890 wm_write_smbus_addr(struct wm_softc *sc)
   3891 {
   3892 	uint32_t strap, freq;
   3893 	uint32_t phy_data;
   3894 
   3895 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3896 		device_xname(sc->sc_dev), __func__));
   3897 
   3898 	strap = CSR_READ(sc, WMREG_STRAP);
   3899 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3900 
   3901 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3902 
   3903 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3904 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3905 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3906 
   3907 	if (sc->sc_phytype == WMPHY_I217) {
   3908 		/* Restore SMBus frequency */
   3909 		if (freq--) {
   3910 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3911 			    | HV_SMB_ADDR_FREQ_HIGH);
   3912 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3913 			    HV_SMB_ADDR_FREQ_LOW);
   3914 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3915 			    HV_SMB_ADDR_FREQ_HIGH);
   3916 		} else {
   3917 			DPRINTF(WM_DEBUG_INIT,
   3918 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3919 				device_xname(sc->sc_dev), __func__));
   3920 		}
   3921 	}
   3922 
   3923 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3924 }
   3925 
   3926 void
   3927 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3928 {
   3929 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3930 	uint16_t phy_page = 0;
   3931 
   3932 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3933 		device_xname(sc->sc_dev), __func__));
   3934 
   3935 	switch (sc->sc_type) {
   3936 	case WM_T_ICH8:
   3937 		if (sc->sc_phytype != WMPHY_IGP_3)
   3939 			return;
   3940 
   3941 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3942 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3943 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3944 			break;
   3945 		}
   3946 		/* FALLTHROUGH */
   3947 	case WM_T_PCH:
   3948 	case WM_T_PCH2:
   3949 	case WM_T_PCH_LPT:
   3950 	case WM_T_PCH_SPT:
   3951 	case WM_T_PCH_CNP:
   3952 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3953 		break;
   3954 	default:
   3955 		return;
   3956 	}
   3957 
   3958 	sc->phy.acquire(sc);
   3959 
   3960 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3961 	if ((reg & sw_cfg_mask) == 0)
   3962 		goto release;
   3963 
   3964 	/*
   3965 	 * Make sure HW does not configure LCD from PHY extended configuration
   3966 	 * before SW configuration
   3967 	 */
   3968 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3969 	if ((sc->sc_type < WM_T_PCH2)
   3970 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3971 		goto release;
   3972 
   3973 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3974 		device_xname(sc->sc_dev), __func__));
   3975 	/* word_addr is in DWORD */
   3976 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3977 
   3978 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3979 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3980 	if (cnf_size == 0)
   3981 		goto release;
   3982 
   3983 	if (((sc->sc_type == WM_T_PCH)
   3984 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3985 	    || (sc->sc_type > WM_T_PCH)) {
   3986 		/*
   3987 		 * HW configures the SMBus address and LEDs when the OEM and
   3988 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3989 		 * are cleared, SW will configure them instead.
   3990 		 */
   3991 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3992 			device_xname(sc->sc_dev), __func__));
   3993 		wm_write_smbus_addr(sc);
   3994 
   3995 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3996 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3997 	}
   3998 
   3999 	/* Configure LCD from extended configuration region. */
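	/* Each entry in the region is a (data, address) word pair. */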
   4000 	for (i = 0; i < cnf_size; i++) {
   4001 		uint16_t reg_data, reg_addr;
   4002 
   4003 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4004 			goto release;
   4005 
   4006 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4007 			goto release;
   4008 
   4009 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4010 			phy_page = reg_data;
   4011 
   4012 		reg_addr &= IGPHY_MAXREGADDR;
   4013 		reg_addr |= phy_page;
   4014 
   4015 		sc->phy.release(sc); /* XXX */
   4016 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   4017 		sc->phy.acquire(sc); /* XXX */
   4018 	}
   4019 
   4020 release:
   4021 	sc->phy.release(sc);
   4022 	return;
   4023 }
   4024 
   4025 
   4026 /* Init hardware bits */
   4027 void
   4028 wm_initialize_hardware_bits(struct wm_softc *sc)
   4029 {
   4030 	uint32_t tarc0, tarc1, reg;
   4031 
   4032 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4033 		device_xname(sc->sc_dev), __func__));
   4034 
   4035 	/* For 82571 variant, 80003 and ICHs */
   4036 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4037 	    || (sc->sc_type >= WM_T_80003)) {
   4038 
   4039 		/* Transmit Descriptor Control 0 */
   4040 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4041 		reg |= TXDCTL_COUNT_DESC;
   4042 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4043 
   4044 		/* Transmit Descriptor Control 1 */
   4045 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4046 		reg |= TXDCTL_COUNT_DESC;
   4047 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4048 
   4049 		/* TARC0 */
   4050 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4051 		switch (sc->sc_type) {
   4052 		case WM_T_82571:
   4053 		case WM_T_82572:
   4054 		case WM_T_82573:
   4055 		case WM_T_82574:
   4056 		case WM_T_82583:
   4057 		case WM_T_80003:
   4058 			/* Clear bits 30..27 */
   4059 			tarc0 &= ~__BITS(30, 27);
   4060 			break;
   4061 		default:
   4062 			break;
   4063 		}
   4064 
   4065 		switch (sc->sc_type) {
   4066 		case WM_T_82571:
   4067 		case WM_T_82572:
   4068 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4069 
   4070 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4071 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4072 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4073 			/* 8257[12] Errata No.7 */
   4074 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4075 
   4076 			/* TARC1 bit 28 */
   4077 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4078 				tarc1 &= ~__BIT(28);
   4079 			else
   4080 				tarc1 |= __BIT(28);
   4081 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4082 
   4083 			/*
   4084 			 * 8257[12] Errata No.13
   4085 			 * Disable Dynamic Clock Gating.
   4086 			 */
   4087 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4088 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4089 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4090 			break;
   4091 		case WM_T_82573:
   4092 		case WM_T_82574:
   4093 		case WM_T_82583:
   4094 			if ((sc->sc_type == WM_T_82574)
   4095 			    || (sc->sc_type == WM_T_82583))
   4096 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4097 
   4098 			/* Extended Device Control */
   4099 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4100 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4101 			reg |= __BIT(22);	/* Set bit 22 */
   4102 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4103 
   4104 			/* Device Control */
   4105 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4106 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4107 
   4108 			/* PCIe Control Register */
   4109 			/*
   4110 			 * 82573 Errata (unknown).
   4111 			 *
   4112 			 * 82574 Errata 25 and 82583 Errata 12
   4113 			 * "Dropped Rx Packets":
   4114 			 *   NVM Image Version 2.1.4 and newer do not have this bug.
   4115 			 */
   4116 			reg = CSR_READ(sc, WMREG_GCR);
   4117 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4118 			CSR_WRITE(sc, WMREG_GCR, reg);
   4119 
   4120 			if ((sc->sc_type == WM_T_82574)
   4121 			    || (sc->sc_type == WM_T_82583)) {
   4122 				/*
   4123 				 * Document says this bit must be set for
   4124 				 * proper operation.
   4125 				 */
   4126 				reg = CSR_READ(sc, WMREG_GCR);
   4127 				reg |= __BIT(22);
   4128 				CSR_WRITE(sc, WMREG_GCR, reg);
   4129 
   4130 				/*
   4131 				 * Apply a workaround for a hardware erratum
   4132 				 * documented in the errata docs. It fixes an
   4133 				 * issue where some error-prone or unreliable
   4134 				 * PCIe completions occur, particularly with
   4135 				 * ASPM enabled. Without the fix, the issue
   4136 				 * can cause Tx timeouts.
   4137 				 */
   4138 				reg = CSR_READ(sc, WMREG_GCR2);
   4139 				reg |= __BIT(0);
   4140 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4141 			}
   4142 			break;
   4143 		case WM_T_80003:
   4144 			/* TARC0 */
   4145 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4146 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4147 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4148 
   4149 			/* TARC1 bit 28 */
   4150 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4151 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4152 				tarc1 &= ~__BIT(28);
   4153 			else
   4154 				tarc1 |= __BIT(28);
   4155 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4156 			break;
   4157 		case WM_T_ICH8:
   4158 		case WM_T_ICH9:
   4159 		case WM_T_ICH10:
   4160 		case WM_T_PCH:
   4161 		case WM_T_PCH2:
   4162 		case WM_T_PCH_LPT:
   4163 		case WM_T_PCH_SPT:
   4164 		case WM_T_PCH_CNP:
   4165 			/* TARC0 */
   4166 			if (sc->sc_type == WM_T_ICH8) {
   4167 				/* Set TARC0 bits 29 and 28 */
   4168 				tarc0 |= __BITS(29, 28);
   4169 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4170 				tarc0 |= __BIT(29);
   4171 				/*
   4172 				 * Drop bit 28.  From Linux.
   4173 				 * See I218/I219 spec update
   4174 				 * "5. Buffer Overrun While the I219 is
   4175 				 * Processing DMA Transactions"
   4176 				 */
   4177 				tarc0 &= ~__BIT(28);
   4178 			}
   4179 			/* Set TARC0 bits 23,24,26,27 */
   4180 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4181 
   4182 			/* CTRL_EXT */
   4183 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4184 			reg |= __BIT(22);	/* Set bit 22 */
   4185 			/*
   4186 			 * Enable PHY low-power state when MAC is at D3
   4187 			 * w/o WoL
   4188 			 */
   4189 			if (sc->sc_type >= WM_T_PCH)
   4190 				reg |= CTRL_EXT_PHYPDEN;
   4191 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4192 
   4193 			/* TARC1 */
   4194 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4195 			/* bit 28 */
   4196 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4197 				tarc1 &= ~__BIT(28);
   4198 			else
   4199 				tarc1 |= __BIT(28);
   4200 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4201 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4202 
   4203 			/* Device Status */
   4204 			if (sc->sc_type == WM_T_ICH8) {
   4205 				reg = CSR_READ(sc, WMREG_STATUS);
   4206 				reg &= ~__BIT(31);
   4207 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4208 
   4209 			}
   4210 
   4211 			/* IOSFPC */
   4212 			if (sc->sc_type == WM_T_PCH_SPT) {
   4213 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4214 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4215 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4216 			}
   4217 			/*
   4218 			 * Work around a descriptor data corruption issue
   4219 			 * during NFS v2 UDP traffic by simply disabling the
   4220 			 * NFS filtering capability.
   4221 			 */
   4222 			reg = CSR_READ(sc, WMREG_RFCTL);
   4223 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4224 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4225 			break;
   4226 		default:
   4227 			break;
   4228 		}
   4229 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4230 
   4231 		switch (sc->sc_type) {
   4232 		/*
   4233 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4234 		 * Avoid RSS Hash Value bug.
   4235 		 */
   4236 		case WM_T_82571:
   4237 		case WM_T_82572:
   4238 		case WM_T_82573:
   4239 		case WM_T_80003:
   4240 		case WM_T_ICH8:
   4241 			reg = CSR_READ(sc, WMREG_RFCTL);
   4242 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4243 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4244 			break;
   4245 		case WM_T_82574:
   4246 			/* Use extended Rx descriptors. */
   4247 			reg = CSR_READ(sc, WMREG_RFCTL);
   4248 			reg |= WMREG_RFCTL_EXSTEN;
   4249 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4250 			break;
   4251 		default:
   4252 			break;
   4253 		}
   4254 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4255 		/*
   4256 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4257 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4258 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4259 		 * Correctly by the Device"
   4260 		 *
   4261 		 * I354(C2000) Errata AVR53:
   4262 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4263 		 * Hang"
   4264 		 */
   4265 		reg = CSR_READ(sc, WMREG_RFCTL);
   4266 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4267 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4268 	}
   4269 }
   4270 
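/*
 * wm_rxpbs_adjust_82580:
 *
 *	Map a raw RXPBS register value to the packet buffer size through
 *	the 82580 lookup table.  Out-of-range values yield 0.
 */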
   4271 static uint32_t
   4272 wm_rxpbs_adjust_82580(uint32_t val)
   4273 {
   4274 	uint32_t rv = 0;
   4275 
   4276 	if (val < __arraycount(wm_82580_rxpbs_table))
   4277 		rv = wm_82580_rxpbs_table[val];
   4278 
   4279 	return rv;
   4280 }
   4281 
   4282 /*
   4283  * wm_reset_phy:
   4284  *
   4285  *	generic PHY reset function.
   4286  *	Same as e1000_phy_hw_reset_generic()
   4287  */
   4288 static void
   4289 wm_reset_phy(struct wm_softc *sc)
   4290 {
   4291 	uint32_t reg;
   4292 
   4293 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4294 		device_xname(sc->sc_dev), __func__));
   4295 	if (wm_phy_resetisblocked(sc))
   4296 		return;
   4297 
   4298 	sc->phy.acquire(sc);
   4299 
   4300 	reg = CSR_READ(sc, WMREG_CTRL);
   4301 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4302 	CSR_WRITE_FLUSH(sc);
   4303 
   4304 	delay(sc->phy.reset_delay_us);
   4305 
   4306 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4307 	CSR_WRITE_FLUSH(sc);
   4308 
   4309 	delay(150);
   4310 
   4311 	sc->phy.release(sc);
   4312 
   4313 	wm_get_cfg_done(sc);
   4314 	wm_phy_post_reset(sc);
   4315 }
   4316 
   4317 /*
   4318  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4319  * so it is enough to check sc->sc_queue[0] only.
   4320  */
   4321 static void
   4322 wm_flush_desc_rings(struct wm_softc *sc)
   4323 {
   4324 	pcireg_t preg;
   4325 	uint32_t reg;
   4326 	struct wm_txqueue *txq;
   4327 	wiseman_txdesc_t *txd;
   4328 	int nexttx;
   4329 	uint32_t rctl;
   4330 
   4331 	/* First, disable MULR fix in FEXTNVM11 */
   4332 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4333 	reg |= FEXTNVM11_DIS_MULRFIX;
   4334 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4335 
   4336 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4337 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4338 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4339 		return;
   4340 
   4341 	/* TX */
   4342 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4343 	    device_xname(sc->sc_dev), preg, reg);
   4344 	reg = CSR_READ(sc, WMREG_TCTL);
   4345 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4346 
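	/*
	 * Queue a single dummy 512-byte descriptor so the pending
	 * internal flush request can complete.
	 */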
   4347 	txq = &sc->sc_queue[0].wmq_txq;
   4348 	nexttx = txq->txq_next;
   4349 	txd = &txq->txq_descs[nexttx];
   4350 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4351 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4352 	txd->wtx_fields.wtxu_status = 0;
   4353 	txd->wtx_fields.wtxu_options = 0;
   4354 	txd->wtx_fields.wtxu_vlan = 0;
   4355 
   4356 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4357 	    BUS_SPACE_BARRIER_WRITE);
   4358 
   4359 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4360 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4361 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4362 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4363 	delay(250);
   4364 
   4365 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4366 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4367 		return;
   4368 
   4369 	/* RX */
   4370 	printf("%s: Need RX flush (reg = %08x)\n",
   4371 	    device_xname(sc->sc_dev), preg);
   4372 	rctl = CSR_READ(sc, WMREG_RCTL);
   4373 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4374 	CSR_WRITE_FLUSH(sc);
   4375 	delay(150);
   4376 
   4377 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4378 	/* Zero the lower 14 bits (prefetch and host thresholds). */
   4379 	reg &= 0xffffc000;
   4380 	/*
   4381 	 * Update the thresholds: set the prefetch threshold to 31 and
   4382 	 * the host threshold to 1, and make sure the granularity is
   4383 	 * "descriptors", not "cache lines".
   4384 	 */
   4385 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4386 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4387 
   4388 	/*
   4389 	 * Momentarily enable the RX ring for the changes to take
   4390 	 * effect.
   4391 	 */
   4392 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4393 	CSR_WRITE_FLUSH(sc);
   4394 	delay(150);
   4395 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4396 }
   4397 
   4398 /*
   4399  * wm_reset:
   4400  *
   4401  *	Reset the i82542 chip.
   4402  */
   4403 static void
   4404 wm_reset(struct wm_softc *sc)
   4405 {
   4406 	int phy_reset = 0;
   4407 	int i, error = 0;
   4408 	uint32_t reg;
   4409 	uint16_t kmreg;
   4410 	int rv;
   4411 
   4412 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4413 		device_xname(sc->sc_dev), __func__));
   4414 	KASSERT(sc->sc_type != 0);
   4415 
   4416 	/*
   4417 	 * Allocate on-chip memory according to the MTU size.
   4418 	 * The Packet Buffer Allocation register must be written
   4419 	 * before the chip is reset.
   4420 	 */
   4421 	switch (sc->sc_type) {
   4422 	case WM_T_82547:
   4423 	case WM_T_82547_2:
   4424 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4425 		    PBA_22K : PBA_30K;
   4426 		for (i = 0; i < sc->sc_nqueues; i++) {
   4427 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4428 			txq->txq_fifo_head = 0;
   4429 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4430 			txq->txq_fifo_size =
   4431 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4432 			txq->txq_fifo_stall = 0;
   4433 		}
   4434 		break;
   4435 	case WM_T_82571:
   4436 	case WM_T_82572:
   4437 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4438 	case WM_T_80003:
   4439 		sc->sc_pba = PBA_32K;
   4440 		break;
   4441 	case WM_T_82573:
   4442 		sc->sc_pba = PBA_12K;
   4443 		break;
   4444 	case WM_T_82574:
   4445 	case WM_T_82583:
   4446 		sc->sc_pba = PBA_20K;
   4447 		break;
   4448 	case WM_T_82576:
   4449 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4450 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4451 		break;
   4452 	case WM_T_82580:
   4453 	case WM_T_I350:
   4454 	case WM_T_I354:
   4455 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4456 		break;
   4457 	case WM_T_I210:
   4458 	case WM_T_I211:
   4459 		sc->sc_pba = PBA_34K;
   4460 		break;
   4461 	case WM_T_ICH8:
   4462 		/* Workaround for a bit corruption issue in FIFO memory */
   4463 		sc->sc_pba = PBA_8K;
   4464 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4465 		break;
   4466 	case WM_T_ICH9:
   4467 	case WM_T_ICH10:
   4468 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4469 		    PBA_14K : PBA_10K;
   4470 		break;
   4471 	case WM_T_PCH:
   4472 	case WM_T_PCH2:	/* XXX 14K? */
   4473 	case WM_T_PCH_LPT:
   4474 	case WM_T_PCH_SPT:
   4475 	case WM_T_PCH_CNP:
   4476 		sc->sc_pba = PBA_26K;
   4477 		break;
   4478 	default:
   4479 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4480 		    PBA_40K : PBA_48K;
   4481 		break;
   4482 	}
   4483 	/*
   4484 	 * Only old or non-multiqueue devices have the PBA register.
   4485 	 * XXX Need special handling for 82575.
   4486 	 */
   4487 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4488 	    || (sc->sc_type == WM_T_82575))
   4489 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4490 
   4491 	/* Prevent the PCI-E bus from sticking */
   4492 	if (sc->sc_flags & WM_F_PCIE) {
   4493 		int timeout = 800;
   4494 
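		/* Request GIO master disable, then poll for up to 80ms. */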
   4495 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4496 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4497 
   4498 		while (timeout--) {
   4499 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4500 			    == 0)
   4501 				break;
   4502 			delay(100);
   4503 		}
   4504 		if (timeout == 0)
   4505 			device_printf(sc->sc_dev,
   4506 			    "failed to disable busmastering\n");
   4507 	}
   4508 
   4509 	/* Set the completion timeout for interface */
   4510 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4511 	    || (sc->sc_type == WM_T_82580)
   4512 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4513 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4514 		wm_set_pcie_completion_timeout(sc);
   4515 
   4516 	/* Clear interrupt */
   4517 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4518 	if (wm_is_using_msix(sc)) {
   4519 		if (sc->sc_type != WM_T_82574) {
   4520 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4521 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4522 		} else
   4523 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4524 	}
   4525 
   4526 	/* Stop the transmit and receive processes. */
   4527 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4528 	sc->sc_rctl &= ~RCTL_EN;
   4529 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4530 	CSR_WRITE_FLUSH(sc);
   4531 
   4532 	/* XXX set_tbi_sbp_82543() */
   4533 
   4534 	delay(10*1000);
   4535 
   4536 	/* Must acquire the MDIO ownership before MAC reset */
   4537 	switch (sc->sc_type) {
   4538 	case WM_T_82573:
   4539 	case WM_T_82574:
   4540 	case WM_T_82583:
   4541 		error = wm_get_hw_semaphore_82573(sc);
   4542 		break;
   4543 	default:
   4544 		break;
   4545 	}
   4546 
   4547 	/*
   4548 	 * 82541 Errata 29? & 82547 Errata 28?
    4549 	 * See also the description of the PHY_RST bit in the CTRL register
   4550 	 * in 8254x_GBe_SDM.pdf.
   4551 	 */
   4552 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4553 		CSR_WRITE(sc, WMREG_CTRL,
   4554 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4555 		CSR_WRITE_FLUSH(sc);
   4556 		delay(5000);
   4557 	}
   4558 
   4559 	switch (sc->sc_type) {
   4560 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4561 	case WM_T_82541:
   4562 	case WM_T_82541_2:
   4563 	case WM_T_82547:
   4564 	case WM_T_82547_2:
   4565 		/*
   4566 		 * On some chipsets, a reset through a memory-mapped write
   4567 		 * cycle can cause the chip to reset before completing the
    4568 	 * write cycle. This causes major headaches that can be avoided
   4569 		 * by issuing the reset via indirect register writes through
   4570 		 * I/O space.
   4571 		 *
   4572 		 * So, if we successfully mapped the I/O BAR at attach time,
   4573 		 * use that. Otherwise, try our luck with a memory-mapped
   4574 		 * reset.
   4575 		 */
   4576 		if (sc->sc_flags & WM_F_IOH_VALID)
   4577 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4578 		else
   4579 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4580 		break;
   4581 	case WM_T_82545_3:
   4582 	case WM_T_82546_3:
   4583 		/* Use the shadow control register on these chips. */
   4584 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4585 		break;
   4586 	case WM_T_80003:
   4587 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4588 		sc->phy.acquire(sc);
   4589 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4590 		sc->phy.release(sc);
   4591 		break;
   4592 	case WM_T_ICH8:
   4593 	case WM_T_ICH9:
   4594 	case WM_T_ICH10:
   4595 	case WM_T_PCH:
   4596 	case WM_T_PCH2:
   4597 	case WM_T_PCH_LPT:
   4598 	case WM_T_PCH_SPT:
   4599 	case WM_T_PCH_CNP:
   4600 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4601 		if (wm_phy_resetisblocked(sc) == false) {
   4602 			/*
   4603 			 * Gate automatic PHY configuration by hardware on
   4604 			 * non-managed 82579
   4605 			 */
   4606 			if ((sc->sc_type == WM_T_PCH2)
   4607 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4608 				== 0))
   4609 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4610 
   4611 			reg |= CTRL_PHY_RESET;
   4612 			phy_reset = 1;
   4613 		} else
    4614 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4615 		sc->phy.acquire(sc);
   4616 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4617 		/* Don't insert a completion barrier while resetting */
   4618 		delay(20*1000);
   4619 		mutex_exit(sc->sc_ich_phymtx);
   4620 		break;
   4621 	case WM_T_82580:
   4622 	case WM_T_I350:
   4623 	case WM_T_I354:
   4624 	case WM_T_I210:
   4625 	case WM_T_I211:
   4626 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4627 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4628 			CSR_WRITE_FLUSH(sc);
   4629 		delay(5000);
   4630 		break;
   4631 	case WM_T_82542_2_0:
   4632 	case WM_T_82542_2_1:
   4633 	case WM_T_82543:
   4634 	case WM_T_82540:
   4635 	case WM_T_82545:
   4636 	case WM_T_82546:
   4637 	case WM_T_82571:
   4638 	case WM_T_82572:
   4639 	case WM_T_82573:
   4640 	case WM_T_82574:
   4641 	case WM_T_82575:
   4642 	case WM_T_82576:
   4643 	case WM_T_82583:
   4644 	default:
   4645 		/* Everything else can safely use the documented method. */
   4646 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4647 		break;
   4648 	}
   4649 
   4650 	/* Must release the MDIO ownership after MAC reset */
   4651 	switch (sc->sc_type) {
   4652 	case WM_T_82573:
   4653 	case WM_T_82574:
   4654 	case WM_T_82583:
   4655 		if (error == 0)
   4656 			wm_put_hw_semaphore_82573(sc);
   4657 		break;
   4658 	default:
   4659 		break;
   4660 	}
   4661 
   4662 	/* Set Phy Config Counter to 50msec */
   4663 	if (sc->sc_type == WM_T_PCH2) {
   4664 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4665 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4666 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4667 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4668 	}
   4669 
   4670 	if (phy_reset != 0)
   4671 		wm_get_cfg_done(sc);
   4672 
   4673 	/* reload EEPROM */
   4674 	switch (sc->sc_type) {
   4675 	case WM_T_82542_2_0:
   4676 	case WM_T_82542_2_1:
   4677 	case WM_T_82543:
   4678 	case WM_T_82544:
   4679 		delay(10);
   4680 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4681 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4682 		CSR_WRITE_FLUSH(sc);
   4683 		delay(2000);
   4684 		break;
   4685 	case WM_T_82540:
   4686 	case WM_T_82545:
   4687 	case WM_T_82545_3:
   4688 	case WM_T_82546:
   4689 	case WM_T_82546_3:
   4690 		delay(5*1000);
   4691 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4692 		break;
   4693 	case WM_T_82541:
   4694 	case WM_T_82541_2:
   4695 	case WM_T_82547:
   4696 	case WM_T_82547_2:
   4697 		delay(20000);
   4698 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4699 		break;
   4700 	case WM_T_82571:
   4701 	case WM_T_82572:
   4702 	case WM_T_82573:
   4703 	case WM_T_82574:
   4704 	case WM_T_82583:
   4705 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4706 			delay(10);
   4707 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4708 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4709 			CSR_WRITE_FLUSH(sc);
   4710 		}
   4711 		/* check EECD_EE_AUTORD */
   4712 		wm_get_auto_rd_done(sc);
   4713 		/*
    4714 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
    4715 		 * is set.
   4716 		 */
   4717 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4718 		    || (sc->sc_type == WM_T_82583))
   4719 			delay(25*1000);
   4720 		break;
   4721 	case WM_T_82575:
   4722 	case WM_T_82576:
   4723 	case WM_T_82580:
   4724 	case WM_T_I350:
   4725 	case WM_T_I354:
   4726 	case WM_T_I210:
   4727 	case WM_T_I211:
   4728 	case WM_T_80003:
   4729 		/* check EECD_EE_AUTORD */
   4730 		wm_get_auto_rd_done(sc);
   4731 		break;
   4732 	case WM_T_ICH8:
   4733 	case WM_T_ICH9:
   4734 	case WM_T_ICH10:
   4735 	case WM_T_PCH:
   4736 	case WM_T_PCH2:
   4737 	case WM_T_PCH_LPT:
   4738 	case WM_T_PCH_SPT:
   4739 	case WM_T_PCH_CNP:
   4740 		break;
   4741 	default:
   4742 		panic("%s: unknown type\n", __func__);
   4743 	}
   4744 
   4745 	/* Check whether EEPROM is present or not */
   4746 	switch (sc->sc_type) {
   4747 	case WM_T_82575:
   4748 	case WM_T_82576:
   4749 	case WM_T_82580:
   4750 	case WM_T_I350:
   4751 	case WM_T_I354:
   4752 	case WM_T_ICH8:
   4753 	case WM_T_ICH9:
   4754 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4755 			/* Not found */
   4756 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4757 			if (sc->sc_type == WM_T_82575)
   4758 				wm_reset_init_script_82575(sc);
   4759 		}
   4760 		break;
   4761 	default:
   4762 		break;
   4763 	}
   4764 
   4765 	if (phy_reset != 0)
   4766 		wm_phy_post_reset(sc);
   4767 
   4768 	if ((sc->sc_type == WM_T_82580)
   4769 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4770 		/* clear global device reset status bit */
   4771 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4772 	}
   4773 
   4774 	/* Clear any pending interrupt events. */
   4775 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4776 	reg = CSR_READ(sc, WMREG_ICR);
   4777 	if (wm_is_using_msix(sc)) {
   4778 		if (sc->sc_type != WM_T_82574) {
   4779 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4780 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4781 		} else
   4782 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4783 	}
   4784 
   4785 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4786 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4787 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4788 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4789 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4790 		reg |= KABGTXD_BGSQLBIAS;
   4791 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4792 	}
   4793 
   4794 	/* reload sc_ctrl */
   4795 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4796 
   4797 	if (sc->sc_type == WM_T_I354) {
   4798 #if 0
   4799 		/* I354 uses an external PHY */
   4800 		wm_set_eee_i354(sc);
   4801 #endif
   4802 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4803 		wm_set_eee_i350(sc);
   4804 
   4805 	/*
   4806 	 * For PCH, this write will make sure that any noise will be detected
   4807 	 * as a CRC error and be dropped rather than show up as a bad packet
   4808 	 * to the DMA engine
   4809 	 */
   4810 	if (sc->sc_type == WM_T_PCH)
   4811 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4812 
   4813 	if (sc->sc_type >= WM_T_82544)
   4814 		CSR_WRITE(sc, WMREG_WUC, 0);
   4815 
   4816 	wm_reset_mdicnfg_82580(sc);
   4817 
   4818 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4819 		wm_pll_workaround_i210(sc);
   4820 
   4821 	if (sc->sc_type == WM_T_80003) {
   4822 		/* default to TRUE to enable the MDIC W/A */
   4823 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4824 
   4825 		rv = wm_kmrn_readreg(sc,
   4826 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4827 		if (rv == 0) {
   4828 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4829 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4830 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4831 			else
   4832 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4833 		}
   4834 	}
   4835 }
   4836 
   4837 /*
   4838  * wm_add_rxbuf:
   4839  *
    4840  *	Add a receive buffer to the indicated descriptor.
   4841  */
   4842 static int
   4843 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4844 {
   4845 	struct wm_softc *sc = rxq->rxq_sc;
   4846 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4847 	struct mbuf *m;
   4848 	int error;
   4849 
   4850 	KASSERT(mutex_owned(rxq->rxq_lock));
   4851 
   4852 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4853 	if (m == NULL)
   4854 		return ENOBUFS;
   4855 
   4856 	MCLGET(m, M_DONTWAIT);
   4857 	if ((m->m_flags & M_EXT) == 0) {
   4858 		m_freem(m);
   4859 		return ENOBUFS;
   4860 	}
   4861 
   4862 	if (rxs->rxs_mbuf != NULL)
   4863 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4864 
   4865 	rxs->rxs_mbuf = m;
   4866 
   4867 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4868 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4869 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4870 	if (error) {
   4871 		/* XXX XXX XXX */
   4872 		aprint_error_dev(sc->sc_dev,
   4873 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4874 		panic("wm_add_rxbuf");
   4875 	}
   4876 
   4877 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4878 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4879 
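         	/*
         	 * On NEWQUEUE chips, only (re)initialize the Rx descriptor while
         	 * the receiver is enabled; see the note about RCTL.EN and
         	 * wm_set_filter() in wm_init_locked().
         	 */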
   4880 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4881 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4882 			wm_init_rxdesc(rxq, idx);
   4883 	} else
   4884 		wm_init_rxdesc(rxq, idx);
   4885 
   4886 	return 0;
   4887 }
   4888 
   4889 /*
   4890  * wm_rxdrain:
   4891  *
   4892  *	Drain the receive queue.
   4893  */
   4894 static void
   4895 wm_rxdrain(struct wm_rxqueue *rxq)
   4896 {
   4897 	struct wm_softc *sc = rxq->rxq_sc;
   4898 	struct wm_rxsoft *rxs;
   4899 	int i;
   4900 
   4901 	KASSERT(mutex_owned(rxq->rxq_lock));
   4902 
   4903 	for (i = 0; i < WM_NRXDESC; i++) {
   4904 		rxs = &rxq->rxq_soft[i];
   4905 		if (rxs->rxs_mbuf != NULL) {
   4906 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4907 			m_freem(rxs->rxs_mbuf);
   4908 			rxs->rxs_mbuf = NULL;
   4909 		}
   4910 	}
   4911 }
   4912 
   4913 /*
   4914  * Setup registers for RSS.
   4915  *
   4916  * XXX not yet VMDq support
   4917  */
   4918 static void
   4919 wm_init_rss(struct wm_softc *sc)
   4920 {
   4921 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4922 	int i;
   4923 
   4924 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   4925 
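         	/*
         	 * Fill the redirection table (RETA): each entry maps one hash
         	 * result to an Rx queue, so assigning entries round-robin over
         	 * sc_nqueues spreads flows across the queues.
         	 */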
   4926 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4927 		int qid, reta_ent;
   4928 
   4929 		qid  = i % sc->sc_nqueues;
   4930 		switch (sc->sc_type) {
   4931 		case WM_T_82574:
   4932 			reta_ent = __SHIFTIN(qid,
   4933 			    RETA_ENT_QINDEX_MASK_82574);
   4934 			break;
   4935 		case WM_T_82575:
   4936 			reta_ent = __SHIFTIN(qid,
   4937 			    RETA_ENT_QINDEX1_MASK_82575);
   4938 			break;
   4939 		default:
   4940 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4941 			break;
   4942 		}
   4943 
   4944 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4945 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4946 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4947 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4948 	}
   4949 
   4950 	rss_getkey((uint8_t *)rss_key);
   4951 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4952 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4953 
   4954 	if (sc->sc_type == WM_T_82574)
   4955 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4956 	else
   4957 		mrqc = MRQC_ENABLE_RSS_MQ;
   4958 
   4959 	/*
   4960 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4961 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4962 	 */
   4963 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4964 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4965 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4966 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4967 
   4968 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4969 }
   4970 
   4971 /*
    4972  * Adjust the TX and RX queue numbers which the system actually uses
    4973  * (a worked example follows this comment block).
    4974  * The numbers are affected by the following parameters:
    4975  *     - The number of hardware queues
   4976  *     - The number of MSI-X vectors (= "nvectors" argument)
   4977  *     - ncpu
   4978  */
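         /*
          * A worked example with hypothetical numbers: an 82576 has 16
          * hardware queues, so with nvectors = 5 MSI-X vectors (one of which
          * is reserved for the link interrupt) and ncpu = 8 this reduces to
          * min(16, 5 - 1, 8) = 4 queues.
          */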
   4979 static void
   4980 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4981 {
   4982 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4983 
   4984 	if (nvectors < 2) {
   4985 		sc->sc_nqueues = 1;
   4986 		return;
   4987 	}
   4988 
   4989 	switch (sc->sc_type) {
   4990 	case WM_T_82572:
   4991 		hw_ntxqueues = 2;
   4992 		hw_nrxqueues = 2;
   4993 		break;
   4994 	case WM_T_82574:
   4995 		hw_ntxqueues = 2;
   4996 		hw_nrxqueues = 2;
   4997 		break;
   4998 	case WM_T_82575:
   4999 		hw_ntxqueues = 4;
   5000 		hw_nrxqueues = 4;
   5001 		break;
   5002 	case WM_T_82576:
   5003 		hw_ntxqueues = 16;
   5004 		hw_nrxqueues = 16;
   5005 		break;
   5006 	case WM_T_82580:
   5007 	case WM_T_I350:
   5008 	case WM_T_I354:
   5009 		hw_ntxqueues = 8;
   5010 		hw_nrxqueues = 8;
   5011 		break;
   5012 	case WM_T_I210:
   5013 		hw_ntxqueues = 4;
   5014 		hw_nrxqueues = 4;
   5015 		break;
   5016 	case WM_T_I211:
   5017 		hw_ntxqueues = 2;
   5018 		hw_nrxqueues = 2;
   5019 		break;
   5020 		/*
    5021 		 * As the Ethernet controllers below do not support MSI-X,
    5022 		 * this driver does not use multiqueue on them.
   5023 		 *     - WM_T_80003
   5024 		 *     - WM_T_ICH8
   5025 		 *     - WM_T_ICH9
   5026 		 *     - WM_T_ICH10
   5027 		 *     - WM_T_PCH
   5028 		 *     - WM_T_PCH2
   5029 		 *     - WM_T_PCH_LPT
   5030 		 */
   5031 	default:
   5032 		hw_ntxqueues = 1;
   5033 		hw_nrxqueues = 1;
   5034 		break;
   5035 	}
   5036 
   5037 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5038 
   5039 	/*
    5040 	 * Since more queues than MSI-X vectors cannot improve scaling,
    5041 	 * limit the number of queues actually used.
   5042 	 */
   5043 	if (nvectors < hw_nqueues + 1)
   5044 		sc->sc_nqueues = nvectors - 1;
   5045 	else
   5046 		sc->sc_nqueues = hw_nqueues;
   5047 
   5048 	/*
    5049 	 * Likewise, since more queues than CPUs cannot improve scaling,
    5050 	 * limit the number of queues actually used.
   5051 	 */
   5052 	if (ncpu < sc->sc_nqueues)
   5053 		sc->sc_nqueues = ncpu;
   5054 }
   5055 
   5056 static inline bool
   5057 wm_is_using_msix(struct wm_softc *sc)
   5058 {
   5059 
   5060 	return (sc->sc_nintrs > 1);
   5061 }
   5062 
   5063 static inline bool
   5064 wm_is_using_multiqueue(struct wm_softc *sc)
   5065 {
   5066 
   5067 	return (sc->sc_nqueues > 1);
   5068 }
   5069 
   5070 static int
   5071 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5072 {
   5073 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5074 	wmq->wmq_id = qidx;
   5075 	wmq->wmq_intr_idx = intr_idx;
   5076 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5077 #ifdef WM_MPSAFE
   5078 	    | SOFTINT_MPSAFE
   5079 #endif
   5080 	    , wm_handle_queue, wmq);
   5081 	if (wmq->wmq_si != NULL)
   5082 		return 0;
   5083 
   5084 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5085 	    wmq->wmq_id);
   5086 
   5087 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5088 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5089 	return ENOMEM;
   5090 }
   5091 
   5092 /*
    5093  * Both single-interrupt MSI and INTx can use this function.
   5094  */
   5095 static int
   5096 wm_setup_legacy(struct wm_softc *sc)
   5097 {
   5098 	pci_chipset_tag_t pc = sc->sc_pc;
   5099 	const char *intrstr = NULL;
   5100 	char intrbuf[PCI_INTRSTR_LEN];
   5101 	int error;
   5102 
   5103 	error = wm_alloc_txrx_queues(sc);
   5104 	if (error) {
   5105 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5106 		    error);
   5107 		return ENOMEM;
   5108 	}
   5109 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5110 	    sizeof(intrbuf));
   5111 #ifdef WM_MPSAFE
   5112 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5113 #endif
   5114 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5115 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5116 	if (sc->sc_ihs[0] == NULL) {
   5117 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5118 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5119 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5120 		return ENOMEM;
   5121 	}
   5122 
   5123 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5124 	sc->sc_nintrs = 1;
   5125 
   5126 	return wm_softint_establish(sc, 0, 0);
   5127 }
   5128 
   5129 static int
   5130 wm_setup_msix(struct wm_softc *sc)
   5131 {
   5132 	void *vih;
   5133 	kcpuset_t *affinity;
   5134 	int qidx, error, intr_idx, txrx_established;
   5135 	pci_chipset_tag_t pc = sc->sc_pc;
   5136 	const char *intrstr = NULL;
   5137 	char intrbuf[PCI_INTRSTR_LEN];
   5138 	char intr_xname[INTRDEVNAMEBUF];
   5139 
   5140 	if (sc->sc_nqueues < ncpu) {
   5141 		/*
   5142 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5143 		 * interrupts starts from CPU#1.
   5144 		 */
   5145 		sc->sc_affinity_offset = 1;
   5146 	} else {
   5147 		/*
    5148 		 * In this case, this device uses all CPUs, so we align the
    5149 		 * affinity cpu_index with the MSI-X vector number for readability.
   5150 		 */
   5151 		sc->sc_affinity_offset = 0;
   5152 	}
   5153 
   5154 	error = wm_alloc_txrx_queues(sc);
   5155 	if (error) {
   5156 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5157 		    error);
   5158 		return ENOMEM;
   5159 	}
   5160 
   5161 	kcpuset_create(&affinity, false);
   5162 	intr_idx = 0;
   5163 
   5164 	/*
   5165 	 * TX and RX
   5166 	 */
   5167 	txrx_established = 0;
   5168 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5169 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5170 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5171 
   5172 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5173 		    sizeof(intrbuf));
   5174 #ifdef WM_MPSAFE
   5175 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5176 		    PCI_INTR_MPSAFE, true);
   5177 #endif
   5178 		memset(intr_xname, 0, sizeof(intr_xname));
   5179 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5180 		    device_xname(sc->sc_dev), qidx);
   5181 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5182 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5183 		if (vih == NULL) {
   5184 			aprint_error_dev(sc->sc_dev,
   5185 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5186 			    intrstr ? " at " : "",
   5187 			    intrstr ? intrstr : "");
   5188 
   5189 			goto fail;
   5190 		}
   5191 		kcpuset_zero(affinity);
   5192 		/* Round-robin affinity */
   5193 		kcpuset_set(affinity, affinity_to);
   5194 		error = interrupt_distribute(vih, affinity, NULL);
   5195 		if (error == 0) {
   5196 			aprint_normal_dev(sc->sc_dev,
   5197 			    "for TX and RX interrupting at %s affinity to %u\n",
   5198 			    intrstr, affinity_to);
   5199 		} else {
   5200 			aprint_normal_dev(sc->sc_dev,
   5201 			    "for TX and RX interrupting at %s\n", intrstr);
   5202 		}
   5203 		sc->sc_ihs[intr_idx] = vih;
   5204 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5205 			goto fail;
   5206 		txrx_established++;
   5207 		intr_idx++;
   5208 	}
   5209 
   5210 	/*
   5211 	 * LINK
   5212 	 */
   5213 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5214 	    sizeof(intrbuf));
   5215 #ifdef WM_MPSAFE
   5216 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5217 #endif
   5218 	memset(intr_xname, 0, sizeof(intr_xname));
   5219 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5220 	    device_xname(sc->sc_dev));
   5221 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5222 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5223 	if (vih == NULL) {
   5224 		aprint_error_dev(sc->sc_dev,
   5225 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5226 		    intrstr ? " at " : "",
   5227 		    intrstr ? intrstr : "");
   5228 
   5229 		goto fail;
   5230 	}
    5231 	/* Keep the default affinity for the LINK interrupt */
   5232 	aprint_normal_dev(sc->sc_dev,
   5233 	    "for LINK interrupting at %s\n", intrstr);
   5234 	sc->sc_ihs[intr_idx] = vih;
   5235 	sc->sc_link_intr_idx = intr_idx;
   5236 
   5237 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5238 	kcpuset_destroy(affinity);
   5239 	return 0;
   5240 
   5241  fail:
   5242 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5243 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5244 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5245 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5246 	}
   5247 
   5248 	kcpuset_destroy(affinity);
   5249 	return ENOMEM;
   5250 }
   5251 
   5252 static void
   5253 wm_unset_stopping_flags(struct wm_softc *sc)
   5254 {
   5255 	int i;
   5256 
   5257 	KASSERT(WM_CORE_LOCKED(sc));
   5258 
   5259 	/*
    5260 	 * Must unset the stopping flags in ascending order.
   5261 	 */
   5262 	for (i = 0; i < sc->sc_nqueues; i++) {
   5263 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5264 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5265 
   5266 		mutex_enter(txq->txq_lock);
   5267 		txq->txq_stopping = false;
   5268 		mutex_exit(txq->txq_lock);
   5269 
   5270 		mutex_enter(rxq->rxq_lock);
   5271 		rxq->rxq_stopping = false;
   5272 		mutex_exit(rxq->rxq_lock);
   5273 	}
   5274 
   5275 	sc->sc_core_stopping = false;
   5276 }
   5277 
   5278 static void
   5279 wm_set_stopping_flags(struct wm_softc *sc)
   5280 {
   5281 	int i;
   5282 
   5283 	KASSERT(WM_CORE_LOCKED(sc));
   5284 
   5285 	sc->sc_core_stopping = true;
   5286 
   5287 	/*
    5288 	 * Must set the stopping flags in ascending order.
   5289 	 */
   5290 	for (i = 0; i < sc->sc_nqueues; i++) {
   5291 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5292 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5293 
   5294 		mutex_enter(rxq->rxq_lock);
   5295 		rxq->rxq_stopping = true;
   5296 		mutex_exit(rxq->rxq_lock);
   5297 
   5298 		mutex_enter(txq->txq_lock);
   5299 		txq->txq_stopping = true;
   5300 		mutex_exit(txq->txq_lock);
   5301 	}
   5302 }
   5303 
   5304 /*
    5305  * Write the interrupt interval value to the ITR or EITR register.
   5306  */
   5307 static void
   5308 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5309 {
   5310 
   5311 	if (!wmq->wmq_set_itr)
   5312 		return;
   5313 
   5314 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5315 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5316 
   5317 		/*
    5318 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
    5319 		 * the counter field in software.
   5320 		 */
   5321 		if (sc->sc_type == WM_T_82575)
   5322 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5323 		else
   5324 			eitr |= EITR_CNT_INGR;
   5325 
   5326 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5327 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5328 		/*
    5329 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5330 		 * the multiqueue function with MSI-X.
   5331 		 */
   5332 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5333 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5334 	} else {
   5335 		KASSERT(wmq->wmq_id == 0);
   5336 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5337 	}
   5338 
   5339 	wmq->wmq_set_itr = false;
   5340 }
   5341 
   5342 /*
   5343  * TODO
    5344  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5345  * however, it does not fit wm(4), so AIM stays disabled until we find
    5346  * an appropriate ITR calculation.
   5347  */
   5348 /*
    5349  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5350  * write. This function itself does not write the ITR/EITR register.
   5351  */
   5352 static void
   5353 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5354 {
   5355 #ifdef NOTYET
   5356 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5357 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5358 	uint32_t avg_size = 0;
   5359 	uint32_t new_itr;
   5360 
   5361 	if (rxq->rxq_packets)
   5362 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5363 	if (txq->txq_packets)
   5364 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5365 
   5366 	if (avg_size == 0) {
   5367 		new_itr = 450; /* restore default value */
   5368 		goto out;
   5369 	}
   5370 
   5371 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5372 	avg_size += 24;
   5373 
   5374 	/* Don't starve jumbo frames */
   5375 	avg_size = uimin(avg_size, 3000);
   5376 
   5377 	/* Give a little boost to mid-size frames */
   5378 	if ((avg_size > 300) && (avg_size < 1200))
   5379 		new_itr = avg_size / 3;
   5380 	else
   5381 		new_itr = avg_size / 2;
   5382 
   5383 out:
   5384 	/*
    5385 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5386 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5387 	 */
   5388 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5389 		new_itr *= 4;
   5390 
   5391 	if (new_itr != wmq->wmq_itr) {
   5392 		wmq->wmq_itr = new_itr;
   5393 		wmq->wmq_set_itr = true;
   5394 	} else
   5395 		wmq->wmq_set_itr = false;
   5396 
   5397 	rxq->rxq_packets = 0;
   5398 	rxq->rxq_bytes = 0;
   5399 	txq->txq_packets = 0;
   5400 	txq->txq_bytes = 0;
   5401 #endif
   5402 }
   5403 
   5404 /*
   5405  * wm_init:		[ifnet interface function]
   5406  *
   5407  *	Initialize the interface.
   5408  */
   5409 static int
   5410 wm_init(struct ifnet *ifp)
   5411 {
   5412 	struct wm_softc *sc = ifp->if_softc;
   5413 	int ret;
   5414 
   5415 	WM_CORE_LOCK(sc);
   5416 	ret = wm_init_locked(ifp);
   5417 	WM_CORE_UNLOCK(sc);
   5418 
   5419 	return ret;
   5420 }
   5421 
   5422 static int
   5423 wm_init_locked(struct ifnet *ifp)
   5424 {
   5425 	struct wm_softc *sc = ifp->if_softc;
   5426 	int i, j, trynum, error = 0;
   5427 	uint32_t reg;
   5428 
   5429 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5430 		device_xname(sc->sc_dev), __func__));
   5431 	KASSERT(WM_CORE_LOCKED(sc));
   5432 
   5433 	/*
    5434 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5435 	 * There is a small but measurable benefit to avoiding the adjustment
   5436 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5437 	 * on such platforms.  One possibility is that the DMA itself is
   5438 	 * slightly more efficient if the front of the entire packet (instead
   5439 	 * of the front of the headers) is aligned.
   5440 	 *
   5441 	 * Note we must always set align_tweak to 0 if we are using
   5442 	 * jumbo frames.
   5443 	 */
   5444 #ifdef __NO_STRICT_ALIGNMENT
   5445 	sc->sc_align_tweak = 0;
   5446 #else
   5447 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5448 		sc->sc_align_tweak = 0;
   5449 	else
   5450 		sc->sc_align_tweak = 2;
   5451 #endif /* __NO_STRICT_ALIGNMENT */
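         	/*
         	 * (The 2-byte tweak offsets the 14-byte Ethernet header so that
         	 * the IP header which follows it lands on a 4-byte boundary.)
         	 */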
   5452 
   5453 	/* Cancel any pending I/O. */
   5454 	wm_stop_locked(ifp, 0);
   5455 
   5456 	/* update statistics before reset */
   5457 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5458 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5459 
   5460 	/* PCH_SPT hardware workaround */
   5461 	if (sc->sc_type == WM_T_PCH_SPT)
   5462 		wm_flush_desc_rings(sc);
   5463 
   5464 	/* Reset the chip to a known state. */
   5465 	wm_reset(sc);
   5466 
   5467 	/*
    5468 	 * AMT-based hardware can now take control from firmware.
   5469 	 * Do this after reset.
   5470 	 */
   5471 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5472 		wm_get_hw_control(sc);
   5473 
   5474 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5475 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5476 		wm_legacy_irq_quirk_spt(sc);
   5477 
   5478 	/* Init hardware bits */
   5479 	wm_initialize_hardware_bits(sc);
   5480 
   5481 	/* Reset the PHY. */
   5482 	if (sc->sc_flags & WM_F_HAS_MII)
   5483 		wm_gmii_reset(sc);
   5484 
   5485 	/* Calculate (E)ITR value */
   5486 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5487 		/*
    5488 		 * For NEWQUEUE's EITR (except for the 82575).
    5489 		 * The 82575's EITR should be set to the same throttling value as
    5490 		 * other old controllers' ITR because the interrupt/sec calculation
    5491 		 * is the same, that is, 1,000,000,000 / (N * 256).
    5492 		 *
    5493 		 * The 82574's EITR should be set to the same throttling value as the ITR.
    5494 		 *
    5495 		 * For N interrupts/sec, set this value to:
    5496 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5497 		 */
   5498 		sc->sc_itr_init = 450;
   5499 	} else if (sc->sc_type >= WM_T_82543) {
   5500 		/*
   5501 		 * Set up the interrupt throttling register (units of 256ns)
   5502 		 * Note that a footnote in Intel's documentation says this
   5503 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5504 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5505 		 * that that is also true for the 1024ns units of the other
   5506 		 * interrupt-related timer registers -- so, really, we ought
   5507 		 * to divide this value by 4 when the link speed is low.
   5508 		 *
   5509 		 * XXX implement this division at link speed change!
   5510 		 */
   5511 
   5512 		/*
   5513 		 * For N interrupts/sec, set this value to:
   5514 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5515 		 * absolute and packet timer values to this value
   5516 		 * divided by 4 to get "simple timer" behavior.
   5517 		 */
   5518 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5519 	}
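         	/*
         	 * With the formulas above, these defaults correspond to roughly
         	 * 1,000,000 / 450 = ~2222 interrupts/sec in the EITR case and
         	 * 1,000,000,000 / (1500 * 256) = ~2604 interrupts/sec for ITR.
         	 */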
   5520 
   5521 	error = wm_init_txrx_queues(sc);
   5522 	if (error)
   5523 		goto out;
   5524 
   5525 	/*
   5526 	 * Clear out the VLAN table -- we don't use it (yet).
   5527 	 */
   5528 	CSR_WRITE(sc, WMREG_VET, 0);
   5529 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5530 		trynum = 10; /* Due to hw errata */
   5531 	else
   5532 		trynum = 1;
   5533 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5534 		for (j = 0; j < trynum; j++)
   5535 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5536 
   5537 	/*
   5538 	 * Set up flow-control parameters.
   5539 	 *
   5540 	 * XXX Values could probably stand some tuning.
   5541 	 */
   5542 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5543 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5544 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5545 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5546 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5547 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5548 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5549 	}
   5550 
   5551 	sc->sc_fcrtl = FCRTL_DFLT;
   5552 	if (sc->sc_type < WM_T_82543) {
   5553 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5554 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5555 	} else {
   5556 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5557 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5558 	}
   5559 
   5560 	if (sc->sc_type == WM_T_80003)
   5561 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5562 	else
   5563 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5564 
   5565 	/* Writes the control register. */
   5566 	wm_set_vlan(sc);
   5567 
   5568 	if (sc->sc_flags & WM_F_HAS_MII) {
   5569 		uint16_t kmreg;
   5570 
   5571 		switch (sc->sc_type) {
   5572 		case WM_T_80003:
   5573 		case WM_T_ICH8:
   5574 		case WM_T_ICH9:
   5575 		case WM_T_ICH10:
   5576 		case WM_T_PCH:
   5577 		case WM_T_PCH2:
   5578 		case WM_T_PCH_LPT:
   5579 		case WM_T_PCH_SPT:
   5580 		case WM_T_PCH_CNP:
   5581 			/*
    5582 			 * Set the MAC to wait the maximum time between
    5583 			 * iterations and increase the max iterations when
    5584 			 * polling the PHY; this fixes erroneous timeouts at
    5585 			 * 10Mbps.
   5586 			 */
   5587 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5588 			    0xFFFF);
   5589 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5590 			    &kmreg);
   5591 			kmreg |= 0x3F;
   5592 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5593 			    kmreg);
   5594 			break;
   5595 		default:
   5596 			break;
   5597 		}
   5598 
   5599 		if (sc->sc_type == WM_T_80003) {
   5600 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5601 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5602 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5603 
   5604 			/* Bypass RX and TX FIFO's */
   5605 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5606 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5607 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5608 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5609 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5610 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5611 		}
   5612 	}
   5613 #if 0
   5614 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5615 #endif
   5616 
   5617 	/* Set up checksum offload parameters. */
   5618 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5619 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5620 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5621 		reg |= RXCSUM_IPOFL;
   5622 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5623 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5624 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5625 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5626 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5627 
   5628 	/* Set registers about MSI-X */
   5629 	if (wm_is_using_msix(sc)) {
   5630 		uint32_t ivar;
   5631 		struct wm_queue *wmq;
   5632 		int qid, qintr_idx;
   5633 
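         		/*
         		 * The MSIXBM/IVAR registers map each queue's TX/RX
         		 * interrupt cause to an MSI-X vector; the register layout
         		 * differs per family, hence the per-type branches below.
         		 */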
   5634 		if (sc->sc_type == WM_T_82575) {
   5635 			/* Interrupt control */
   5636 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5637 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5638 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5639 
   5640 			/* TX and RX */
   5641 			for (i = 0; i < sc->sc_nqueues; i++) {
   5642 				wmq = &sc->sc_queue[i];
   5643 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5644 				    EITR_TX_QUEUE(wmq->wmq_id)
   5645 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5646 			}
   5647 			/* Link status */
   5648 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5649 			    EITR_OTHER);
   5650 		} else if (sc->sc_type == WM_T_82574) {
   5651 			/* Interrupt control */
   5652 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5653 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5654 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5655 
   5656 			/*
    5657 			 * Work around an issue with spurious interrupts
    5658 			 * in MSI-X mode.
    5659 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5660 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5661 			 */
   5662 			reg = CSR_READ(sc, WMREG_RFCTL);
   5663 			reg |= WMREG_RFCTL_ACKDIS;
   5664 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5665 
   5666 			ivar = 0;
   5667 			/* TX and RX */
   5668 			for (i = 0; i < sc->sc_nqueues; i++) {
   5669 				wmq = &sc->sc_queue[i];
   5670 				qid = wmq->wmq_id;
   5671 				qintr_idx = wmq->wmq_intr_idx;
   5672 
   5673 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5674 				    IVAR_TX_MASK_Q_82574(qid));
   5675 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5676 				    IVAR_RX_MASK_Q_82574(qid));
   5677 			}
   5678 			/* Link status */
   5679 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5680 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5681 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5682 		} else {
   5683 			/* Interrupt control */
   5684 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5685 			    | GPIE_EIAME | GPIE_PBA);
   5686 
   5687 			switch (sc->sc_type) {
   5688 			case WM_T_82580:
   5689 			case WM_T_I350:
   5690 			case WM_T_I354:
   5691 			case WM_T_I210:
   5692 			case WM_T_I211:
   5693 				/* TX and RX */
   5694 				for (i = 0; i < sc->sc_nqueues; i++) {
   5695 					wmq = &sc->sc_queue[i];
   5696 					qid = wmq->wmq_id;
   5697 					qintr_idx = wmq->wmq_intr_idx;
   5698 
   5699 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5700 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5701 					ivar |= __SHIFTIN((qintr_idx
   5702 						| IVAR_VALID),
   5703 					    IVAR_TX_MASK_Q(qid));
   5704 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5705 					ivar |= __SHIFTIN((qintr_idx
   5706 						| IVAR_VALID),
   5707 					    IVAR_RX_MASK_Q(qid));
   5708 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5709 				}
   5710 				break;
   5711 			case WM_T_82576:
   5712 				/* TX and RX */
   5713 				for (i = 0; i < sc->sc_nqueues; i++) {
   5714 					wmq = &sc->sc_queue[i];
   5715 					qid = wmq->wmq_id;
   5716 					qintr_idx = wmq->wmq_intr_idx;
   5717 
   5718 					ivar = CSR_READ(sc,
   5719 					    WMREG_IVAR_Q_82576(qid));
   5720 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5721 					ivar |= __SHIFTIN((qintr_idx
   5722 						| IVAR_VALID),
   5723 					    IVAR_TX_MASK_Q_82576(qid));
   5724 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5725 					ivar |= __SHIFTIN((qintr_idx
   5726 						| IVAR_VALID),
   5727 					    IVAR_RX_MASK_Q_82576(qid));
   5728 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5729 					    ivar);
   5730 				}
   5731 				break;
   5732 			default:
   5733 				break;
   5734 			}
   5735 
   5736 			/* Link status */
   5737 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5738 			    IVAR_MISC_OTHER);
   5739 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5740 		}
   5741 
   5742 		if (wm_is_using_multiqueue(sc)) {
   5743 			wm_init_rss(sc);
   5744 
   5745 			/*
    5746 			 * NOTE: Receive full-packet checksum offload
    5747 			 * is mutually exclusive with multiqueue. However,
    5748 			 * this is not the same as TCP/IP checksums, which
    5749 			 * still work.
    5750 			 */
   5751 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5752 			reg |= RXCSUM_PCSD;
   5753 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5754 		}
   5755 	}
   5756 
   5757 	/* Set up the interrupt registers. */
   5758 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5759 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5760 	    ICR_RXO | ICR_RXT0;
   5761 	if (wm_is_using_msix(sc)) {
   5762 		uint32_t mask;
   5763 		struct wm_queue *wmq;
   5764 
   5765 		switch (sc->sc_type) {
   5766 		case WM_T_82574:
   5767 			mask = 0;
   5768 			for (i = 0; i < sc->sc_nqueues; i++) {
   5769 				wmq = &sc->sc_queue[i];
   5770 				mask |= ICR_TXQ(wmq->wmq_id);
   5771 				mask |= ICR_RXQ(wmq->wmq_id);
   5772 			}
   5773 			mask |= ICR_OTHER;
   5774 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5775 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5776 			break;
   5777 		default:
   5778 			if (sc->sc_type == WM_T_82575) {
   5779 				mask = 0;
   5780 				for (i = 0; i < sc->sc_nqueues; i++) {
   5781 					wmq = &sc->sc_queue[i];
   5782 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5783 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5784 				}
   5785 				mask |= EITR_OTHER;
   5786 			} else {
   5787 				mask = 0;
   5788 				for (i = 0; i < sc->sc_nqueues; i++) {
   5789 					wmq = &sc->sc_queue[i];
   5790 					mask |= 1 << wmq->wmq_intr_idx;
   5791 				}
   5792 				mask |= 1 << sc->sc_link_intr_idx;
   5793 			}
   5794 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5795 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5796 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5797 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5798 			break;
   5799 		}
   5800 	} else
   5801 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5802 
   5803 	/* Set up the inter-packet gap. */
   5804 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5805 
   5806 	if (sc->sc_type >= WM_T_82543) {
   5807 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5808 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5809 			wm_itrs_writereg(sc, wmq);
   5810 		}
   5811 		/*
    5812 		 * Link interrupts occur much less frequently than TX
    5813 		 * and RX interrupts, so we don't tune the
    5814 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5815 		 * FreeBSD's if_igb does.
   5816 		 */
   5817 	}
   5818 
   5819 	/* Set the VLAN ethernetype. */
   5820 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5821 
   5822 	/*
   5823 	 * Set up the transmit control register; we start out with
    5824 	 * a collision distance suitable for FDX, but update it when
   5825 	 * we resolve the media type.
   5826 	 */
   5827 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5828 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5829 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5830 	if (sc->sc_type >= WM_T_82571)
   5831 		sc->sc_tctl |= TCTL_MULR;
   5832 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5833 
   5834 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5835 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5836 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5837 	}
   5838 
   5839 	if (sc->sc_type == WM_T_80003) {
   5840 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5841 		reg &= ~TCTL_EXT_GCEX_MASK;
   5842 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5843 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5844 	}
   5845 
   5846 	/* Set the media. */
   5847 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5848 		goto out;
   5849 
   5850 	/* Configure for OS presence */
   5851 	wm_init_manageability(sc);
   5852 
   5853 	/*
   5854 	 * Set up the receive control register; we actually program the
   5855 	 * register when we set the receive filter. Use multicast address
   5856 	 * offset type 0.
   5857 	 *
   5858 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   5859 	 * don't enable that feature.
   5860 	 */
   5861 	sc->sc_mchash_type = 0;
   5862 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5863 	    | RCTL_MO(sc->sc_mchash_type);
   5864 
   5865 	/*
    5866 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5867 	 */
   5868 	if (sc->sc_type == WM_T_82574)
   5869 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5870 
   5871 	/*
   5872 	 * The I350 has a bug where it always strips the CRC whether
    5873 	 * asked to or not, so ask for a stripped CRC here and cope with it in rxeof.
   5874 	 */
   5875 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5876 	    || (sc->sc_type == WM_T_I210))
   5877 		sc->sc_rctl |= RCTL_SECRC;
   5878 
   5879 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5880 	    && (ifp->if_mtu > ETHERMTU)) {
   5881 		sc->sc_rctl |= RCTL_LPE;
   5882 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5883 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5884 	}
   5885 
   5886 	if (MCLBYTES == 2048)
   5887 		sc->sc_rctl |= RCTL_2k;
   5888 	else {
   5889 		if (sc->sc_type >= WM_T_82543) {
   5890 			switch (MCLBYTES) {
   5891 			case 4096:
   5892 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5893 				break;
   5894 			case 8192:
   5895 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5896 				break;
   5897 			case 16384:
   5898 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5899 				break;
   5900 			default:
   5901 				panic("wm_init: MCLBYTES %d unsupported",
   5902 				    MCLBYTES);
   5903 				break;
   5904 			}
   5905 		} else
   5906 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5907 	}
   5908 
   5909 	/* Enable ECC */
   5910 	switch (sc->sc_type) {
   5911 	case WM_T_82571:
   5912 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5913 		reg |= PBA_ECC_CORR_EN;
   5914 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5915 		break;
   5916 	case WM_T_PCH_LPT:
   5917 	case WM_T_PCH_SPT:
   5918 	case WM_T_PCH_CNP:
   5919 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5920 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5921 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5922 
   5923 		sc->sc_ctrl |= CTRL_MEHE;
   5924 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5925 		break;
   5926 	default:
   5927 		break;
   5928 	}
   5929 
   5930 	/*
   5931 	 * Set the receive filter.
   5932 	 *
   5933 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5934 	 * the setting of RCTL.EN in wm_set_filter()
   5935 	 */
   5936 	wm_set_filter(sc);
   5937 
    5938 	/* On 82575 and later, set RDT only if RX is enabled */
   5939 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5940 		int qidx;
   5941 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5942 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5943 			for (i = 0; i < WM_NRXDESC; i++) {
   5944 				mutex_enter(rxq->rxq_lock);
   5945 				wm_init_rxdesc(rxq, i);
   5946 				mutex_exit(rxq->rxq_lock);
   5947 
   5948 			}
   5949 		}
   5950 	}
   5951 
   5952 	wm_unset_stopping_flags(sc);
   5953 
   5954 	/* Start the one second link check clock. */
   5955 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5956 
   5957 	/* ...all done! */
   5958 	ifp->if_flags |= IFF_RUNNING;
   5959 	ifp->if_flags &= ~IFF_OACTIVE;
   5960 
   5961  out:
   5962 	sc->sc_if_flags = ifp->if_flags;
   5963 	if (error)
   5964 		log(LOG_ERR, "%s: interface not running\n",
   5965 		    device_xname(sc->sc_dev));
   5966 	return error;
   5967 }
   5968 
   5969 /*
   5970  * wm_stop:		[ifnet interface function]
   5971  *
   5972  *	Stop transmission on the interface.
   5973  */
   5974 static void
   5975 wm_stop(struct ifnet *ifp, int disable)
   5976 {
   5977 	struct wm_softc *sc = ifp->if_softc;
   5978 
   5979 	WM_CORE_LOCK(sc);
   5980 	wm_stop_locked(ifp, disable);
   5981 	WM_CORE_UNLOCK(sc);
   5982 }
   5983 
   5984 static void
   5985 wm_stop_locked(struct ifnet *ifp, int disable)
   5986 {
   5987 	struct wm_softc *sc = ifp->if_softc;
   5988 	struct wm_txsoft *txs;
   5989 	int i, qidx;
   5990 
   5991 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5992 		device_xname(sc->sc_dev), __func__));
   5993 	KASSERT(WM_CORE_LOCKED(sc));
   5994 
   5995 	wm_set_stopping_flags(sc);
   5996 
   5997 	/* Stop the one second clock. */
   5998 	callout_stop(&sc->sc_tick_ch);
   5999 
   6000 	/* Stop the 82547 Tx FIFO stall check timer. */
   6001 	if (sc->sc_type == WM_T_82547)
   6002 		callout_stop(&sc->sc_txfifo_ch);
   6003 
   6004 	if (sc->sc_flags & WM_F_HAS_MII) {
   6005 		/* Down the MII. */
   6006 		mii_down(&sc->sc_mii);
   6007 	} else {
   6008 #if 0
   6009 		/* Should we clear PHY's status properly? */
   6010 		wm_reset(sc);
   6011 #endif
   6012 	}
   6013 
   6014 	/* Stop the transmit and receive processes. */
   6015 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6016 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6017 	sc->sc_rctl &= ~RCTL_EN;
   6018 
   6019 	/*
   6020 	 * Clear the interrupt mask to ensure the device cannot assert its
   6021 	 * interrupt line.
   6022 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6023 	 * service any currently pending or shared interrupt.
   6024 	 */
   6025 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6026 	sc->sc_icr = 0;
   6027 	if (wm_is_using_msix(sc)) {
   6028 		if (sc->sc_type != WM_T_82574) {
   6029 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6030 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6031 		} else
   6032 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6033 	}
   6034 
   6035 	/* Release any queued transmit buffers. */
   6036 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6037 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6038 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6039 		mutex_enter(txq->txq_lock);
   6040 		txq->txq_sending = false; /* ensure watchdog disabled */
   6041 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6042 			txs = &txq->txq_soft[i];
   6043 			if (txs->txs_mbuf != NULL) {
   6044 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6045 				m_freem(txs->txs_mbuf);
   6046 				txs->txs_mbuf = NULL;
   6047 			}
   6048 		}
   6049 		mutex_exit(txq->txq_lock);
   6050 	}
   6051 
   6052 	/* Mark the interface as down and cancel the watchdog timer. */
   6053 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6054 
   6055 	if (disable) {
   6056 		for (i = 0; i < sc->sc_nqueues; i++) {
   6057 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6058 			mutex_enter(rxq->rxq_lock);
   6059 			wm_rxdrain(rxq);
   6060 			mutex_exit(rxq->rxq_lock);
   6061 		}
   6062 	}
   6063 
   6064 #if 0 /* notyet */
   6065 	if (sc->sc_type >= WM_T_82544)
   6066 		CSR_WRITE(sc, WMREG_WUC, 0);
   6067 #endif
   6068 }
   6069 
   6070 static void
   6071 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6072 {
   6073 	struct mbuf *m;
   6074 	int i;
   6075 
   6076 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6077 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6078 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6079 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6080 		    m->m_data, m->m_len, m->m_flags);
   6081 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6082 	    i, i == 1 ? "" : "s");
   6083 }
   6084 
   6085 /*
   6086  * wm_82547_txfifo_stall:
   6087  *
   6088  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6089  *	reset the FIFO pointers, and restart packet transmission.
   6090  */
   6091 static void
   6092 wm_82547_txfifo_stall(void *arg)
   6093 {
   6094 	struct wm_softc *sc = arg;
   6095 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6096 
   6097 	mutex_enter(txq->txq_lock);
   6098 
   6099 	if (txq->txq_stopping)
   6100 		goto out;
   6101 
   6102 	if (txq->txq_fifo_stall) {
   6103 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6104 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6105 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6106 			/*
   6107 			 * Packets have drained.  Stop transmitter, reset
   6108 			 * FIFO pointers, restart transmitter, and kick
   6109 			 * the packet queue.
   6110 			 */
   6111 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6112 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6113 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6114 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6115 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6116 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6117 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6118 			CSR_WRITE_FLUSH(sc);
   6119 
   6120 			txq->txq_fifo_head = 0;
   6121 			txq->txq_fifo_stall = 0;
   6122 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6123 		} else {
   6124 			/*
   6125 			 * Still waiting for packets to drain; try again in
   6126 			 * another tick.
   6127 			 */
   6128 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6129 		}
   6130 	}
   6131 
   6132 out:
   6133 	mutex_exit(txq->txq_lock);
   6134 }
   6135 
   6136 /*
   6137  * wm_82547_txfifo_bugchk:
   6138  *
    6139  *	Check for the bug condition in the 82547 Tx FIFO.  We need to
    6140  *	prevent enqueueing a packet that would wrap around the end
    6141  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6142  *
   6143  *	We do this by checking the amount of space before the end
   6144  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6145  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6146  *	the internal FIFO pointers to the beginning, and restart
   6147  *	transmission on the interface.
   6148  */
   6149 #define	WM_FIFO_HDR		0x10
   6150 #define	WM_82547_PAD_LEN	0x3e0
   6151 static int
   6152 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6153 {
   6154 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6155 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6156 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
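         	/*
         	 * For example, a full-sized 1514-byte frame becomes
         	 * roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO space.
         	 */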
   6157 
   6158 	/* Just return if already stalled. */
   6159 	if (txq->txq_fifo_stall)
   6160 		return 1;
   6161 
   6162 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6163 		/* Stall only occurs in half-duplex mode. */
   6164 		goto send_packet;
   6165 	}
   6166 
   6167 	if (len >= WM_82547_PAD_LEN + space) {
   6168 		txq->txq_fifo_stall = 1;
   6169 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6170 		return 1;
   6171 	}
   6172 
   6173  send_packet:
   6174 	txq->txq_fifo_head += len;
   6175 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6176 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6177 
   6178 	return 0;
   6179 }
   6180 
   6181 static int
   6182 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6183 {
   6184 	int error;
   6185 
   6186 	/*
   6187 	 * Allocate the control data structures, and create and load the
   6188 	 * DMA map for it.
   6189 	 *
   6190 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6191 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6192 	 * both sets within the same 4G segment.
   6193 	 */
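         	/*
         	 * The 4G constraint is enforced by passing 0x100000000ULL as the
         	 * boundary argument to bus_dmamem_alloc() below.
         	 */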
   6194 	if (sc->sc_type < WM_T_82544)
   6195 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6196 	else
   6197 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6198 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6199 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6200 	else
   6201 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6202 
   6203 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6204 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6205 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6206 		aprint_error_dev(sc->sc_dev,
   6207 		    "unable to allocate TX control data, error = %d\n",
   6208 		    error);
   6209 		goto fail_0;
   6210 	}
   6211 
   6212 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6213 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6214 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6215 		aprint_error_dev(sc->sc_dev,
   6216 		    "unable to map TX control data, error = %d\n", error);
   6217 		goto fail_1;
   6218 	}
   6219 
   6220 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6221 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6222 		aprint_error_dev(sc->sc_dev,
   6223 		    "unable to create TX control data DMA map, error = %d\n",
   6224 		    error);
   6225 		goto fail_2;
   6226 	}
   6227 
   6228 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6229 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6230 		aprint_error_dev(sc->sc_dev,
   6231 		    "unable to load TX control data DMA map, error = %d\n",
   6232 		    error);
   6233 		goto fail_3;
   6234 	}
   6235 
   6236 	return 0;
   6237 
   6238  fail_3:
   6239 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6240  fail_2:
   6241 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6242 	    WM_TXDESCS_SIZE(txq));
   6243  fail_1:
   6244 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6245  fail_0:
   6246 	return error;
   6247 }
   6248 
   6249 static void
   6250 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6251 {
   6252 
   6253 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6254 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6255 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6256 	    WM_TXDESCS_SIZE(txq));
   6257 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6258 }
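
/*
 * A compiled-out sketch (declarations and error unwinding omitted;
 * names hypothetical) of the bus_dma(9) life cycle used by the
 * alloc/free pairs above and below: setup is alloc -> map -> create
 * -> load, and teardown undoes the steps in exactly the reverse
 * order.  The 4G "boundary" argument keeps the allocation within a
 * single 4G segment, as the NOTE above requires.
 */
#if 0
	bus_dmamem_alloc(tag, size, PAGE_SIZE,
	    (bus_size_t)0x100000000ULL, &seg, 1, &rseg, 0);
	bus_dmamem_map(tag, &seg, rseg, size, &kva, BUS_DMA_COHERENT);
	bus_dmamap_create(tag, size, 1, size, 0, 0, &map);
	bus_dmamap_load(tag, map, kva, size, NULL, 0);

	bus_dmamap_unload(tag, map);
	bus_dmamap_destroy(tag, map);
	bus_dmamem_unmap(tag, kva, size);
	bus_dmamem_free(tag, &seg, rseg);
#endif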
   6259 
   6260 static int
   6261 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6262 {
   6263 	int error;
   6264 	size_t rxq_descs_size;
   6265 
   6266 	/*
   6267 	 * Allocate the control data structures, and create and load the
   6268 	 * DMA map for it.
   6269 	 *
   6270 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6271 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6272 	 * both sets within the same 4G segment.
   6273 	 */
   6274 	rxq->rxq_ndesc = WM_NRXDESC;
   6275 	if (sc->sc_type == WM_T_82574)
   6276 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6277 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6278 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6279 	else
   6280 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6281 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6282 
   6283 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6284 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6285 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6286 		aprint_error_dev(sc->sc_dev,
   6287 		    "unable to allocate RX control data, error = %d\n",
   6288 		    error);
   6289 		goto fail_0;
   6290 	}
   6291 
   6292 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6293 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6294 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6295 		aprint_error_dev(sc->sc_dev,
   6296 		    "unable to map RX control data, error = %d\n", error);
   6297 		goto fail_1;
   6298 	}
   6299 
   6300 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6301 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6302 		aprint_error_dev(sc->sc_dev,
   6303 		    "unable to create RX control data DMA map, error = %d\n",
   6304 		    error);
   6305 		goto fail_2;
   6306 	}
   6307 
   6308 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6309 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6310 		aprint_error_dev(sc->sc_dev,
   6311 		    "unable to load RX control data DMA map, error = %d\n",
   6312 		    error);
   6313 		goto fail_3;
   6314 	}
   6315 
   6316 	return 0;
   6317 
   6318  fail_3:
   6319 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6320  fail_2:
   6321 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6322 	    rxq_descs_size);
   6323  fail_1:
   6324 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6325  fail_0:
   6326 	return error;
   6327 }
   6328 
   6329 static void
   6330 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6331 {
   6332 
   6333 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6334 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6335 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6336 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6337 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6338 }
   6339 
   6340 
   6341 static int
   6342 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6343 {
   6344 	int i, error;
   6345 
   6346 	/* Create the transmit buffer DMA maps. */
   6347 	WM_TXQUEUELEN(txq) =
   6348 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6349 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6350 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6351 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6352 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6353 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6354 			aprint_error_dev(sc->sc_dev,
   6355 			    "unable to create Tx DMA map %d, error = %d\n",
   6356 			    i, error);
   6357 			goto fail;
   6358 		}
   6359 	}
   6360 
   6361 	return 0;
   6362 
   6363  fail:
   6364 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6365 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6366 			bus_dmamap_destroy(sc->sc_dmat,
   6367 			    txq->txq_soft[i].txs_dmamap);
   6368 	}
   6369 	return error;
   6370 }
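
/*
 * Note (illustrative, not from the original): each of the
 * WM_TXQUEUELEN(txq) jobs above gets a DMA map of up to WM_NTXSEGS
 * segments, each segment at most WTX_MAX_LEN bytes, with WM_MAXTXDMA
 * bounding the whole mapping; a packet that still exceeds WM_NTXSEGS
 * segments after m_defrag() is dropped in the send path below with
 * the "toomanyseg" event.
 */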
   6371 
   6372 static void
   6373 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6374 {
   6375 	int i;
   6376 
   6377 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6378 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6379 			bus_dmamap_destroy(sc->sc_dmat,
   6380 			    txq->txq_soft[i].txs_dmamap);
   6381 	}
   6382 }
   6383 
   6384 static int
   6385 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6386 {
   6387 	int i, error;
   6388 
   6389 	/* Create the receive buffer DMA maps. */
   6390 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6391 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6392 			    MCLBYTES, 0, 0,
   6393 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6394 			aprint_error_dev(sc->sc_dev,
   6395 			    "unable to create Rx DMA map %d error = %d\n",
   6396 			    i, error);
   6397 			goto fail;
   6398 		}
   6399 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6400 	}
   6401 
   6402 	return 0;
   6403 
   6404  fail:
   6405 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6406 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6407 			bus_dmamap_destroy(sc->sc_dmat,
   6408 			    rxq->rxq_soft[i].rxs_dmamap);
   6409 	}
   6410 	return error;
   6411 }
   6412 
   6413 static void
   6414 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6415 {
   6416 	int i;
   6417 
   6418 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6419 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6420 			bus_dmamap_destroy(sc->sc_dmat,
   6421 			    rxq->rxq_soft[i].rxs_dmamap);
   6422 	}
   6423 }
   6424 
   6425 /*
    6426  * wm_alloc_txrx_queues:
   6427  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6428  */
   6429 static int
   6430 wm_alloc_txrx_queues(struct wm_softc *sc)
   6431 {
   6432 	int i, error, tx_done, rx_done;
   6433 
   6434 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6435 	    KM_SLEEP);
   6436 	if (sc->sc_queue == NULL) {
    6437 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6438 		error = ENOMEM;
   6439 		goto fail_0;
   6440 	}
   6441 
   6442 	/*
   6443 	 * For transmission
   6444 	 */
   6445 	error = 0;
   6446 	tx_done = 0;
   6447 	for (i = 0; i < sc->sc_nqueues; i++) {
   6448 #ifdef WM_EVENT_COUNTERS
   6449 		int j;
   6450 		const char *xname;
   6451 #endif
   6452 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6453 		txq->txq_sc = sc;
   6454 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6455 
   6456 		error = wm_alloc_tx_descs(sc, txq);
   6457 		if (error)
   6458 			break;
   6459 		error = wm_alloc_tx_buffer(sc, txq);
   6460 		if (error) {
   6461 			wm_free_tx_descs(sc, txq);
   6462 			break;
   6463 		}
   6464 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6465 		if (txq->txq_interq == NULL) {
   6466 			wm_free_tx_descs(sc, txq);
   6467 			wm_free_tx_buffer(sc, txq);
   6468 			error = ENOMEM;
   6469 			break;
   6470 		}
   6471 
   6472 #ifdef WM_EVENT_COUNTERS
   6473 		xname = device_xname(sc->sc_dev);
   6474 
   6475 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6476 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6477 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6478 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6479 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6480 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6481 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6482 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6483 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6484 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6485 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6486 
   6487 		for (j = 0; j < WM_NTXSEGS; j++) {
   6488 			snprintf(txq->txq_txseg_evcnt_names[j],
   6489 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6490 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6491 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6492 		}
   6493 
   6494 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6495 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6496 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6497 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6498 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6499 #endif /* WM_EVENT_COUNTERS */
   6500 
   6501 		tx_done++;
   6502 	}
   6503 	if (error)
   6504 		goto fail_1;
   6505 
   6506 	/*
    6507 	 * For receive
   6508 	 */
   6509 	error = 0;
   6510 	rx_done = 0;
   6511 	for (i = 0; i < sc->sc_nqueues; i++) {
   6512 #ifdef WM_EVENT_COUNTERS
   6513 		const char *xname;
   6514 #endif
   6515 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6516 		rxq->rxq_sc = sc;
   6517 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6518 
   6519 		error = wm_alloc_rx_descs(sc, rxq);
   6520 		if (error)
   6521 			break;
   6522 
   6523 		error = wm_alloc_rx_buffer(sc, rxq);
   6524 		if (error) {
   6525 			wm_free_rx_descs(sc, rxq);
   6526 			break;
   6527 		}
   6528 
   6529 #ifdef WM_EVENT_COUNTERS
   6530 		xname = device_xname(sc->sc_dev);
   6531 
   6532 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6533 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6534 
   6535 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6536 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6537 #endif /* WM_EVENT_COUNTERS */
   6538 
   6539 		rx_done++;
   6540 	}
   6541 	if (error)
   6542 		goto fail_2;
   6543 
   6544 	return 0;
   6545 
   6546  fail_2:
   6547 	for (i = 0; i < rx_done; i++) {
   6548 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6549 		wm_free_rx_buffer(sc, rxq);
   6550 		wm_free_rx_descs(sc, rxq);
   6551 		if (rxq->rxq_lock)
   6552 			mutex_obj_free(rxq->rxq_lock);
   6553 	}
   6554  fail_1:
   6555 	for (i = 0; i < tx_done; i++) {
   6556 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6557 		pcq_destroy(txq->txq_interq);
   6558 		wm_free_tx_buffer(sc, txq);
   6559 		wm_free_tx_descs(sc, txq);
   6560 		if (txq->txq_lock)
   6561 			mutex_obj_free(txq->txq_lock);
   6562 	}
   6563 
   6564 	kmem_free(sc->sc_queue,
   6565 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6566  fail_0:
   6567 	return error;
   6568 }
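
/*
 * A compiled-out sketch (hypothetical names) of the partial-failure
 * unwind idiom used by wm_alloc_txrx_queues() above: count how many
 * queues finished setup, and on error tear down only those.
 */
#if 0
	done = 0;
	for (i = 0; i < n; i++) {
		if ((error = setup(i)) != 0)
			goto fail;
		done++;
	}
	return 0;
 fail:
	for (i = 0; i < done; i++)
		teardown(i);
	return error;
#endif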
   6569 
   6570 /*
    6571  * wm_free_txrx_queues:
   6572  *	Free {tx,rx}descs and {tx,rx} buffers
   6573  */
   6574 static void
   6575 wm_free_txrx_queues(struct wm_softc *sc)
   6576 {
   6577 	int i;
   6578 
   6579 	for (i = 0; i < sc->sc_nqueues; i++) {
   6580 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6581 
   6582 #ifdef WM_EVENT_COUNTERS
   6583 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6584 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6585 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6586 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6587 #endif /* WM_EVENT_COUNTERS */
   6588 
   6589 		wm_free_rx_buffer(sc, rxq);
   6590 		wm_free_rx_descs(sc, rxq);
   6591 		if (rxq->rxq_lock)
   6592 			mutex_obj_free(rxq->rxq_lock);
   6593 	}
   6594 
   6595 	for (i = 0; i < sc->sc_nqueues; i++) {
   6596 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6597 		struct mbuf *m;
   6598 #ifdef WM_EVENT_COUNTERS
   6599 		int j;
   6600 
   6601 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6602 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6603 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6604 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6605 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6606 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6607 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6608 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6609 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6610 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6611 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6612 
   6613 		for (j = 0; j < WM_NTXSEGS; j++)
   6614 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6615 
   6616 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6617 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6618 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6619 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6620 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6621 #endif /* WM_EVENT_COUNTERS */
   6622 
   6623 		/* drain txq_interq */
   6624 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6625 			m_freem(m);
   6626 		pcq_destroy(txq->txq_interq);
   6627 
   6628 		wm_free_tx_buffer(sc, txq);
   6629 		wm_free_tx_descs(sc, txq);
   6630 		if (txq->txq_lock)
   6631 			mutex_obj_free(txq->txq_lock);
   6632 	}
   6633 
   6634 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6635 }
   6636 
   6637 static void
   6638 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6639 {
   6640 
   6641 	KASSERT(mutex_owned(txq->txq_lock));
   6642 
   6643 	/* Initialize the transmit descriptor ring. */
   6644 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6645 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6646 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6647 	txq->txq_free = WM_NTXDESC(txq);
   6648 	txq->txq_next = 0;
   6649 }
   6650 
   6651 static void
   6652 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6653     struct wm_txqueue *txq)
   6654 {
   6655 
   6656 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6657 		device_xname(sc->sc_dev), __func__));
   6658 	KASSERT(mutex_owned(txq->txq_lock));
   6659 
   6660 	if (sc->sc_type < WM_T_82543) {
   6661 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6662 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6663 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6664 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6665 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6666 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6667 	} else {
   6668 		int qid = wmq->wmq_id;
   6669 
   6670 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6671 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6672 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6673 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6674 
   6675 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6676 			/*
   6677 			 * Don't write TDT before TCTL.EN is set.
    6678 			 * See the datasheet.
   6679 			 */
   6680 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6681 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6682 			    | TXDCTL_WTHRESH(0));
   6683 		else {
   6684 			/* XXX should update with AIM? */
   6685 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6686 			if (sc->sc_type >= WM_T_82540) {
    6687 				/* Should be the same as TIDV. */
   6688 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6689 			}
   6690 
   6691 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6692 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6693 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6694 		}
   6695 	}
   6696 }
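
/*
 * Note on the "/ 4" conversions above (an assumption drawn from common
 * Intel NIC register units, not stated in this file): wmq_itr appears
 * to be kept in 256 ns units while TIDV/TADV count 1.024 us
 * increments, and 4 * 256 ns = 1.024 us; e.g. a hypothetical wmq_itr
 * of 468 would program TIDV = 117, i.e. roughly 120 us.
 */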
   6697 
   6698 static void
   6699 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6700 {
   6701 	int i;
   6702 
   6703 	KASSERT(mutex_owned(txq->txq_lock));
   6704 
   6705 	/* Initialize the transmit job descriptors. */
   6706 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6707 		txq->txq_soft[i].txs_mbuf = NULL;
   6708 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6709 	txq->txq_snext = 0;
   6710 	txq->txq_sdirty = 0;
   6711 }
   6712 
   6713 static void
   6714 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6715     struct wm_txqueue *txq)
   6716 {
   6717 
   6718 	KASSERT(mutex_owned(txq->txq_lock));
   6719 
   6720 	/*
   6721 	 * Set up some register offsets that are different between
   6722 	 * the i82542 and the i82543 and later chips.
   6723 	 */
   6724 	if (sc->sc_type < WM_T_82543)
   6725 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6726 	else
   6727 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6728 
   6729 	wm_init_tx_descs(sc, txq);
   6730 	wm_init_tx_regs(sc, wmq, txq);
   6731 	wm_init_tx_buffer(sc, txq);
   6732 
   6733 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6734 	txq->txq_sending = false;
   6735 }
   6736 
   6737 static void
   6738 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6739     struct wm_rxqueue *rxq)
   6740 {
   6741 
   6742 	KASSERT(mutex_owned(rxq->rxq_lock));
   6743 
   6744 	/*
   6745 	 * Initialize the receive descriptor and receive job
   6746 	 * descriptor rings.
   6747 	 */
   6748 	if (sc->sc_type < WM_T_82543) {
   6749 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6750 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6751 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6752 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6753 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6754 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6755 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6756 
   6757 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6758 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6759 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6760 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6761 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6762 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6763 	} else {
   6764 		int qid = wmq->wmq_id;
   6765 
   6766 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6767 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6768 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6769 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6770 
   6771 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6772 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6773 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6774 
    6775 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6776 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6777 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6778 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6779 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6780 			    | RXDCTL_WTHRESH(1));
   6781 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6782 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6783 		} else {
   6784 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6785 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6786 			/* XXX should update with AIM? */
   6787 			CSR_WRITE(sc, WMREG_RDTR,
   6788 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   6789 			/* MUST be same */
   6790 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6791 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6792 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6793 		}
   6794 	}
   6795 }
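
/*
 * Worked example for the SRRCTL write above, assuming the usual
 * SRRCTL_BSIZEPKT_SHIFT of 10 (1 KB units): MCLBYTES = 2048 gives
 * MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT = 2, i.e. a 2 KB packet buffer,
 * and the MCLBYTES sanity panic fires only if MCLBYTES is not a
 * multiple of 1 KB.
 */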
   6796 
   6797 static int
   6798 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6799 {
   6800 	struct wm_rxsoft *rxs;
   6801 	int error, i;
   6802 
   6803 	KASSERT(mutex_owned(rxq->rxq_lock));
   6804 
   6805 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6806 		rxs = &rxq->rxq_soft[i];
   6807 		if (rxs->rxs_mbuf == NULL) {
   6808 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6809 				log(LOG_ERR, "%s: unable to allocate or map "
   6810 				    "rx buffer %d, error = %d\n",
   6811 				    device_xname(sc->sc_dev), i, error);
   6812 				/*
   6813 				 * XXX Should attempt to run with fewer receive
   6814 				 * XXX buffers instead of just failing.
   6815 				 */
   6816 				wm_rxdrain(rxq);
   6817 				return ENOMEM;
   6818 			}
   6819 		} else {
   6820 			/*
   6821 			 * For 82575 and 82576, the RX descriptors must be
   6822 			 * initialized after the setting of RCTL.EN in
   6823 			 * wm_set_filter()
   6824 			 */
   6825 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6826 				wm_init_rxdesc(rxq, i);
   6827 		}
   6828 	}
   6829 	rxq->rxq_ptr = 0;
   6830 	rxq->rxq_discard = 0;
   6831 	WM_RXCHAIN_RESET(rxq);
   6832 
   6833 	return 0;
   6834 }
   6835 
   6836 static int
   6837 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6838     struct wm_rxqueue *rxq)
   6839 {
   6840 
   6841 	KASSERT(mutex_owned(rxq->rxq_lock));
   6842 
   6843 	/*
   6844 	 * Set up some register offsets that are different between
   6845 	 * the i82542 and the i82543 and later chips.
   6846 	 */
   6847 	if (sc->sc_type < WM_T_82543)
   6848 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6849 	else
   6850 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6851 
   6852 	wm_init_rx_regs(sc, wmq, rxq);
   6853 	return wm_init_rx_buffer(sc, rxq);
   6854 }
   6855 
   6856 /*
    6857  * wm_init_txrx_queues:
   6858  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6859  */
   6860 static int
   6861 wm_init_txrx_queues(struct wm_softc *sc)
   6862 {
   6863 	int i, error = 0;
   6864 
   6865 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6866 		device_xname(sc->sc_dev), __func__));
   6867 
   6868 	for (i = 0; i < sc->sc_nqueues; i++) {
   6869 		struct wm_queue *wmq = &sc->sc_queue[i];
   6870 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6871 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6872 
   6873 		/*
   6874 		 * TODO
    6875 		 * Currently, a constant value is used instead of AIM
    6876 		 * (Adaptive Interrupt Moderation).  Furthermore, the interrupt
    6877 		 * interval with multiqueue (which uses polling mode) is
    6878 		 * shorter than the default.  More tuning and AIM are required.
   6879 		 */
   6880 		if (wm_is_using_multiqueue(sc))
   6881 			wmq->wmq_itr = 50;
   6882 		else
   6883 			wmq->wmq_itr = sc->sc_itr_init;
   6884 		wmq->wmq_set_itr = true;
   6885 
   6886 		mutex_enter(txq->txq_lock);
   6887 		wm_init_tx_queue(sc, wmq, txq);
   6888 		mutex_exit(txq->txq_lock);
   6889 
   6890 		mutex_enter(rxq->rxq_lock);
   6891 		error = wm_init_rx_queue(sc, wmq, rxq);
   6892 		mutex_exit(rxq->rxq_lock);
   6893 		if (error)
   6894 			break;
   6895 	}
   6896 
   6897 	return error;
   6898 }
   6899 
   6900 /*
   6901  * wm_tx_offload:
   6902  *
   6903  *	Set up TCP/IP checksumming parameters for the
   6904  *	specified packet.
   6905  */
   6906 static int
   6907 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6908     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6909 {
   6910 	struct mbuf *m0 = txs->txs_mbuf;
   6911 	struct livengood_tcpip_ctxdesc *t;
   6912 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6913 	uint32_t ipcse;
   6914 	struct ether_header *eh;
   6915 	int offset, iphl;
   6916 	uint8_t fields;
   6917 
   6918 	/*
   6919 	 * XXX It would be nice if the mbuf pkthdr had offset
   6920 	 * fields for the protocol headers.
   6921 	 */
   6922 
   6923 	eh = mtod(m0, struct ether_header *);
   6924 	switch (htons(eh->ether_type)) {
   6925 	case ETHERTYPE_IP:
   6926 	case ETHERTYPE_IPV6:
   6927 		offset = ETHER_HDR_LEN;
   6928 		break;
   6929 
   6930 	case ETHERTYPE_VLAN:
   6931 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6932 		break;
   6933 
   6934 	default:
   6935 		/*
   6936 		 * Don't support this protocol or encapsulation.
   6937 		 */
   6938 		*fieldsp = 0;
   6939 		*cmdp = 0;
   6940 		return 0;
   6941 	}
   6942 
   6943 	if ((m0->m_pkthdr.csum_flags &
   6944 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6945 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6946 	} else
   6947 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   6948 
   6949 	ipcse = offset + iphl - 1;
   6950 
   6951 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6952 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6953 	seg = 0;
   6954 	fields = 0;
   6955 
   6956 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6957 		int hlen = offset + iphl;
   6958 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6959 
   6960 		if (__predict_false(m0->m_len <
   6961 				    (hlen + sizeof(struct tcphdr)))) {
   6962 			/*
   6963 			 * TCP/IP headers are not in the first mbuf; we need
   6964 			 * to do this the slow and painful way. Let's just
   6965 			 * hope this doesn't happen very often.
   6966 			 */
   6967 			struct tcphdr th;
   6968 
   6969 			WM_Q_EVCNT_INCR(txq, tsopain);
   6970 
   6971 			m_copydata(m0, hlen, sizeof(th), &th);
   6972 			if (v4) {
   6973 				struct ip ip;
   6974 
   6975 				m_copydata(m0, offset, sizeof(ip), &ip);
   6976 				ip.ip_len = 0;
   6977 				m_copyback(m0,
   6978 				    offset + offsetof(struct ip, ip_len),
   6979 				    sizeof(ip.ip_len), &ip.ip_len);
   6980 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6981 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6982 			} else {
   6983 				struct ip6_hdr ip6;
   6984 
   6985 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6986 				ip6.ip6_plen = 0;
   6987 				m_copyback(m0,
   6988 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6989 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6990 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6991 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6992 			}
   6993 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6994 			    sizeof(th.th_sum), &th.th_sum);
   6995 
   6996 			hlen += th.th_off << 2;
   6997 		} else {
   6998 			/*
   6999 			 * TCP/IP headers are in the first mbuf; we can do
   7000 			 * this the easy way.
   7001 			 */
   7002 			struct tcphdr *th;
   7003 
   7004 			if (v4) {
   7005 				struct ip *ip =
   7006 				    (void *)(mtod(m0, char *) + offset);
   7007 				th = (void *)(mtod(m0, char *) + hlen);
   7008 
   7009 				ip->ip_len = 0;
   7010 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7011 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7012 			} else {
   7013 				struct ip6_hdr *ip6 =
   7014 				    (void *)(mtod(m0, char *) + offset);
   7015 				th = (void *)(mtod(m0, char *) + hlen);
   7016 
   7017 				ip6->ip6_plen = 0;
   7018 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7019 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7020 			}
   7021 			hlen += th->th_off << 2;
   7022 		}
   7023 
   7024 		if (v4) {
   7025 			WM_Q_EVCNT_INCR(txq, tso);
   7026 			cmdlen |= WTX_TCPIP_CMD_IP;
   7027 		} else {
   7028 			WM_Q_EVCNT_INCR(txq, tso6);
   7029 			ipcse = 0;
   7030 		}
   7031 		cmd |= WTX_TCPIP_CMD_TSE;
   7032 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7033 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7034 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7035 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7036 	}
   7037 
   7038 	/*
   7039 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7040 	 * offload feature, if we load the context descriptor, we
   7041 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7042 	 */
   7043 
   7044 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7045 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7046 	    WTX_TCPIP_IPCSE(ipcse);
   7047 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7048 		WM_Q_EVCNT_INCR(txq, ipsum);
   7049 		fields |= WTX_IXSM;
   7050 	}
   7051 
   7052 	offset += iphl;
   7053 
   7054 	if (m0->m_pkthdr.csum_flags &
   7055 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7056 		WM_Q_EVCNT_INCR(txq, tusum);
   7057 		fields |= WTX_TXSM;
   7058 		tucs = WTX_TCPIP_TUCSS(offset) |
   7059 		    WTX_TCPIP_TUCSO(offset +
   7060 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7061 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7062 	} else if ((m0->m_pkthdr.csum_flags &
   7063 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7064 		WM_Q_EVCNT_INCR(txq, tusum6);
   7065 		fields |= WTX_TXSM;
   7066 		tucs = WTX_TCPIP_TUCSS(offset) |
   7067 		    WTX_TCPIP_TUCSO(offset +
   7068 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7069 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7070 	} else {
   7071 		/* Just initialize it to a valid TCP context. */
   7072 		tucs = WTX_TCPIP_TUCSS(offset) |
   7073 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7074 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7075 	}
   7076 
   7077 	/*
    7078 	 * We don't have to write a context descriptor for every packet,
    7079 	 * except on the 82574: that chip requires a context descriptor for
    7080 	 * every packet when two descriptor queues are used.
    7081 	 * Writing a context descriptor for every packet is overhead,
    7082 	 * but it does not cause problems.
   7083 	 */
   7084 	/* Fill in the context descriptor. */
   7085 	t = (struct livengood_tcpip_ctxdesc *)
   7086 	    &txq->txq_descs[txq->txq_next];
   7087 	t->tcpip_ipcs = htole32(ipcs);
   7088 	t->tcpip_tucs = htole32(tucs);
   7089 	t->tcpip_cmdlen = htole32(cmdlen);
   7090 	t->tcpip_seg = htole32(seg);
   7091 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7092 
   7093 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7094 	txs->txs_ndesc++;
   7095 
   7096 	*cmdp = cmd;
   7097 	*fieldsp = fields;
   7098 
   7099 	return 0;
   7100 }
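
/*
 * Worked example for wm_tx_offload() above, for an untagged IPv4 TCP
 * packet with standard 20-byte IP and TCP headers: offset =
 * ETHER_HDR_LEN = 14 and iphl = 20, so IPCSS = 14, IPCSO = 24
 * (offset of ip_sum), IPCSE = 33 (last byte of the IP header);
 * TUCSS = 34, TUCSO = 50 (offset of th_sum) and TUCSE = 0, meaning
 * "to the end of the packet".
 */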
   7101 
   7102 static inline int
   7103 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7104 {
   7105 	struct wm_softc *sc = ifp->if_softc;
   7106 	u_int cpuid = cpu_index(curcpu());
   7107 
   7108 	/*
    7109 	 * Currently, a simple distribution strategy based on CPU index.
    7110 	 * TODO:
    7111 	 * Distribute by flow ID (i.e. the RSS hash value).
   7112 	 */
   7113 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7114 }
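
/*
 * Worked example for wm_select_txqueue() above (hypothetical
 * topology): with ncpu = 4, sc_nqueues = 2 and sc_affinity_offset =
 * 0, CPUs 0..3 map to queues 0, 1, 0, 1.  The "+ ncpu" term merely
 * keeps the dividend non-negative when sc_affinity_offset exceeds
 * the current CPU index.
 */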
   7115 
   7116 /*
   7117  * wm_start:		[ifnet interface function]
   7118  *
   7119  *	Start packet transmission on the interface.
   7120  */
   7121 static void
   7122 wm_start(struct ifnet *ifp)
   7123 {
   7124 	struct wm_softc *sc = ifp->if_softc;
   7125 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7126 
   7127 #ifdef WM_MPSAFE
   7128 	KASSERT(if_is_mpsafe(ifp));
   7129 #endif
   7130 	/*
   7131 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7132 	 */
   7133 
   7134 	mutex_enter(txq->txq_lock);
   7135 	if (!txq->txq_stopping)
   7136 		wm_start_locked(ifp);
   7137 	mutex_exit(txq->txq_lock);
   7138 }
   7139 
   7140 static void
   7141 wm_start_locked(struct ifnet *ifp)
   7142 {
   7143 	struct wm_softc *sc = ifp->if_softc;
   7144 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7145 
   7146 	wm_send_common_locked(ifp, txq, false);
   7147 }
   7148 
   7149 static int
   7150 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7151 {
   7152 	int qid;
   7153 	struct wm_softc *sc = ifp->if_softc;
   7154 	struct wm_txqueue *txq;
   7155 
   7156 	qid = wm_select_txqueue(ifp, m);
   7157 	txq = &sc->sc_queue[qid].wmq_txq;
   7158 
   7159 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7160 		m_freem(m);
   7161 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7162 		return ENOBUFS;
   7163 	}
   7164 
   7165 	/*
   7166 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7167 	 */
   7168 	ifp->if_obytes += m->m_pkthdr.len;
   7169 	if (m->m_flags & M_MCAST)
   7170 		ifp->if_omcasts++;
   7171 
   7172 	if (mutex_tryenter(txq->txq_lock)) {
   7173 		if (!txq->txq_stopping)
   7174 			wm_transmit_locked(ifp, txq);
   7175 		mutex_exit(txq->txq_lock);
   7176 	}
   7177 
   7178 	return 0;
   7179 }
   7180 
   7181 static void
   7182 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7183 {
   7184 
   7185 	wm_send_common_locked(ifp, txq, true);
   7186 }
   7187 
   7188 static void
   7189 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7190     bool is_transmit)
   7191 {
   7192 	struct wm_softc *sc = ifp->if_softc;
   7193 	struct mbuf *m0;
   7194 	struct wm_txsoft *txs;
   7195 	bus_dmamap_t dmamap;
   7196 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7197 	bus_addr_t curaddr;
   7198 	bus_size_t seglen, curlen;
   7199 	uint32_t cksumcmd;
   7200 	uint8_t cksumfields;
   7201 	bool remap = true;
   7202 
   7203 	KASSERT(mutex_owned(txq->txq_lock));
   7204 
   7205 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7206 		return;
   7207 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7208 		return;
   7209 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7210 		return;
   7211 
   7212 	/* Remember the previous number of free descriptors. */
   7213 	ofree = txq->txq_free;
   7214 
   7215 	/*
   7216 	 * Loop through the send queue, setting up transmit descriptors
   7217 	 * until we drain the queue, or use up all available transmit
   7218 	 * descriptors.
   7219 	 */
   7220 	for (;;) {
   7221 		m0 = NULL;
   7222 
   7223 		/* Get a work queue entry. */
   7224 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7225 			wm_txeof(txq, UINT_MAX);
   7226 			if (txq->txq_sfree == 0) {
   7227 				DPRINTF(WM_DEBUG_TX,
   7228 				    ("%s: TX: no free job descriptors\n",
   7229 					device_xname(sc->sc_dev)));
   7230 				WM_Q_EVCNT_INCR(txq, txsstall);
   7231 				break;
   7232 			}
   7233 		}
   7234 
   7235 		/* Grab a packet off the queue. */
   7236 		if (is_transmit)
   7237 			m0 = pcq_get(txq->txq_interq);
   7238 		else
   7239 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7240 		if (m0 == NULL)
   7241 			break;
   7242 
   7243 		DPRINTF(WM_DEBUG_TX,
   7244 		    ("%s: TX: have packet to transmit: %p\n",
   7245 			device_xname(sc->sc_dev), m0));
   7246 
   7247 		txs = &txq->txq_soft[txq->txq_snext];
   7248 		dmamap = txs->txs_dmamap;
   7249 
   7250 		use_tso = (m0->m_pkthdr.csum_flags &
   7251 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7252 
   7253 		/*
   7254 		 * So says the Linux driver:
   7255 		 * The controller does a simple calculation to make sure
   7256 		 * there is enough room in the FIFO before initiating the
   7257 		 * DMA for each buffer. The calc is:
   7258 		 *	4 = ceil(buffer len / MSS)
   7259 		 * To make sure we don't overrun the FIFO, adjust the max
   7260 		 * buffer len if the MSS drops.
   7261 		 */
   7262 		dmamap->dm_maxsegsz =
   7263 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7264 		    ? m0->m_pkthdr.segsz << 2
   7265 		    : WTX_MAX_LEN;
   7266 
   7267 		/*
   7268 		 * Load the DMA map.  If this fails, the packet either
   7269 		 * didn't fit in the allotted number of segments, or we
   7270 		 * were short on resources.  For the too-many-segments
   7271 		 * case, we simply report an error and drop the packet,
   7272 		 * since we can't sanely copy a jumbo packet to a single
   7273 		 * buffer.
   7274 		 */
   7275 retry:
   7276 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7277 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7278 		if (__predict_false(error)) {
   7279 			if (error == EFBIG) {
   7280 				if (remap == true) {
   7281 					struct mbuf *m;
   7282 
   7283 					remap = false;
   7284 					m = m_defrag(m0, M_NOWAIT);
   7285 					if (m != NULL) {
   7286 						WM_Q_EVCNT_INCR(txq, defrag);
   7287 						m0 = m;
   7288 						goto retry;
   7289 					}
   7290 				}
   7291 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7292 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7293 				    "DMA segments, dropping...\n",
   7294 				    device_xname(sc->sc_dev));
   7295 				wm_dump_mbuf_chain(sc, m0);
   7296 				m_freem(m0);
   7297 				continue;
   7298 			}
    7299 			/* Short on resources, just stop for now. */
   7300 			DPRINTF(WM_DEBUG_TX,
   7301 			    ("%s: TX: dmamap load failed: %d\n",
   7302 				device_xname(sc->sc_dev), error));
   7303 			break;
   7304 		}
   7305 
   7306 		segs_needed = dmamap->dm_nsegs;
   7307 		if (use_tso) {
   7308 			/* For sentinel descriptor; see below. */
   7309 			segs_needed++;
   7310 		}
   7311 
   7312 		/*
   7313 		 * Ensure we have enough descriptors free to describe
   7314 		 * the packet. Note, we always reserve one descriptor
   7315 		 * at the end of the ring due to the semantics of the
   7316 		 * TDT register, plus one more in the event we need
   7317 		 * to load offload context.
   7318 		 */
   7319 		if (segs_needed > txq->txq_free - 2) {
   7320 			/*
   7321 			 * Not enough free descriptors to transmit this
   7322 			 * packet.  We haven't committed anything yet,
   7323 			 * so just unload the DMA map, put the packet
    7324 			 * back on the queue, and punt. Notify the upper
   7325 			 * layer that there are no more slots left.
   7326 			 */
   7327 			DPRINTF(WM_DEBUG_TX,
   7328 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7329 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7330 				segs_needed, txq->txq_free - 1));
   7331 			if (!is_transmit)
   7332 				ifp->if_flags |= IFF_OACTIVE;
   7333 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7334 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7335 			WM_Q_EVCNT_INCR(txq, txdstall);
   7336 			break;
   7337 		}
   7338 
   7339 		/*
   7340 		 * Check for 82547 Tx FIFO bug. We need to do this
   7341 		 * once we know we can transmit the packet, since we
   7342 		 * do some internal FIFO space accounting here.
   7343 		 */
   7344 		if (sc->sc_type == WM_T_82547 &&
   7345 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7346 			DPRINTF(WM_DEBUG_TX,
   7347 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7348 				device_xname(sc->sc_dev)));
   7349 			if (!is_transmit)
   7350 				ifp->if_flags |= IFF_OACTIVE;
   7351 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7352 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7353 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7354 			break;
   7355 		}
   7356 
   7357 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7358 
   7359 		DPRINTF(WM_DEBUG_TX,
   7360 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7361 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7362 
   7363 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7364 
   7365 		/*
   7366 		 * Store a pointer to the packet so that we can free it
   7367 		 * later.
   7368 		 *
   7369 		 * Initially, we consider the number of descriptors the
   7370 		 * packet uses the number of DMA segments.  This may be
   7371 		 * incremented by 1 if we do checksum offload (a descriptor
   7372 		 * is used to set the checksum context).
   7373 		 */
   7374 		txs->txs_mbuf = m0;
   7375 		txs->txs_firstdesc = txq->txq_next;
   7376 		txs->txs_ndesc = segs_needed;
   7377 
   7378 		/* Set up offload parameters for this packet. */
   7379 		if (m0->m_pkthdr.csum_flags &
   7380 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7381 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7382 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7383 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7384 					  &cksumfields) != 0) {
   7385 				/* Error message already displayed. */
   7386 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7387 				continue;
   7388 			}
   7389 		} else {
   7390 			cksumcmd = 0;
   7391 			cksumfields = 0;
   7392 		}
   7393 
   7394 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7395 
   7396 		/* Sync the DMA map. */
   7397 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7398 		    BUS_DMASYNC_PREWRITE);
   7399 
   7400 		/* Initialize the transmit descriptor. */
   7401 		for (nexttx = txq->txq_next, seg = 0;
   7402 		     seg < dmamap->dm_nsegs; seg++) {
   7403 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7404 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7405 			     seglen != 0;
   7406 			     curaddr += curlen, seglen -= curlen,
   7407 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7408 				curlen = seglen;
   7409 
   7410 				/*
   7411 				 * So says the Linux driver:
   7412 				 * Work around for premature descriptor
   7413 				 * write-backs in TSO mode.  Append a
   7414 				 * 4-byte sentinel descriptor.
   7415 				 */
   7416 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7417 				    curlen > 8)
   7418 					curlen -= 4;
   7419 
   7420 				wm_set_dma_addr(
   7421 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7422 				txq->txq_descs[nexttx].wtx_cmdlen
   7423 				    = htole32(cksumcmd | curlen);
   7424 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7425 				    = 0;
   7426 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7427 				    = cksumfields;
    7428 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7429 				lasttx = nexttx;
   7430 
   7431 				DPRINTF(WM_DEBUG_TX,
   7432 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7433 					"len %#04zx\n",
   7434 					device_xname(sc->sc_dev), nexttx,
   7435 					(uint64_t)curaddr, curlen));
   7436 			}
   7437 		}
   7438 
   7439 		KASSERT(lasttx != -1);
   7440 
   7441 		/*
   7442 		 * Set up the command byte on the last descriptor of
   7443 		 * the packet. If we're in the interrupt delay window,
   7444 		 * delay the interrupt.
   7445 		 */
   7446 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7447 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7448 
   7449 		/*
   7450 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7451 		 * up the descriptor to encapsulate the packet for us.
   7452 		 *
   7453 		 * This is only valid on the last descriptor of the packet.
   7454 		 */
   7455 		if (vlan_has_tag(m0)) {
   7456 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7457 			    htole32(WTX_CMD_VLE);
   7458 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7459 			    = htole16(vlan_get_tag(m0));
   7460 		}
   7461 
   7462 		txs->txs_lastdesc = lasttx;
   7463 
   7464 		DPRINTF(WM_DEBUG_TX,
   7465 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7466 			device_xname(sc->sc_dev),
   7467 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7468 
   7469 		/* Sync the descriptors we're using. */
   7470 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7471 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7472 
   7473 		/* Give the packet to the chip. */
   7474 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7475 
   7476 		DPRINTF(WM_DEBUG_TX,
   7477 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7478 
   7479 		DPRINTF(WM_DEBUG_TX,
   7480 		    ("%s: TX: finished transmitting packet, job %d\n",
   7481 			device_xname(sc->sc_dev), txq->txq_snext));
   7482 
   7483 		/* Advance the tx pointer. */
   7484 		txq->txq_free -= txs->txs_ndesc;
   7485 		txq->txq_next = nexttx;
   7486 
   7487 		txq->txq_sfree--;
   7488 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7489 
   7490 		/* Pass the packet to any BPF listeners. */
   7491 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7492 	}
   7493 
   7494 	if (m0 != NULL) {
   7495 		if (!is_transmit)
   7496 			ifp->if_flags |= IFF_OACTIVE;
   7497 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7498 		WM_Q_EVCNT_INCR(txq, descdrop);
   7499 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7500 			__func__));
   7501 		m_freem(m0);
   7502 	}
   7503 
   7504 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7505 		/* No more slots; notify upper layer. */
   7506 		if (!is_transmit)
   7507 			ifp->if_flags |= IFF_OACTIVE;
   7508 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7509 	}
   7510 
   7511 	if (txq->txq_free != ofree) {
   7512 		/* Set a watchdog timer in case the chip flakes out. */
   7513 		txq->txq_lastsent = time_uptime;
   7514 		txq->txq_sending = true;
   7515 	}
   7516 }
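
/*
 * Note on the "txq_free - 2" checks in wm_send_common_locked() above
 * (and again in wm_nq_send_common_locked() below): one descriptor is
 * always left unused so that a full ring (TDT catching up to TDH)
 * stays distinguishable from an empty one, and a second is reserved
 * in case the offload setup consumes a slot for a checksum context
 * descriptor.  TSO additionally reserves a sentinel segment
 * (segs_needed++) for the premature write-back workaround.
 */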
   7517 
   7518 /*
   7519  * wm_nq_tx_offload:
   7520  *
   7521  *	Set up TCP/IP checksumming parameters for the
   7522  *	specified packet, for NEWQUEUE devices
   7523  */
   7524 static int
   7525 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7526     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7527 {
   7528 	struct mbuf *m0 = txs->txs_mbuf;
   7529 	uint32_t vl_len, mssidx, cmdc;
   7530 	struct ether_header *eh;
   7531 	int offset, iphl;
   7532 
   7533 	/*
   7534 	 * XXX It would be nice if the mbuf pkthdr had offset
   7535 	 * fields for the protocol headers.
   7536 	 */
   7537 	*cmdlenp = 0;
   7538 	*fieldsp = 0;
   7539 
   7540 	eh = mtod(m0, struct ether_header *);
   7541 	switch (htons(eh->ether_type)) {
   7542 	case ETHERTYPE_IP:
   7543 	case ETHERTYPE_IPV6:
   7544 		offset = ETHER_HDR_LEN;
   7545 		break;
   7546 
   7547 	case ETHERTYPE_VLAN:
   7548 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7549 		break;
   7550 
   7551 	default:
   7552 		/* Don't support this protocol or encapsulation. */
   7553 		*do_csum = false;
   7554 		return 0;
   7555 	}
   7556 	*do_csum = true;
   7557 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7558 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7559 
   7560 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7561 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7562 
   7563 	if ((m0->m_pkthdr.csum_flags &
   7564 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7565 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7566 	} else {
   7567 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7568 	}
   7569 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7570 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7571 
   7572 	if (vlan_has_tag(m0)) {
   7573 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7574 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7575 		*cmdlenp |= NQTX_CMD_VLE;
   7576 	}
   7577 
   7578 	mssidx = 0;
   7579 
   7580 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7581 		int hlen = offset + iphl;
   7582 		int tcp_hlen;
   7583 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7584 
   7585 		if (__predict_false(m0->m_len <
   7586 				    (hlen + sizeof(struct tcphdr)))) {
   7587 			/*
   7588 			 * TCP/IP headers are not in the first mbuf; we need
   7589 			 * to do this the slow and painful way. Let's just
   7590 			 * hope this doesn't happen very often.
   7591 			 */
   7592 			struct tcphdr th;
   7593 
   7594 			WM_Q_EVCNT_INCR(txq, tsopain);
   7595 
   7596 			m_copydata(m0, hlen, sizeof(th), &th);
   7597 			if (v4) {
   7598 				struct ip ip;
   7599 
   7600 				m_copydata(m0, offset, sizeof(ip), &ip);
   7601 				ip.ip_len = 0;
   7602 				m_copyback(m0,
   7603 				    offset + offsetof(struct ip, ip_len),
   7604 				    sizeof(ip.ip_len), &ip.ip_len);
   7605 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7606 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7607 			} else {
   7608 				struct ip6_hdr ip6;
   7609 
   7610 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7611 				ip6.ip6_plen = 0;
   7612 				m_copyback(m0,
   7613 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7614 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7615 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7616 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7617 			}
   7618 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7619 			    sizeof(th.th_sum), &th.th_sum);
   7620 
   7621 			tcp_hlen = th.th_off << 2;
   7622 		} else {
   7623 			/*
   7624 			 * TCP/IP headers are in the first mbuf; we can do
   7625 			 * this the easy way.
   7626 			 */
   7627 			struct tcphdr *th;
   7628 
   7629 			if (v4) {
   7630 				struct ip *ip =
   7631 				    (void *)(mtod(m0, char *) + offset);
   7632 				th = (void *)(mtod(m0, char *) + hlen);
   7633 
   7634 				ip->ip_len = 0;
   7635 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7636 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7637 			} else {
   7638 				struct ip6_hdr *ip6 =
   7639 				    (void *)(mtod(m0, char *) + offset);
   7640 				th = (void *)(mtod(m0, char *) + hlen);
   7641 
   7642 				ip6->ip6_plen = 0;
   7643 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7644 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7645 			}
   7646 			tcp_hlen = th->th_off << 2;
   7647 		}
   7648 		hlen += tcp_hlen;
   7649 		*cmdlenp |= NQTX_CMD_TSE;
   7650 
   7651 		if (v4) {
   7652 			WM_Q_EVCNT_INCR(txq, tso);
   7653 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7654 		} else {
   7655 			WM_Q_EVCNT_INCR(txq, tso6);
   7656 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7657 		}
   7658 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7659 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7660 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7661 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7662 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7663 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7664 	} else {
   7665 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7666 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7667 	}
   7668 
   7669 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7670 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7671 		cmdc |= NQTXC_CMD_IP4;
   7672 	}
   7673 
   7674 	if (m0->m_pkthdr.csum_flags &
   7675 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7676 		WM_Q_EVCNT_INCR(txq, tusum);
   7677 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7678 			cmdc |= NQTXC_CMD_TCP;
   7679 		else
   7680 			cmdc |= NQTXC_CMD_UDP;
   7681 
   7682 		cmdc |= NQTXC_CMD_IP4;
   7683 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7684 	}
   7685 	if (m0->m_pkthdr.csum_flags &
   7686 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7687 		WM_Q_EVCNT_INCR(txq, tusum6);
   7688 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7689 			cmdc |= NQTXC_CMD_TCP;
   7690 		else
   7691 			cmdc |= NQTXC_CMD_UDP;
   7692 
   7693 		cmdc |= NQTXC_CMD_IP6;
   7694 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7695 	}
   7696 
   7697 	/*
    7698 	 * We don't have to write a context descriptor for every packet on
    7699 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7700 	 * I210 and I211; it is enough to write one per Tx queue for these
    7701 	 * controllers.
    7702 	 * Writing a context descriptor for every packet is overhead,
    7703 	 * but it does not cause problems.
   7704 	 */
   7705 	/* Fill in the context descriptor. */
   7706 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7707 	    htole32(vl_len);
   7708 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7709 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7710 	    htole32(cmdc);
   7711 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7712 	    htole32(mssidx);
   7713 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7714 	DPRINTF(WM_DEBUG_TX,
   7715 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7716 		txq->txq_next, 0, vl_len));
   7717 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7718 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7719 	txs->txs_ndesc++;
   7720 	return 0;
   7721 }
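
/*
 * Worked example for the context descriptor built above (untagged
 * IPv4 TCP, standard 20-byte headers): offset = 14 and iphl = 20 are
 * packed into vl_len as MACLEN = 14 and IPLEN = 20 with no VLAN
 * bits; a plain TCP checksum request then sets NQTXC_CMD_TCP |
 * NQTXC_CMD_IP4 in cmdc and NQTXD_FIELDS_TUXSM in the per-packet
 * fields.
 */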
   7722 
   7723 /*
   7724  * wm_nq_start:		[ifnet interface function]
   7725  *
   7726  *	Start packet transmission on the interface for NEWQUEUE devices
   7727  */
   7728 static void
   7729 wm_nq_start(struct ifnet *ifp)
   7730 {
   7731 	struct wm_softc *sc = ifp->if_softc;
   7732 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7733 
   7734 #ifdef WM_MPSAFE
   7735 	KASSERT(if_is_mpsafe(ifp));
   7736 #endif
   7737 	/*
   7738 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7739 	 */
   7740 
   7741 	mutex_enter(txq->txq_lock);
   7742 	if (!txq->txq_stopping)
   7743 		wm_nq_start_locked(ifp);
   7744 	mutex_exit(txq->txq_lock);
   7745 }
   7746 
   7747 static void
   7748 wm_nq_start_locked(struct ifnet *ifp)
   7749 {
   7750 	struct wm_softc *sc = ifp->if_softc;
   7751 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7752 
   7753 	wm_nq_send_common_locked(ifp, txq, false);
   7754 }
   7755 
   7756 static int
   7757 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7758 {
   7759 	int qid;
   7760 	struct wm_softc *sc = ifp->if_softc;
   7761 	struct wm_txqueue *txq;
   7762 
   7763 	qid = wm_select_txqueue(ifp, m);
   7764 	txq = &sc->sc_queue[qid].wmq_txq;
   7765 
   7766 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7767 		m_freem(m);
   7768 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7769 		return ENOBUFS;
   7770 	}
   7771 
   7772 	/*
   7773 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7774 	 */
   7775 	ifp->if_obytes += m->m_pkthdr.len;
   7776 	if (m->m_flags & M_MCAST)
   7777 		ifp->if_omcasts++;
   7778 
   7779 	/*
   7780 	 * The situations which this mutex_tryenter() fails at running time
   7781 	 * are below two patterns.
   7782 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
   7783 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
   7784 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   7785 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   7786 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
   7787 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
   7788 	 * stuck, either.
   7789 	 */
   7790 	if (mutex_tryenter(txq->txq_lock)) {
   7791 		if (!txq->txq_stopping)
   7792 			wm_nq_transmit_locked(ifp, txq);
   7793 		mutex_exit(txq->txq_lock);
   7794 	}
   7795 
   7796 	return 0;
   7797 }
   7798 
   7799 static void
   7800 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7801 {
   7802 
   7803 	wm_nq_send_common_locked(ifp, txq, true);
   7804 }
   7805 
   7806 static void
   7807 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7808     bool is_transmit)
   7809 {
   7810 	struct wm_softc *sc = ifp->if_softc;
   7811 	struct mbuf *m0;
   7812 	struct wm_txsoft *txs;
   7813 	bus_dmamap_t dmamap;
   7814 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7815 	bool do_csum, sent;
   7816 	bool remap = true;
   7817 
   7818 	KASSERT(mutex_owned(txq->txq_lock));
   7819 
   7820 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7821 		return;
   7822 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7823 		return;
   7824 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7825 		return;
   7826 
   7827 	sent = false;
   7828 
   7829 	/*
   7830 	 * Loop through the send queue, setting up transmit descriptors
   7831 	 * until we drain the queue, or use up all available transmit
   7832 	 * descriptors.
   7833 	 */
   7834 	for (;;) {
   7835 		m0 = NULL;
   7836 
   7837 		/* Get a work queue entry. */
   7838 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7839 			wm_txeof(txq, UINT_MAX);
   7840 			if (txq->txq_sfree == 0) {
   7841 				DPRINTF(WM_DEBUG_TX,
   7842 				    ("%s: TX: no free job descriptors\n",
   7843 					device_xname(sc->sc_dev)));
   7844 				WM_Q_EVCNT_INCR(txq, txsstall);
   7845 				break;
   7846 			}
   7847 		}
   7848 
   7849 		/* Grab a packet off the queue. */
   7850 		if (is_transmit)
   7851 			m0 = pcq_get(txq->txq_interq);
   7852 		else
   7853 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7854 		if (m0 == NULL)
   7855 			break;
   7856 
   7857 		DPRINTF(WM_DEBUG_TX,
   7858 		    ("%s: TX: have packet to transmit: %p\n",
   7859 		    device_xname(sc->sc_dev), m0));
   7860 
   7861 		txs = &txq->txq_soft[txq->txq_snext];
   7862 		dmamap = txs->txs_dmamap;
   7863 
   7864 		/*
   7865 		 * Load the DMA map.  If this fails, the packet either
   7866 		 * didn't fit in the allotted number of segments, or we
   7867 		 * were short on resources.  For the too-many-segments
   7868 		 * case, we simply report an error and drop the packet,
   7869 		 * since we can't sanely copy a jumbo packet to a single
   7870 		 * buffer.
   7871 		 */
   7872 retry:
   7873 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7874 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7875 		if (__predict_false(error)) {
   7876 			if (error == EFBIG) {
   7877 				if (remap == true) {
   7878 					struct mbuf *m;
   7879 
   7880 					remap = false;
   7881 					m = m_defrag(m0, M_NOWAIT);
   7882 					if (m != NULL) {
   7883 						WM_Q_EVCNT_INCR(txq, defrag);
   7884 						m0 = m;
   7885 						goto retry;
   7886 					}
   7887 				}
   7888 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7889 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7890 				    "DMA segments, dropping...\n",
   7891 				    device_xname(sc->sc_dev));
   7892 				wm_dump_mbuf_chain(sc, m0);
   7893 				m_freem(m0);
   7894 				continue;
   7895 			}
   7896 			/* Short on resources, just stop for now. */
   7897 			DPRINTF(WM_DEBUG_TX,
   7898 			    ("%s: TX: dmamap load failed: %d\n",
   7899 				device_xname(sc->sc_dev), error));
   7900 			break;
   7901 		}
   7902 
   7903 		segs_needed = dmamap->dm_nsegs;
   7904 
   7905 		/*
   7906 		 * Ensure we have enough descriptors free to describe
   7907 		 * the packet. Note, we always reserve one descriptor
   7908 		 * at the end of the ring due to the semantics of the
   7909 		 * TDT register, plus one more in the event we need
   7910 		 * to load offload context.
   7911 		 */
   7912 		if (segs_needed > txq->txq_free - 2) {
   7913 			/*
   7914 			 * Not enough free descriptors to transmit this
   7915 			 * packet.  We haven't committed anything yet,
   7916 			 * so just unload the DMA map, put the packet
   7917 			 * back on the queue, and punt. Notify the upper
   7918 			 * layer that there are no more slots left.
   7919 			 */
   7920 			DPRINTF(WM_DEBUG_TX,
   7921 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7922 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7923 				segs_needed, txq->txq_free - 1));
   7924 			if (!is_transmit)
   7925 				ifp->if_flags |= IFF_OACTIVE;
   7926 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7927 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7928 			WM_Q_EVCNT_INCR(txq, txdstall);
   7929 			break;
   7930 		}
   7931 
   7932 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7933 
   7934 		DPRINTF(WM_DEBUG_TX,
   7935 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7936 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7937 
   7938 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7939 
   7940 		/*
   7941 		 * Store a pointer to the packet so that we can free it
   7942 		 * later.
   7943 		 *
   7944 		 * Initially, we consider the number of descriptors the
   7945 		 * packet uses to be the number of DMA segments.  This may be
   7946 		 * incremented by 1 if we do checksum offload (a descriptor
   7947 		 * is used to set the checksum context).
   7948 		 */
   7949 		txs->txs_mbuf = m0;
   7950 		txs->txs_firstdesc = txq->txq_next;
   7951 		txs->txs_ndesc = segs_needed;
   7952 
   7953 		/* Set up offload parameters for this packet. */
   7954 		uint32_t cmdlen, fields, dcmdlen;
   7955 		if (m0->m_pkthdr.csum_flags &
   7956 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7957 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7958 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7959 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7960 			    &do_csum) != 0) {
   7961 				/* Error message already displayed. */
   7962 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7963 				continue;
   7964 			}
   7965 		} else {
   7966 			do_csum = false;
   7967 			cmdlen = 0;
   7968 			fields = 0;
   7969 		}
   7970 
   7971 		/* Sync the DMA map. */
   7972 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7973 		    BUS_DMASYNC_PREWRITE);
   7974 
   7975 		/* Initialize the first transmit descriptor. */
   7976 		nexttx = txq->txq_next;
   7977 		if (!do_csum) {
   7978 			/* setup a legacy descriptor */
   7979 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7980 			    dmamap->dm_segs[0].ds_addr);
   7981 			txq->txq_descs[nexttx].wtx_cmdlen =
   7982 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7983 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7984 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7985 			if (vlan_has_tag(m0)) {
   7986 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7987 				    htole32(WTX_CMD_VLE);
   7988 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7989 				    htole16(vlan_get_tag(m0));
   7990 			} else
   7991 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7992 
   7993 			dcmdlen = 0;
   7994 		} else {
   7995 			/* setup an advanced data descriptor */
   7996 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7997 			    htole64(dmamap->dm_segs[0].ds_addr);
   7998 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7999 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8000 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8001 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8002 			    htole32(fields);
   8003 			DPRINTF(WM_DEBUG_TX,
   8004 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8005 				device_xname(sc->sc_dev), nexttx,
   8006 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8007 			DPRINTF(WM_DEBUG_TX,
   8008 			    ("\t 0x%08x%08x\n", fields,
   8009 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8010 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8011 		}
   8012 
   8013 		lasttx = nexttx;
   8014 		nexttx = WM_NEXTTX(txq, nexttx);
   8015 		/*
   8016 		 * Fill in the remaining descriptors. The layout is the same
   8017 		 * here for the legacy and advanced formats.
   8018 		 */
   8019 		for (seg = 1; seg < dmamap->dm_nsegs;
   8020 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8021 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8022 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8023 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8024 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8025 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8026 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8027 			lasttx = nexttx;
   8028 
   8029 			DPRINTF(WM_DEBUG_TX,
   8030 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8031 				device_xname(sc->sc_dev), nexttx,
   8032 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8033 				dmamap->dm_segs[seg].ds_len));
   8034 		}
   8035 
   8036 		KASSERT(lasttx != -1);
   8037 
   8038 		/*
   8039 		 * Set up the command byte on the last descriptor of
   8040 		 * the packet. If we're in the interrupt delay window,
   8041 		 * delay the interrupt.
   8042 		 */
   8043 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8044 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8045 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8046 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8047 
   8048 		txs->txs_lastdesc = lasttx;
   8049 
   8050 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8051 		    device_xname(sc->sc_dev),
   8052 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8053 
   8054 		/* Sync the descriptors we're using. */
   8055 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8056 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8057 
   8058 		/* Give the packet to the chip. */
   8059 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8060 		sent = true;
   8061 
   8062 		DPRINTF(WM_DEBUG_TX,
   8063 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8064 
   8065 		DPRINTF(WM_DEBUG_TX,
   8066 		    ("%s: TX: finished transmitting packet, job %d\n",
   8067 			device_xname(sc->sc_dev), txq->txq_snext));
   8068 
   8069 		/* Advance the tx pointer. */
   8070 		txq->txq_free -= txs->txs_ndesc;
   8071 		txq->txq_next = nexttx;
   8072 
   8073 		txq->txq_sfree--;
   8074 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8075 
   8076 		/* Pass the packet to any BPF listeners. */
   8077 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8078 	}
   8079 
   8080 	if (m0 != NULL) {
   8081 		if (!is_transmit)
   8082 			ifp->if_flags |= IFF_OACTIVE;
   8083 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8084 		WM_Q_EVCNT_INCR(txq, descdrop);
   8085 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8086 			__func__));
   8087 		m_freem(m0);
   8088 	}
   8089 
   8090 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8091 		/* No more slots; notify upper layer. */
   8092 		if (!is_transmit)
   8093 			ifp->if_flags |= IFF_OACTIVE;
   8094 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8095 	}
   8096 
   8097 	if (sent) {
   8098 		/* Set a watchdog timer in case the chip flakes out. */
   8099 		txq->txq_lastsent = time_uptime;
   8100 		txq->txq_sending = true;
   8101 	}
   8102 }
   8103 
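        /*
         * wm_deferred_start_locked:
         *
         *	Restart transmission deferred from the Tx interrupt/softint
         *	path. Queue 0 is also started on behalf of ALTQ and
         *	single-queue configurations. Called with txq_lock held.
         */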
   8104 static void
   8105 wm_deferred_start_locked(struct wm_txqueue *txq)
   8106 {
   8107 	struct wm_softc *sc = txq->txq_sc;
   8108 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8109 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8110 	int qid = wmq->wmq_id;
   8111 
   8112 	KASSERT(mutex_owned(txq->txq_lock));
   8113 
   8114 	if (txq->txq_stopping) {
   8115 		mutex_exit(txq->txq_lock);
   8116 		return;
   8117 	}
   8118 
   8119 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8120 		/* XXX needed for ALTQ or single-CPU systems */
   8121 		if (qid == 0)
   8122 			wm_nq_start_locked(ifp);
   8123 		wm_nq_transmit_locked(ifp, txq);
   8124 	} else {
   8125 		/* XXX needed for ALTQ or single-CPU systems */
   8126 		if (qid == 0)
   8127 			wm_start_locked(ifp);
   8128 		wm_transmit_locked(ifp, txq);
   8129 	}
   8130 }
   8131 
   8132 /* Interrupt */
   8133 
   8134 /*
   8135  * wm_txeof:
   8136  *
   8137  *	Helper; handle transmit interrupts.
   8138  */
   8139 static bool
   8140 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8141 {
   8142 	struct wm_softc *sc = txq->txq_sc;
   8143 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8144 	struct wm_txsoft *txs;
   8145 	int count = 0;
   8146 	int i;
   8147 	uint8_t status;
   8148 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8149 	bool more = false;
   8150 
   8151 	KASSERT(mutex_owned(txq->txq_lock));
   8152 
   8153 	if (txq->txq_stopping)
   8154 		return false;
   8155 
   8156 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8157 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8158 	if (wmq->wmq_id == 0)
   8159 		ifp->if_flags &= ~IFF_OACTIVE;
   8160 
   8161 	/*
   8162 	 * Go through the Tx list and free mbufs for those
   8163 	 * frames which have been transmitted.
   8164 	 */
   8165 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8166 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8167 		if (limit-- == 0) {
   8168 			more = true;
   8169 			DPRINTF(WM_DEBUG_TX,
   8170 			    ("%s: TX: loop limited, job %d is not processed\n",
   8171 				device_xname(sc->sc_dev), i));
   8172 			break;
   8173 		}
   8174 
   8175 		txs = &txq->txq_soft[i];
   8176 
   8177 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8178 			device_xname(sc->sc_dev), i));
   8179 
   8180 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8181 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8182 
   8183 		status =
   8184 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8185 		if ((status & WTX_ST_DD) == 0) {
   8186 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8187 			    BUS_DMASYNC_PREREAD);
   8188 			break;
   8189 		}
   8190 
   8191 		count++;
   8192 		DPRINTF(WM_DEBUG_TX,
   8193 		    ("%s: TX: job %d done: descs %d..%d\n",
   8194 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8195 		    txs->txs_lastdesc));
   8196 
   8197 		/*
   8198 		 * XXX We should probably be using the statistics
   8199 		 * XXX registers, but I don't know if they exist
   8200 		 * XXX on chips before the i82544.
   8201 		 */
   8202 
   8203 #ifdef WM_EVENT_COUNTERS
   8204 		if (status & WTX_ST_TU)
   8205 			WM_Q_EVCNT_INCR(txq, underrun);
   8206 #endif /* WM_EVENT_COUNTERS */
   8207 
   8208 		/*
   8209 		 * The documentation for the 82574 and newer says the status
   8210 		 * field has neither an EC (Excessive Collision) bit nor an
   8211 		 * LC (Late Collision) bit (both are reserved). See the "PCIe
   8212 		 * GbE Controller Open Source Software Developer's Manual",
   8213 		 * the 82574 datasheet and newer.
   8214 		 *
   8215 		 * XXX I saw the LC bit set on an I218 even though the media was
   8216 		 * full duplex, so it might have another meaning (I have no docs).
   8217 		 */
   8218 
   8219 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8220 		    && ((sc->sc_type < WM_T_82574)
   8221 			|| (sc->sc_type == WM_T_80003))) {
   8222 			ifp->if_oerrors++;
   8223 			if (status & WTX_ST_LC)
   8224 				log(LOG_WARNING, "%s: late collision\n",
   8225 				    device_xname(sc->sc_dev));
   8226 			else if (status & WTX_ST_EC) {
   8227 				ifp->if_collisions +=
   8228 				    TX_COLLISION_THRESHOLD + 1;
   8229 				log(LOG_WARNING, "%s: excessive collisions\n",
   8230 				    device_xname(sc->sc_dev));
   8231 			}
   8232 		} else
   8233 			ifp->if_opackets++;
   8234 
   8235 		txq->txq_packets++;
   8236 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8237 
   8238 		txq->txq_free += txs->txs_ndesc;
   8239 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8240 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8241 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8242 		m_freem(txs->txs_mbuf);
   8243 		txs->txs_mbuf = NULL;
   8244 	}
   8245 
   8246 	/* Update the dirty transmit buffer pointer. */
   8247 	txq->txq_sdirty = i;
   8248 	DPRINTF(WM_DEBUG_TX,
   8249 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8250 
   8251 	if (count != 0)
   8252 		rnd_add_uint32(&sc->rnd_source, count);
   8253 
   8254 	/*
   8255 	 * If there are no more pending transmissions, cancel the watchdog
   8256 	 * timer.
   8257 	 */
   8258 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8259 		txq->txq_sending = false;
   8260 
   8261 	return more;
   8262 }
   8263 
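        /*
         * Receive descriptor accessors.
         *
         * Three descriptor layouts are in use: the 82574 extended layout,
         * the NEWQUEUE layout, and the legacy layout. These inline helpers
         * hide the differences from wm_rxeof().
         */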
   8264 static inline uint32_t
   8265 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8266 {
   8267 	struct wm_softc *sc = rxq->rxq_sc;
   8268 
   8269 	if (sc->sc_type == WM_T_82574)
   8270 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8271 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8272 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8273 	else
   8274 		return rxq->rxq_descs[idx].wrx_status;
   8275 }
   8276 
   8277 static inline uint32_t
   8278 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8279 {
   8280 	struct wm_softc *sc = rxq->rxq_sc;
   8281 
   8282 	if (sc->sc_type == WM_T_82574)
   8283 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8284 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8285 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8286 	else
   8287 		return rxq->rxq_descs[idx].wrx_errors;
   8288 }
   8289 
   8290 static inline uint16_t
   8291 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8292 {
   8293 	struct wm_softc *sc = rxq->rxq_sc;
   8294 
   8295 	if (sc->sc_type == WM_T_82574)
   8296 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8297 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8298 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8299 	else
   8300 		return rxq->rxq_descs[idx].wrx_special;
   8301 }
   8302 
   8303 static inline int
   8304 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8305 {
   8306 	struct wm_softc *sc = rxq->rxq_sc;
   8307 
   8308 	if (sc->sc_type == WM_T_82574)
   8309 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8310 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8311 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8312 	else
   8313 		return rxq->rxq_descs[idx].wrx_len;
   8314 }
   8315 
   8316 #ifdef WM_DEBUG
   8317 static inline uint32_t
   8318 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8319 {
   8320 	struct wm_softc *sc = rxq->rxq_sc;
   8321 
   8322 	if (sc->sc_type == WM_T_82574)
   8323 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8324 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8325 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8326 	else
   8327 		return 0;
   8328 }
   8329 
   8330 static inline uint8_t
   8331 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8332 {
   8333 	struct wm_softc *sc = rxq->rxq_sc;
   8334 
   8335 	if (sc->sc_type == WM_T_82574)
   8336 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8337 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8338 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8339 	else
   8340 		return 0;
   8341 }
   8342 #endif /* WM_DEBUG */
   8343 
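        /*
         * wm_rxdesc_is_set_status:
         *
         *	Test a receive status bit, using whichever of the three
         *	per-format bit masks matches the descriptor layout in use.
         */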
   8344 static inline bool
   8345 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8346     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8347 {
   8348 
   8349 	if (sc->sc_type == WM_T_82574)
   8350 		return (status & ext_bit) != 0;
   8351 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8352 		return (status & nq_bit) != 0;
   8353 	else
   8354 		return (status & legacy_bit) != 0;
   8355 }
   8356 
   8357 static inline bool
   8358 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8359     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8360 {
   8361 
   8362 	if (sc->sc_type == WM_T_82574)
   8363 		return (error & ext_bit) != 0;
   8364 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8365 		return (error & nq_bit) != 0;
   8366 	else
   8367 		return (error & legacy_bit) != 0;
   8368 }
   8369 
   8370 static inline bool
   8371 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8372 {
   8373 
   8374 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8375 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8376 		return true;
   8377 	else
   8378 		return false;
   8379 }
   8380 
   8381 static inline bool
   8382 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8383 {
   8384 	struct wm_softc *sc = rxq->rxq_sc;
   8385 
   8386 	/* XXXX missing error bit for newqueue? */
   8387 	if (wm_rxdesc_is_set_error(sc, errors,
   8388 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8389 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8390 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8391 		NQRXC_ERROR_RXE)) {
   8392 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8393 		    EXTRXC_ERROR_SE, 0))
   8394 			log(LOG_WARNING, "%s: symbol error\n",
   8395 			    device_xname(sc->sc_dev));
   8396 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8397 		    EXTRXC_ERROR_SEQ, 0))
   8398 			log(LOG_WARNING, "%s: receive sequence error\n",
   8399 			    device_xname(sc->sc_dev));
   8400 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8401 		    EXTRXC_ERROR_CE, 0))
   8402 			log(LOG_WARNING, "%s: CRC error\n",
   8403 			    device_xname(sc->sc_dev));
   8404 		return true;
   8405 	}
   8406 
   8407 	return false;
   8408 }
   8409 
   8410 static inline bool
   8411 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8412 {
   8413 	struct wm_softc *sc = rxq->rxq_sc;
   8414 
   8415 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8416 		NQRXC_STATUS_DD)) {
   8417 		/* We have processed all of the receive descriptors. */
   8418 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8419 		return false;
   8420 	}
   8421 
   8422 	return true;
   8423 }
   8424 
   8425 static inline bool
   8426 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8427     uint16_t vlantag, struct mbuf *m)
   8428 {
   8429 
   8430 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8431 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8432 		vlan_set_tag(m, le16toh(vlantag));
   8433 	}
   8434 
   8435 	return true;
   8436 }
   8437 
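        /*
         * wm_rxdesc_ensure_checksum:
         *
         *	Convert the hardware checksum status/error bits into M_CSUM_*
         *	flags on the received mbuf, unless the chip indicates the
         *	checksum was ignored (legacy IXSM).
         */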
   8438 static inline void
   8439 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8440     uint32_t errors, struct mbuf *m)
   8441 {
   8442 	struct wm_softc *sc = rxq->rxq_sc;
   8443 
   8444 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8445 		if (wm_rxdesc_is_set_status(sc, status,
   8446 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8447 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8448 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8449 			if (wm_rxdesc_is_set_error(sc, errors,
   8450 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8451 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8452 		}
   8453 		if (wm_rxdesc_is_set_status(sc, status,
   8454 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8455 			/*
   8456 			 * Note: we don't know if this was TCP or UDP,
   8457 			 * so we just set both bits, and expect the
   8458 			 * upper layers to deal.
   8459 			 */
   8460 			WM_Q_EVCNT_INCR(rxq, tusum);
   8461 			m->m_pkthdr.csum_flags |=
   8462 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8463 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8464 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8465 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8466 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8467 		}
   8468 	}
   8469 }
   8470 
   8471 /*
   8472  * wm_rxeof:
   8473  *
   8474  *	Helper; handle receive interrupts.
   8475  */
   8476 static bool
   8477 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8478 {
   8479 	struct wm_softc *sc = rxq->rxq_sc;
   8480 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8481 	struct wm_rxsoft *rxs;
   8482 	struct mbuf *m;
   8483 	int i, len;
   8484 	int count = 0;
   8485 	uint32_t status, errors;
   8486 	uint16_t vlantag;
   8487 	bool more = false;
   8488 
   8489 	KASSERT(mutex_owned(rxq->rxq_lock));
   8490 
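        	/*
        	 * Walk the ring starting at the last processed descriptor,
        	 * stopping at the first descriptor the hardware still owns or
        	 * when the processing limit is reached.
        	 */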
   8491 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8492 		if (limit-- == 0) {
   8493 			rxq->rxq_ptr = i;
   8494 			more = true;
   8495 			DPRINTF(WM_DEBUG_RX,
   8496 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8497 				device_xname(sc->sc_dev), i));
   8498 			break;
   8499 		}
   8500 
   8501 		rxs = &rxq->rxq_soft[i];
   8502 
   8503 		DPRINTF(WM_DEBUG_RX,
   8504 		    ("%s: RX: checking descriptor %d\n",
   8505 			device_xname(sc->sc_dev), i));
   8506 		wm_cdrxsync(rxq, i,
   8507 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8508 
   8509 		status = wm_rxdesc_get_status(rxq, i);
   8510 		errors = wm_rxdesc_get_errors(rxq, i);
   8511 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8512 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8513 #ifdef WM_DEBUG
   8514 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8515 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8516 #endif
   8517 
   8518 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8519 			/*
   8520 			 * Update the receive pointer while holding rxq_lock
   8521 			 * so it stays consistent with the updated counters.
   8522 			 */
   8523 			rxq->rxq_ptr = i;
   8524 			break;
   8525 		}
   8526 
   8527 		count++;
   8528 		if (__predict_false(rxq->rxq_discard)) {
   8529 			DPRINTF(WM_DEBUG_RX,
   8530 			    ("%s: RX: discarding contents of descriptor %d\n",
   8531 				device_xname(sc->sc_dev), i));
   8532 			wm_init_rxdesc(rxq, i);
   8533 			if (wm_rxdesc_is_eop(rxq, status)) {
   8534 				/* Reset our state. */
   8535 				DPRINTF(WM_DEBUG_RX,
   8536 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8537 					device_xname(sc->sc_dev)));
   8538 				rxq->rxq_discard = 0;
   8539 			}
   8540 			continue;
   8541 		}
   8542 
   8543 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8544 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8545 
   8546 		m = rxs->rxs_mbuf;
   8547 
   8548 		/*
   8549 		 * Add a new receive buffer to the ring, unless of
   8550 		 * course the length is zero. Treat the latter as a
   8551 		 * failed mapping.
   8552 		 */
   8553 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8554 			/*
   8555 			 * Failed, throw away what we've done so
   8556 			 * far, and discard the rest of the packet.
   8557 			 */
   8558 			ifp->if_ierrors++;
   8559 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8560 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8561 			wm_init_rxdesc(rxq, i);
   8562 			if (!wm_rxdesc_is_eop(rxq, status))
   8563 				rxq->rxq_discard = 1;
   8564 			if (rxq->rxq_head != NULL)
   8565 				m_freem(rxq->rxq_head);
   8566 			WM_RXCHAIN_RESET(rxq);
   8567 			DPRINTF(WM_DEBUG_RX,
   8568 			    ("%s: RX: Rx buffer allocation failed, "
   8569 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8570 				rxq->rxq_discard ? " (discard)" : ""));
   8571 			continue;
   8572 		}
   8573 
   8574 		m->m_len = len;
   8575 		rxq->rxq_len += len;
   8576 		DPRINTF(WM_DEBUG_RX,
   8577 		    ("%s: RX: buffer at %p len %d\n",
   8578 			device_xname(sc->sc_dev), m->m_data, len));
   8579 
   8580 		/* If this is not the end of the packet, keep looking. */
   8581 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8582 			WM_RXCHAIN_LINK(rxq, m);
   8583 			DPRINTF(WM_DEBUG_RX,
   8584 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8585 				device_xname(sc->sc_dev), rxq->rxq_len));
   8586 			continue;
   8587 		}
   8588 
   8589 		/*
   8590 		 * Okay, we have the entire packet now. The chip is
   8591 		 * configured to include the FCS except on I350, I354 and
   8592 		 * I21[01] (not all chips can be configured to strip it),
   8593 		 * so we need to trim it. We may also need to adjust the
   8594 		 * length of the previous mbuf in the chain if the current
   8595 		 * mbuf is too short. Due to an erratum, the RCTL_SECRC bit
   8596 		 * in the RCTL register is always set on I350, so we don't
   8597 		 * trim the FCS there.
   8598 		 */
   8599 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8600 		    && (sc->sc_type != WM_T_I210)
   8601 		    && (sc->sc_type != WM_T_I211)) {
   8602 			if (m->m_len < ETHER_CRC_LEN) {
   8603 				rxq->rxq_tail->m_len
   8604 				    -= (ETHER_CRC_LEN - m->m_len);
   8605 				m->m_len = 0;
   8606 			} else
   8607 				m->m_len -= ETHER_CRC_LEN;
   8608 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8609 		} else
   8610 			len = rxq->rxq_len;
   8611 
   8612 		WM_RXCHAIN_LINK(rxq, m);
   8613 
   8614 		*rxq->rxq_tailp = NULL;
   8615 		m = rxq->rxq_head;
   8616 
   8617 		WM_RXCHAIN_RESET(rxq);
   8618 
   8619 		DPRINTF(WM_DEBUG_RX,
   8620 		    ("%s: RX: have entire packet, len -> %d\n",
   8621 			device_xname(sc->sc_dev), len));
   8622 
   8623 		/* If an error occurred, update stats and drop the packet. */
   8624 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8625 			m_freem(m);
   8626 			continue;
   8627 		}
   8628 
   8629 		/* No errors.  Receive the packet. */
   8630 		m_set_rcvif(m, ifp);
   8631 		m->m_pkthdr.len = len;
   8632 		/*
   8633 		 * TODO
   8634 		 * We should save the rsshash and rsstype in this mbuf.
   8635 		 */
   8636 		DPRINTF(WM_DEBUG_RX,
   8637 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8638 			device_xname(sc->sc_dev), rsstype, rsshash));
   8639 
   8640 		/*
   8641 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8642 		 * for us.  Associate the tag with the packet.
   8643 		 */
   8644 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8645 			continue;
   8646 
   8647 		/* Set up checksum info for this packet. */
   8648 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8649 		/*
   8650 		 * Update the receive pointer while holding rxq_lock so it
   8651 		 * stays consistent with the updated counters.
   8652 		 */
   8653 		rxq->rxq_ptr = i;
   8654 		rxq->rxq_packets++;
   8655 		rxq->rxq_bytes += len;
   8656 		mutex_exit(rxq->rxq_lock);
   8657 
   8658 		/* Pass it on. */
   8659 		if_percpuq_enqueue(sc->sc_ipq, m);
   8660 
   8661 		mutex_enter(rxq->rxq_lock);
   8662 
   8663 		if (rxq->rxq_stopping)
   8664 			break;
   8665 	}
   8666 
   8667 	if (count != 0)
   8668 		rnd_add_uint32(&sc->rnd_source, count);
   8669 
   8670 	DPRINTF(WM_DEBUG_RX,
   8671 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8672 
   8673 	return more;
   8674 }
   8675 
   8676 /*
   8677  * wm_linkintr_gmii:
   8678  *
   8679  *	Helper; handle link interrupts for GMII.
   8680  */
   8681 static void
   8682 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8683 {
   8684 
   8685 	KASSERT(WM_CORE_LOCKED(sc));
   8686 
   8687 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8688 		__func__));
   8689 
   8690 	if (icr & ICR_LSC) {
   8691 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8692 		uint32_t reg;
   8693 		bool link;
   8694 
   8695 		link = status & STATUS_LU;
   8696 		if (link) {
   8697 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8698 				device_xname(sc->sc_dev),
   8699 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8700 		} else {
   8701 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8702 				device_xname(sc->sc_dev)));
   8703 		}
   8704 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8705 			wm_gig_downshift_workaround_ich8lan(sc);
   8706 
   8707 		if ((sc->sc_type == WM_T_ICH8)
   8708 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8709 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8710 		}
   8711 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8712 			device_xname(sc->sc_dev)));
   8713 		mii_pollstat(&sc->sc_mii);
   8714 		if (sc->sc_type == WM_T_82543) {
   8715 			int miistatus, active;
   8716 
   8717 			/*
   8718 			 * With 82543, we need to force speed and
   8719 			 * duplex on the MAC equal to what the PHY
   8720 			 * speed and duplex configuration is.
   8721 			 */
   8722 			miistatus = sc->sc_mii.mii_media_status;
   8723 
   8724 			if (miistatus & IFM_ACTIVE) {
   8725 				active = sc->sc_mii.mii_media_active;
   8726 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8727 				switch (IFM_SUBTYPE(active)) {
   8728 				case IFM_10_T:
   8729 					sc->sc_ctrl |= CTRL_SPEED_10;
   8730 					break;
   8731 				case IFM_100_TX:
   8732 					sc->sc_ctrl |= CTRL_SPEED_100;
   8733 					break;
   8734 				case IFM_1000_T:
   8735 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8736 					break;
   8737 				default:
   8738 					/*
   8739 					 * fiber?
   8740 					 * Should not enter here.
   8741 					 */
   8742 					printf("unknown media (%x)\n", active);
   8743 					break;
   8744 				}
   8745 				if (active & IFM_FDX)
   8746 					sc->sc_ctrl |= CTRL_FD;
   8747 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8748 			}
   8749 		} else if (sc->sc_type == WM_T_PCH) {
   8750 			wm_k1_gig_workaround_hv(sc,
   8751 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8752 		}
   8753 
   8754 		if ((sc->sc_phytype == WMPHY_82578)
   8755 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8756 			== IFM_1000_T)) {
   8757 
   8758 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8759 				delay(200*1000); /* XXX too big */
   8760 
   8761 				/* Link stall fix for link up */
   8762 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8763 				    HV_MUX_DATA_CTRL,
   8764 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8765 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8766 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8767 				    HV_MUX_DATA_CTRL,
   8768 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8769 			}
   8770 		}
   8771 		/*
   8772 		 * I217 Packet Loss issue:
   8773 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8774 		 * on power up.
   8775 		 * Set the Beacon Duration for I217 to 8 usec
   8776 		 */
   8777 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8778 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8779 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8780 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8781 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8782 		}
   8783 
   8784 		/* Work-around I218 hang issue */
   8785 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   8786 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   8787 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   8788 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   8789 			wm_k1_workaround_lpt_lp(sc, link);
   8790 
   8791 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8792 			/*
   8793 			 * Set platform power management values for Latency
   8794 			 * Tolerance Reporting (LTR)
   8795 			 */
   8796 			wm_platform_pm_pch_lpt(sc,
   8797 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8798 		}
   8799 
   8800 		/* FEXTNVM6 K1-off workaround */
   8801 		if (sc->sc_type == WM_T_PCH_SPT) {
   8802 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8803 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8804 			    & FEXTNVM6_K1_OFF_ENABLE)
   8805 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8806 			else
   8807 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8808 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8809 		}
   8810 	} else if (icr & ICR_RXSEQ) {
   8811 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8812 			device_xname(sc->sc_dev)));
   8813 	}
   8814 }
   8815 
   8816 /*
   8817  * wm_linkintr_tbi:
   8818  *
   8819  *	Helper; handle link interrupts for TBI mode.
   8820  */
   8821 static void
   8822 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8823 {
   8824 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8825 	uint32_t status;
   8826 
   8827 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8828 		__func__));
   8829 
   8830 	status = CSR_READ(sc, WMREG_STATUS);
   8831 	if (icr & ICR_LSC) {
   8832 		wm_check_for_link(sc);
   8833 		if (status & STATUS_LU) {
   8834 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8835 				device_xname(sc->sc_dev),
   8836 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8837 			/*
   8838 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8839 			 * so we should update sc->sc_ctrl
   8840 			 */
   8841 
   8842 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8843 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8844 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8845 			if (status & STATUS_FD)
   8846 				sc->sc_tctl |=
   8847 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8848 			else
   8849 				sc->sc_tctl |=
   8850 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8851 			if (sc->sc_ctrl & CTRL_TFCE)
   8852 				sc->sc_fcrtl |= FCRTL_XONE;
   8853 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8854 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8855 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   8856 			sc->sc_tbi_linkup = 1;
   8857 			if_link_state_change(ifp, LINK_STATE_UP);
   8858 		} else {
   8859 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8860 				device_xname(sc->sc_dev)));
   8861 			sc->sc_tbi_linkup = 0;
   8862 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8863 		}
   8864 		/* Update LED */
   8865 		wm_tbi_serdes_set_linkled(sc);
   8866 	} else if (icr & ICR_RXSEQ) {
   8867 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8868 			device_xname(sc->sc_dev)));
   8869 	}
   8870 }
   8871 
   8872 /*
   8873  * wm_linkintr_serdes:
   8874  *
   8875  *	Helper; handle link interrupts for SERDES mode.
   8876  */
   8877 static void
   8878 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8879 {
   8880 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8881 	struct mii_data *mii = &sc->sc_mii;
   8882 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8883 	uint32_t pcs_adv, pcs_lpab, reg;
   8884 
   8885 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8886 		__func__));
   8887 
   8888 	if (icr & ICR_LSC) {
   8889 		/* Check PCS */
   8890 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8891 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8892 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8893 				device_xname(sc->sc_dev)));
   8894 			mii->mii_media_status |= IFM_ACTIVE;
   8895 			sc->sc_tbi_linkup = 1;
   8896 			if_link_state_change(ifp, LINK_STATE_UP);
   8897 		} else {
   8898 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8899 				device_xname(sc->sc_dev)));
   8900 			mii->mii_media_status |= IFM_NONE;
   8901 			sc->sc_tbi_linkup = 0;
   8902 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8903 			wm_tbi_serdes_set_linkled(sc);
   8904 			return;
   8905 		}
   8906 		mii->mii_media_active |= IFM_1000_SX;
   8907 		if ((reg & PCS_LSTS_FDX) != 0)
   8908 			mii->mii_media_active |= IFM_FDX;
   8909 		else
   8910 			mii->mii_media_active |= IFM_HDX;
   8911 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8912 			/* Check flow */
   8913 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8914 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8915 				DPRINTF(WM_DEBUG_LINK,
   8916 				    ("XXX LINKOK but not ACOMP\n"));
   8917 				return;
   8918 			}
   8919 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8920 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8921 			DPRINTF(WM_DEBUG_LINK,
   8922 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8923 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8924 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8925 				mii->mii_media_active |= IFM_FLOW
   8926 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8927 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8928 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8929 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8930 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8931 				mii->mii_media_active |= IFM_FLOW
   8932 				    | IFM_ETH_TXPAUSE;
   8933 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8934 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8935 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8936 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8937 				mii->mii_media_active |= IFM_FLOW
   8938 				    | IFM_ETH_RXPAUSE;
   8939 		}
   8940 		/* Update LED */
   8941 		wm_tbi_serdes_set_linkled(sc);
   8942 	} else {
   8943 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8944 		    device_xname(sc->sc_dev)));
   8945 	}
   8946 }
   8947 
   8948 /*
   8949  * wm_linkintr:
   8950  *
   8951  *	Helper; handle link interrupts.
   8952  */
   8953 static void
   8954 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8955 {
   8956 
   8957 	KASSERT(WM_CORE_LOCKED(sc));
   8958 
   8959 	if (sc->sc_flags & WM_F_HAS_MII)
   8960 		wm_linkintr_gmii(sc, icr);
   8961 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8962 	    && (sc->sc_type >= WM_T_82575))
   8963 		wm_linkintr_serdes(sc, icr);
   8964 	else
   8965 		wm_linkintr_tbi(sc, icr);
   8966 }
   8967 
   8968 /*
   8969  * wm_intr_legacy:
   8970  *
   8971  *	Interrupt service routine for INTx and MSI.
   8972  */
   8973 static int
   8974 wm_intr_legacy(void *arg)
   8975 {
   8976 	struct wm_softc *sc = arg;
   8977 	struct wm_queue *wmq = &sc->sc_queue[0];
   8978 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8979 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8980 	uint32_t icr, rndval = 0;
   8981 	int handled = 0;
   8982 
   8983 	while (1 /* CONSTCOND */) {
   8984 		icr = CSR_READ(sc, WMREG_ICR);
   8985 		if ((icr & sc->sc_icr) == 0)
   8986 			break;
   8987 		if (handled == 0) {
   8988 			DPRINTF(WM_DEBUG_TX,
   8989 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8990 		}
   8991 		if (rndval == 0)
   8992 			rndval = icr;
   8993 
   8994 		mutex_enter(rxq->rxq_lock);
   8995 
   8996 		if (rxq->rxq_stopping) {
   8997 			mutex_exit(rxq->rxq_lock);
   8998 			break;
   8999 		}
   9000 
   9001 		handled = 1;
   9002 
   9003 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9004 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9005 			DPRINTF(WM_DEBUG_RX,
   9006 			    ("%s: RX: got Rx intr 0x%08x\n",
   9007 				device_xname(sc->sc_dev),
   9008 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9009 			WM_Q_EVCNT_INCR(rxq, intr);
   9010 		}
   9011 #endif
   9012 		/*
   9013 		 * wm_rxeof() does *not* call upper layer functions directly,
   9014 		 * since if_percpuq_enqueue() just calls softint_schedule().
   9015 		 * So, we can call wm_rxeof() in interrupt context.
   9016 		 */
   9017 		wm_rxeof(rxq, UINT_MAX);
   9018 
   9019 		mutex_exit(rxq->rxq_lock);
   9020 		mutex_enter(txq->txq_lock);
   9021 
   9022 		if (txq->txq_stopping) {
   9023 			mutex_exit(txq->txq_lock);
   9024 			break;
   9025 		}
   9026 
   9027 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9028 		if (icr & ICR_TXDW) {
   9029 			DPRINTF(WM_DEBUG_TX,
   9030 			    ("%s: TX: got TXDW interrupt\n",
   9031 				device_xname(sc->sc_dev)));
   9032 			WM_Q_EVCNT_INCR(txq, txdw);
   9033 		}
   9034 #endif
   9035 		wm_txeof(txq, UINT_MAX);
   9036 
   9037 		mutex_exit(txq->txq_lock);
   9038 		WM_CORE_LOCK(sc);
   9039 
   9040 		if (sc->sc_core_stopping) {
   9041 			WM_CORE_UNLOCK(sc);
   9042 			break;
   9043 		}
   9044 
   9045 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9046 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9047 			wm_linkintr(sc, icr);
   9048 		}
   9049 
   9050 		WM_CORE_UNLOCK(sc);
   9051 
   9052 		if (icr & ICR_RXO) {
   9053 #if defined(WM_DEBUG)
   9054 			log(LOG_WARNING, "%s: Receive overrun\n",
   9055 			    device_xname(sc->sc_dev));
   9056 #endif /* defined(WM_DEBUG) */
   9057 		}
   9058 	}
   9059 
   9060 	rnd_add_uint32(&sc->rnd_source, rndval);
   9061 
   9062 	if (handled) {
   9063 		/* Try to get more packets going. */
   9064 		softint_schedule(wmq->wmq_si);
   9065 	}
   9066 
   9067 	return handled;
   9068 }
   9069 
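        /*
         * wm_txrxintr_disable:
         *
         *	Mask the Tx/Rx interrupts of one queue pair. The 82574 uses
         *	the IMC register, while the 82575 and newer use EIMC.
         */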
   9070 static inline void
   9071 wm_txrxintr_disable(struct wm_queue *wmq)
   9072 {
   9073 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9074 
   9075 	if (sc->sc_type == WM_T_82574)
   9076 		CSR_WRITE(sc, WMREG_IMC,
   9077 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9078 	else if (sc->sc_type == WM_T_82575)
   9079 		CSR_WRITE(sc, WMREG_EIMC,
   9080 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9081 	else
   9082 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9083 }
   9084 
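        /*
         * wm_txrxintr_enable:
         *
         *	Update interrupt throttling via wm_itrs_calculate(), then
         *	unmask the Tx/Rx interrupts of one queue pair.
         */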
   9085 static inline void
   9086 wm_txrxintr_enable(struct wm_queue *wmq)
   9087 {
   9088 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9089 
   9090 	wm_itrs_calculate(sc, wmq);
   9091 
   9092 	/*
   9093 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
   9094 	 * here. There is no need to care whether RXQ(0) or RXQ(1) enables
   9095 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
   9096 	 * its wm_handle_queue(wmq) is running.
   9097 	 */
   9098 	if (sc->sc_type == WM_T_82574)
   9099 		CSR_WRITE(sc, WMREG_IMS,
   9100 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9101 	else if (sc->sc_type == WM_T_82575)
   9102 		CSR_WRITE(sc, WMREG_EIMS,
   9103 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9104 	else
   9105 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9106 }
   9107 
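        /*
         * wm_txrxintr_msix:
         *
         *	Interrupt service routine for the Tx/Rx MSI-X vector of one
         *	queue pair. The vector stays masked while any remaining work
         *	is deferred to the wm_handle_queue() softint.
         */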
   9108 static int
   9109 wm_txrxintr_msix(void *arg)
   9110 {
   9111 	struct wm_queue *wmq = arg;
   9112 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9113 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9114 	struct wm_softc *sc = txq->txq_sc;
   9115 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9116 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9117 	bool txmore;
   9118 	bool rxmore;
   9119 
   9120 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9121 
   9122 	DPRINTF(WM_DEBUG_TX,
   9123 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9124 
   9125 	wm_txrxintr_disable(wmq);
   9126 
   9127 	mutex_enter(txq->txq_lock);
   9128 
   9129 	if (txq->txq_stopping) {
   9130 		mutex_exit(txq->txq_lock);
   9131 		return 0;
   9132 	}
   9133 
   9134 	WM_Q_EVCNT_INCR(txq, txdw);
   9135 	txmore = wm_txeof(txq, txlimit);
   9136 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9137 	mutex_exit(txq->txq_lock);
   9138 
   9139 	DPRINTF(WM_DEBUG_RX,
   9140 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9141 	mutex_enter(rxq->rxq_lock);
   9142 
   9143 	if (rxq->rxq_stopping) {
   9144 		mutex_exit(rxq->rxq_lock);
   9145 		return 0;
   9146 	}
   9147 
   9148 	WM_Q_EVCNT_INCR(rxq, intr);
   9149 	rxmore = wm_rxeof(rxq, rxlimit);
   9150 	mutex_exit(rxq->rxq_lock);
   9151 
   9152 	wm_itrs_writereg(sc, wmq);
   9153 
   9154 	if (txmore || rxmore)
   9155 		softint_schedule(wmq->wmq_si);
   9156 	else
   9157 		wm_txrxintr_enable(wmq);
   9158 
   9159 	return 1;
   9160 }
   9161 
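        /*
         * wm_handle_queue:
         *
         *	Softint handler; continue the Tx/Rx processing deferred from
         *	wm_txrxintr_msix() and restart any deferred transmission.
         */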
   9162 static void
   9163 wm_handle_queue(void *arg)
   9164 {
   9165 	struct wm_queue *wmq = arg;
   9166 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9167 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9168 	struct wm_softc *sc = txq->txq_sc;
   9169 	u_int txlimit = sc->sc_tx_process_limit;
   9170 	u_int rxlimit = sc->sc_rx_process_limit;
   9171 	bool txmore;
   9172 	bool rxmore;
   9173 
   9174 	mutex_enter(txq->txq_lock);
   9175 	if (txq->txq_stopping) {
   9176 		mutex_exit(txq->txq_lock);
   9177 		return;
   9178 	}
   9179 	txmore = wm_txeof(txq, txlimit);
   9180 	wm_deferred_start_locked(txq);
   9181 	mutex_exit(txq->txq_lock);
   9182 
   9183 	mutex_enter(rxq->rxq_lock);
   9184 	if (rxq->rxq_stopping) {
   9185 		mutex_exit(rxq->rxq_lock);
   9186 		return;
   9187 	}
   9188 	WM_Q_EVCNT_INCR(rxq, defer);
   9189 	rxmore = wm_rxeof(rxq, rxlimit);
   9190 	mutex_exit(rxq->rxq_lock);
   9191 
   9192 	if (txmore || rxmore)
   9193 		softint_schedule(wmq->wmq_si);
   9194 	else
   9195 		wm_txrxintr_enable(wmq);
   9196 }
   9197 
   9198 /*
   9199  * wm_linkintr_msix:
   9200  *
   9201  *	Interrupt service routine for link status change for MSI-X.
   9202  */
   9203 static int
   9204 wm_linkintr_msix(void *arg)
   9205 {
   9206 	struct wm_softc *sc = arg;
   9207 	uint32_t reg;
   9208 	bool has_rxo = false;	/* initialized: read below even without RXO */
   9209 
   9210 	DPRINTF(WM_DEBUG_LINK,
   9211 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9212 
   9213 	reg = CSR_READ(sc, WMREG_ICR);
   9214 	WM_CORE_LOCK(sc);
   9215 	if (sc->sc_core_stopping)
   9216 		goto out;
   9217 
   9218 	if ((reg & ICR_LSC) != 0) {
   9219 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9220 		wm_linkintr(sc, ICR_LSC);
   9221 	}
   9222 
   9223 	/*
   9224 	 * XXX 82574 MSI-X mode workaround
   9225 	 *
   9226 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
   9227 	 * MSI-X vector, and furthermore it raises neither the ICR_RXQ(0) nor
   9228 	 * the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
   9229 	 * interrupts by writing WMREG_ICS to process the received packets.
   9230 	 */
   9231 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9232 #if defined(WM_DEBUG)
   9233 		log(LOG_WARNING, "%s: Receive overrun\n",
   9234 		    device_xname(sc->sc_dev));
   9235 #endif /* defined(WM_DEBUG) */
   9236 
   9237 		has_rxo = true;
   9238 		/*
   9239 		 * The RXO interrupt fires at a very high rate when receive
   9240 		 * traffic is heavy, so we poll ICR_OTHER the way we do Tx/Rx
   9241 		 * interrupts. ICR_OTHER will be re-enabled at the end of
   9242 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   9243 		 * ICR_RXQ(1) interrupts.
   9244 		 */
   9245 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9246 
   9247 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9248 	}
   9249 
   9250 
   9252 out:
   9253 	WM_CORE_UNLOCK(sc);
   9254 
   9255 	if (sc->sc_type == WM_T_82574) {
   9256 		if (!has_rxo)
   9257 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9258 		else
   9259 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9260 	} else if (sc->sc_type == WM_T_82575)
   9261 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9262 	else
   9263 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9264 
   9265 	return 1;
   9266 }
   9267 
   9268 /*
   9269  * Media related.
   9270  * GMII, SGMII, TBI (and SERDES)
   9271  */
   9272 
   9273 /* Common */
   9274 
   9275 /*
   9276  * wm_tbi_serdes_set_linkled:
   9277  *
   9278  *	Update the link LED on TBI and SERDES devices.
   9279  */
   9280 static void
   9281 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9282 {
   9283 
   9284 	if (sc->sc_tbi_linkup)
   9285 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9286 	else
   9287 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9288 
   9289 	/* 82540 or newer devices are active low */
   9290 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9291 
   9292 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9293 }
   9294 
   9295 /* GMII related */
   9296 
   9297 /*
   9298  * wm_gmii_reset:
   9299  *
   9300  *	Reset the PHY.
   9301  */
   9302 static void
   9303 wm_gmii_reset(struct wm_softc *sc)
   9304 {
   9305 	uint32_t reg;
   9306 	int rv;
   9307 
   9308 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9309 		device_xname(sc->sc_dev), __func__));
   9310 
   9311 	rv = sc->phy.acquire(sc);
   9312 	if (rv != 0) {
   9313 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9314 		    __func__);
   9315 		return;
   9316 	}
   9317 
   9318 	switch (sc->sc_type) {
   9319 	case WM_T_82542_2_0:
   9320 	case WM_T_82542_2_1:
   9321 		/* null */
   9322 		break;
   9323 	case WM_T_82543:
   9324 		/*
   9325 		 * With 82543, we need to force speed and duplex on the MAC
   9326 		 * equal to what the PHY speed and duplex configuration is.
   9327 		 * In addition, we need to perform a hardware reset on the PHY
   9328 		 * to take it out of reset.
   9329 		 */
   9330 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9331 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9332 
   9333 		/* The PHY reset pin is active-low. */
   9334 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9335 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9336 		    CTRL_EXT_SWDPIN(4));
   9337 		reg |= CTRL_EXT_SWDPIO(4);
   9338 
   9339 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9340 		CSR_WRITE_FLUSH(sc);
   9341 		delay(10*1000);
   9342 
   9343 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9344 		CSR_WRITE_FLUSH(sc);
   9345 		delay(150);
   9346 #if 0
   9347 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9348 #endif
   9349 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9350 		break;
   9351 	case WM_T_82544:	/* reset 10000us */
   9352 	case WM_T_82540:
   9353 	case WM_T_82545:
   9354 	case WM_T_82545_3:
   9355 	case WM_T_82546:
   9356 	case WM_T_82546_3:
   9357 	case WM_T_82541:
   9358 	case WM_T_82541_2:
   9359 	case WM_T_82547:
   9360 	case WM_T_82547_2:
   9361 	case WM_T_82571:	/* reset 100us */
   9362 	case WM_T_82572:
   9363 	case WM_T_82573:
   9364 	case WM_T_82574:
   9365 	case WM_T_82575:
   9366 	case WM_T_82576:
   9367 	case WM_T_82580:
   9368 	case WM_T_I350:
   9369 	case WM_T_I354:
   9370 	case WM_T_I210:
   9371 	case WM_T_I211:
   9372 	case WM_T_82583:
   9373 	case WM_T_80003:
   9374 		/* generic reset */
   9375 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9376 		CSR_WRITE_FLUSH(sc);
   9377 		delay(20000);
   9378 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9379 		CSR_WRITE_FLUSH(sc);
   9380 		delay(20000);
   9381 
   9382 		if ((sc->sc_type == WM_T_82541)
   9383 		    || (sc->sc_type == WM_T_82541_2)
   9384 		    || (sc->sc_type == WM_T_82547)
   9385 		    || (sc->sc_type == WM_T_82547_2)) {
   9386 			/* Workarounds for IGP are done in igp_reset() */
   9387 			/* XXX add code to set LED after phy reset */
   9388 		}
   9389 		break;
   9390 	case WM_T_ICH8:
   9391 	case WM_T_ICH9:
   9392 	case WM_T_ICH10:
   9393 	case WM_T_PCH:
   9394 	case WM_T_PCH2:
   9395 	case WM_T_PCH_LPT:
   9396 	case WM_T_PCH_SPT:
   9397 	case WM_T_PCH_CNP:
   9398 		/* generic reset */
   9399 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9400 		CSR_WRITE_FLUSH(sc);
   9401 		delay(100);
   9402 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9403 		CSR_WRITE_FLUSH(sc);
   9404 		delay(150);
   9405 		break;
   9406 	default:
   9407 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9408 		    __func__);
   9409 		break;
   9410 	}
   9411 
   9412 	sc->phy.release(sc);
   9413 
   9414 	/* get_cfg_done */
   9415 	wm_get_cfg_done(sc);
   9416 
   9417 	/* extra setup */
   9418 	switch (sc->sc_type) {
   9419 	case WM_T_82542_2_0:
   9420 	case WM_T_82542_2_1:
   9421 	case WM_T_82543:
   9422 	case WM_T_82544:
   9423 	case WM_T_82540:
   9424 	case WM_T_82545:
   9425 	case WM_T_82545_3:
   9426 	case WM_T_82546:
   9427 	case WM_T_82546_3:
   9428 	case WM_T_82541_2:
   9429 	case WM_T_82547_2:
   9430 	case WM_T_82571:
   9431 	case WM_T_82572:
   9432 	case WM_T_82573:
   9433 	case WM_T_82574:
   9434 	case WM_T_82583:
   9435 	case WM_T_82575:
   9436 	case WM_T_82576:
   9437 	case WM_T_82580:
   9438 	case WM_T_I350:
   9439 	case WM_T_I354:
   9440 	case WM_T_I210:
   9441 	case WM_T_I211:
   9442 	case WM_T_80003:
   9443 		/* null */
   9444 		break;
   9445 	case WM_T_82541:
   9446 	case WM_T_82547:
   9447 		/* XXX Actively configure the LED after PHY reset */
   9448 		break;
   9449 	case WM_T_ICH8:
   9450 	case WM_T_ICH9:
   9451 	case WM_T_ICH10:
   9452 	case WM_T_PCH:
   9453 	case WM_T_PCH2:
   9454 	case WM_T_PCH_LPT:
   9455 	case WM_T_PCH_SPT:
   9456 	case WM_T_PCH_CNP:
   9457 		wm_phy_post_reset(sc);
   9458 		break;
   9459 	default:
   9460 		panic("%s: unknown type\n", __func__);
   9461 		break;
   9462 	}
   9463 }
   9464 
   9465 /*
   9466  * Setup sc_phytype and mii_{read|write}reg.
   9467  *
   9468  *  To identify the PHY type, the correct read/write functions must
   9469  * be selected, and to select them the PCI ID or MAC type is required,
   9470  * without accessing the PHY registers.
   9471  *
   9472  *  On the first call of this function, the PHY ID is not known yet.
   9473  * Check the PCI ID or MAC type. The list of PCI IDs may not be
   9474  * perfect, so the result might be incorrect.
   9475  *
   9476  *  On the second call, the PHY OUI and model are used to identify
   9477  * the PHY type. It might not be perfect because of missing comparison
   9478  * entries, but it should be better than the first call.
   9479  *
   9480  *  If the newly detected result differs from the previous assumption,
   9481  * a diagnostic message is printed.
   9482  */
   9483 static void
   9484 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9485     uint16_t phy_model)
   9486 {
   9487 	device_t dev = sc->sc_dev;
   9488 	struct mii_data *mii = &sc->sc_mii;
   9489 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9490 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9491 	mii_readreg_t new_readreg;
   9492 	mii_writereg_t new_writereg;
   9493 
   9494 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9495 		device_xname(sc->sc_dev), __func__));
   9496 
   9497 	if (mii->mii_readreg == NULL) {
   9498 		/*
   9499 		 *  This is the first call of this function. For ICH and PCH
   9500 		 * variants, it's difficult to determine the PHY access method
   9501 		 * by sc_type, so use the PCI product ID for some devices.
   9502 		 */
   9503 
   9504 		switch (sc->sc_pcidevid) {
   9505 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9506 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9507 			/* 82577 */
   9508 			new_phytype = WMPHY_82577;
   9509 			break;
   9510 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9511 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9512 			/* 82578 */
   9513 			new_phytype = WMPHY_82578;
   9514 			break;
   9515 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9516 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9517 			/* 82579 */
   9518 			new_phytype = WMPHY_82579;
   9519 			break;
   9520 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9521 		case PCI_PRODUCT_INTEL_82801I_BM:
   9522 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9523 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9524 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9525 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9526 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9527 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9528 			/* ICH8, 9, 10 with 82567 */
   9529 			new_phytype = WMPHY_BM;
   9530 			break;
   9531 		default:
   9532 			break;
   9533 		}
   9534 	} else {
   9535 		/* It's not the first call. Use PHY OUI and model */
   9536 		switch (phy_oui) {
   9537 		case MII_OUI_ATHEROS: /* XXX ??? */
   9538 			switch (phy_model) {
   9539 			case 0x0004: /* XXX */
   9540 				new_phytype = WMPHY_82578;
   9541 				break;
   9542 			default:
   9543 				break;
   9544 			}
   9545 			break;
   9546 		case MII_OUI_xxMARVELL:
   9547 			switch (phy_model) {
   9548 			case MII_MODEL_xxMARVELL_I210:
   9549 				new_phytype = WMPHY_I210;
   9550 				break;
   9551 			case MII_MODEL_xxMARVELL_E1011:
   9552 			case MII_MODEL_xxMARVELL_E1000_3:
   9553 			case MII_MODEL_xxMARVELL_E1000_5:
   9554 			case MII_MODEL_xxMARVELL_E1112:
   9555 				new_phytype = WMPHY_M88;
   9556 				break;
   9557 			case MII_MODEL_xxMARVELL_E1149:
   9558 				new_phytype = WMPHY_BM;
   9559 				break;
   9560 			case MII_MODEL_xxMARVELL_E1111:
   9561 			case MII_MODEL_xxMARVELL_I347:
   9562 			case MII_MODEL_xxMARVELL_E1512:
   9563 			case MII_MODEL_xxMARVELL_E1340M:
   9564 			case MII_MODEL_xxMARVELL_E1543:
   9565 				new_phytype = WMPHY_M88;
   9566 				break;
   9567 			case MII_MODEL_xxMARVELL_I82563:
   9568 				new_phytype = WMPHY_GG82563;
   9569 				break;
   9570 			default:
   9571 				break;
   9572 			}
   9573 			break;
   9574 		case MII_OUI_INTEL:
   9575 			switch (phy_model) {
   9576 			case MII_MODEL_INTEL_I82577:
   9577 				new_phytype = WMPHY_82577;
   9578 				break;
   9579 			case MII_MODEL_INTEL_I82579:
   9580 				new_phytype = WMPHY_82579;
   9581 				break;
   9582 			case MII_MODEL_INTEL_I217:
   9583 				new_phytype = WMPHY_I217;
   9584 				break;
   9585 			case MII_MODEL_INTEL_I82580:
   9586 			case MII_MODEL_INTEL_I350:
   9587 				new_phytype = WMPHY_82580;
   9588 				break;
   9589 			default:
   9590 				break;
   9591 			}
   9592 			break;
   9593 		case MII_OUI_yyINTEL:
   9594 			switch (phy_model) {
   9595 			case MII_MODEL_yyINTEL_I82562G:
   9596 			case MII_MODEL_yyINTEL_I82562EM:
   9597 			case MII_MODEL_yyINTEL_I82562ET:
   9598 				new_phytype = WMPHY_IFE;
   9599 				break;
   9600 			case MII_MODEL_yyINTEL_IGP01E1000:
   9601 				new_phytype = WMPHY_IGP;
   9602 				break;
   9603 			case MII_MODEL_yyINTEL_I82566:
   9604 				new_phytype = WMPHY_IGP_3;
   9605 				break;
   9606 			default:
   9607 				break;
   9608 			}
   9609 			break;
   9610 		default:
   9611 			break;
   9612 		}
   9613 		if (new_phytype == WMPHY_UNKNOWN)
   9614 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9615 			    __func__);
   9616 
   9617 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9618 		    && (sc->sc_phytype != new_phytype)) {
    9619 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    9620 			    "was incorrect. PHY type from PHY ID = %u\n",
    9621 			    sc->sc_phytype, new_phytype);
   9622 		}
   9623 	}
   9624 
   9625 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9626 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9627 		/* SGMII */
   9628 		new_readreg = wm_sgmii_readreg;
   9629 		new_writereg = wm_sgmii_writereg;
   9630 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9631 		/* BM2 (phyaddr == 1) */
   9632 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9633 		    && (new_phytype != WMPHY_BM)
   9634 		    && (new_phytype != WMPHY_UNKNOWN))
   9635 			doubt_phytype = new_phytype;
   9636 		new_phytype = WMPHY_BM;
   9637 		new_readreg = wm_gmii_bm_readreg;
   9638 		new_writereg = wm_gmii_bm_writereg;
   9639 	} else if (sc->sc_type >= WM_T_PCH) {
   9640 		/* All PCH* use _hv_ */
   9641 		new_readreg = wm_gmii_hv_readreg;
   9642 		new_writereg = wm_gmii_hv_writereg;
   9643 	} else if (sc->sc_type >= WM_T_ICH8) {
   9644 		/* non-82567 ICH8, 9 and 10 */
   9645 		new_readreg = wm_gmii_i82544_readreg;
   9646 		new_writereg = wm_gmii_i82544_writereg;
   9647 	} else if (sc->sc_type >= WM_T_80003) {
   9648 		/* 80003 */
   9649 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9650 		    && (new_phytype != WMPHY_GG82563)
   9651 		    && (new_phytype != WMPHY_UNKNOWN))
   9652 			doubt_phytype = new_phytype;
   9653 		new_phytype = WMPHY_GG82563;
   9654 		new_readreg = wm_gmii_i80003_readreg;
   9655 		new_writereg = wm_gmii_i80003_writereg;
   9656 	} else if (sc->sc_type >= WM_T_I210) {
   9657 		/* I210 and I211 */
   9658 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9659 		    && (new_phytype != WMPHY_I210)
   9660 		    && (new_phytype != WMPHY_UNKNOWN))
   9661 			doubt_phytype = new_phytype;
   9662 		new_phytype = WMPHY_I210;
   9663 		new_readreg = wm_gmii_gs40g_readreg;
   9664 		new_writereg = wm_gmii_gs40g_writereg;
   9665 	} else if (sc->sc_type >= WM_T_82580) {
   9666 		/* 82580, I350 and I354 */
   9667 		new_readreg = wm_gmii_82580_readreg;
   9668 		new_writereg = wm_gmii_82580_writereg;
   9669 	} else if (sc->sc_type >= WM_T_82544) {
    9670 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9671 		new_readreg = wm_gmii_i82544_readreg;
   9672 		new_writereg = wm_gmii_i82544_writereg;
   9673 	} else {
   9674 		new_readreg = wm_gmii_i82543_readreg;
   9675 		new_writereg = wm_gmii_i82543_writereg;
   9676 	}
   9677 
   9678 	if (new_phytype == WMPHY_BM) {
   9679 		/* All BM use _bm_ */
   9680 		new_readreg = wm_gmii_bm_readreg;
   9681 		new_writereg = wm_gmii_bm_writereg;
   9682 	}
   9683 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9684 		/* All PCH* use _hv_ */
   9685 		new_readreg = wm_gmii_hv_readreg;
   9686 		new_writereg = wm_gmii_hv_writereg;
   9687 	}
   9688 
   9689 	/* Diag output */
   9690 	if (doubt_phytype != WMPHY_UNKNOWN)
   9691 		aprint_error_dev(dev, "Assumed new PHY type was "
   9692 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9693 		    new_phytype);
   9694 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9695 	    && (sc->sc_phytype != new_phytype))
    9696 		aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    9697 		    "was incorrect. New PHY type = %u\n",
   9698 		    sc->sc_phytype, new_phytype);
   9699 
   9700 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9701 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9702 
   9703 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9704 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9705 		    "function was incorrect.\n");
   9706 
   9707 	/* Update now */
   9708 	sc->sc_phytype = new_phytype;
   9709 	mii->mii_readreg = new_readreg;
   9710 	mii->mii_writereg = new_writereg;
   9711 }
   9712 
   9713 /*
   9714  * wm_get_phy_id_82575:
   9715  *
    9716  *	Return the PHY ID, or -1 on failure.
   9717  */
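/*
 * The result is only meaningful for SGMII configurations that use the
 * external MDIO interface: 82575/82576 expose the PHY address in the
 * MDIC register, while 82580 and later parts moved it to MDICNFG.
 */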
   9718 static int
   9719 wm_get_phy_id_82575(struct wm_softc *sc)
   9720 {
   9721 	uint32_t reg;
   9722 	int phyid = -1;
   9723 
   9724 	/* XXX */
   9725 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9726 		return -1;
   9727 
   9728 	if (wm_sgmii_uses_mdio(sc)) {
   9729 		switch (sc->sc_type) {
   9730 		case WM_T_82575:
   9731 		case WM_T_82576:
   9732 			reg = CSR_READ(sc, WMREG_MDIC);
   9733 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9734 			break;
   9735 		case WM_T_82580:
   9736 		case WM_T_I350:
   9737 		case WM_T_I354:
   9738 		case WM_T_I210:
   9739 		case WM_T_I211:
   9740 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9741 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9742 			break;
   9743 		default:
   9744 			return -1;
   9745 		}
   9746 	}
   9747 
   9748 	return phyid;
   9749 }
   9750 
   9751 
   9752 /*
   9753  * wm_gmii_mediainit:
   9754  *
   9755  *	Initialize media for use on 1000BASE-T devices.
   9756  */
   9757 static void
   9758 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9759 {
   9760 	device_t dev = sc->sc_dev;
   9761 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9762 	struct mii_data *mii = &sc->sc_mii;
   9763 	uint32_t reg;
   9764 
   9765 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9766 		device_xname(sc->sc_dev), __func__));
   9767 
   9768 	/* We have GMII. */
   9769 	sc->sc_flags |= WM_F_HAS_MII;
   9770 
   9771 	if (sc->sc_type == WM_T_80003)
   9772 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9773 	else
   9774 		sc->sc_tipg = TIPG_1000T_DFLT;
   9775 
   9776 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9777 	if ((sc->sc_type == WM_T_82580)
   9778 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9779 	    || (sc->sc_type == WM_T_I211)) {
   9780 		reg = CSR_READ(sc, WMREG_PHPM);
   9781 		reg &= ~PHPM_GO_LINK_D;
   9782 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9783 	}
   9784 
   9785 	/*
   9786 	 * Let the chip set speed/duplex on its own based on
   9787 	 * signals from the PHY.
   9788 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9789 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9790 	 */
   9791 	sc->sc_ctrl |= CTRL_SLU;
   9792 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9793 
   9794 	/* Initialize our media structures and probe the GMII. */
   9795 	mii->mii_ifp = ifp;
   9796 
   9797 	mii->mii_statchg = wm_gmii_statchg;
   9798 
   9799 	/* get PHY control from SMBus to PCIe */
   9800 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9801 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9802 	    || (sc->sc_type == WM_T_PCH_CNP))
   9803 		wm_smbustopci(sc);
   9804 
   9805 	wm_gmii_reset(sc);
   9806 
   9807 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9808 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9809 	    wm_gmii_mediastatus);
   9810 
   9811 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9812 	    || (sc->sc_type == WM_T_82580)
   9813 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9814 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9815 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9816 			/* Attach only one port */
   9817 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9818 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9819 		} else {
   9820 			int i, id;
   9821 			uint32_t ctrl_ext;
   9822 
   9823 			id = wm_get_phy_id_82575(sc);
   9824 			if (id != -1) {
   9825 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9826 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9827 			}
   9828 			if ((id == -1)
   9829 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9830 				/* Power on the SGMII PHY if it is disabled */
   9831 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9832 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9833 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9834 				CSR_WRITE_FLUSH(sc);
   9835 				delay(300*1000); /* XXX too long */
   9836 
    9837 				/* Try PHY addresses 1 through 7 */
   9838 				for (i = 1; i < 8; i++)
   9839 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9840 					    0xffffffff, i, MII_OFFSET_ANY,
   9841 					    MIIF_DOPAUSE);
   9842 
    9843 				/* Restore the previous SFP cage power state */
   9844 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9845 			}
   9846 		}
   9847 	} else
   9848 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9849 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9850 
   9851 	/*
    9852  * If the MAC is PCH2 or newer and failed to detect the MII PHY, call
   9853 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9854 	 */
   9855 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9856 		|| (sc->sc_type == WM_T_PCH_SPT)
   9857 		|| (sc->sc_type == WM_T_PCH_CNP))
   9858 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9859 		wm_set_mdio_slow_mode_hv(sc);
   9860 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9861 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9862 	}
   9863 
   9864 	/*
   9865 	 * (For ICH8 variants)
   9866 	 * If PHY detection failed, use BM's r/w function and retry.
   9867 	 */
   9868 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9869 		/* if failed, retry with *_bm_* */
   9870 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9871 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9872 		    sc->sc_phytype);
   9873 		sc->sc_phytype = WMPHY_BM;
   9874 		mii->mii_readreg = wm_gmii_bm_readreg;
   9875 		mii->mii_writereg = wm_gmii_bm_writereg;
   9876 
   9877 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9878 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9879 	}
   9880 
   9881 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9882 		/* No PHY was found */
   9883 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9884 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9885 		sc->sc_phytype = WMPHY_NONE;
   9886 	} else {
   9887 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9888 
   9889 		/*
   9890 		 * PHY Found! Check PHY type again by the second call of
   9891 		 * wm_gmii_setup_phytype.
   9892 		 */
   9893 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9894 		    child->mii_mpd_model);
   9895 
   9896 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9897 	}
   9898 }
   9899 
   9900 /*
   9901  * wm_gmii_mediachange:	[ifmedia interface function]
   9902  *
   9903  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9904  */
   9905 static int
   9906 wm_gmii_mediachange(struct ifnet *ifp)
   9907 {
   9908 	struct wm_softc *sc = ifp->if_softc;
   9909 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9910 	int rc;
   9911 
   9912 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9913 		device_xname(sc->sc_dev), __func__));
   9914 	if ((ifp->if_flags & IFF_UP) == 0)
   9915 		return 0;
   9916 
   9917 	/* Disable D0 LPLU. */
   9918 	wm_lplu_d0_disable(sc);
   9919 
   9920 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9921 	sc->sc_ctrl |= CTRL_SLU;
   9922 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9923 	    || (sc->sc_type > WM_T_82543)) {
   9924 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9925 	} else {
   9926 		sc->sc_ctrl &= ~CTRL_ASDE;
   9927 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9928 		if (ife->ifm_media & IFM_FDX)
   9929 			sc->sc_ctrl |= CTRL_FD;
   9930 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9931 		case IFM_10_T:
   9932 			sc->sc_ctrl |= CTRL_SPEED_10;
   9933 			break;
   9934 		case IFM_100_TX:
   9935 			sc->sc_ctrl |= CTRL_SPEED_100;
   9936 			break;
   9937 		case IFM_1000_T:
   9938 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9939 			break;
   9940 		default:
   9941 			panic("wm_gmii_mediachange: bad media 0x%x",
   9942 			    ife->ifm_media);
   9943 		}
   9944 	}
   9945 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9946 	CSR_WRITE_FLUSH(sc);
   9947 	if (sc->sc_type <= WM_T_82543)
   9948 		wm_gmii_reset(sc);
   9949 
   9950 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9951 		return 0;
   9952 	return rc;
   9953 }
   9954 
   9955 /*
   9956  * wm_gmii_mediastatus:	[ifmedia interface function]
   9957  *
   9958  *	Get the current interface media status on a 1000BASE-T device.
   9959  */
   9960 static void
   9961 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9962 {
   9963 	struct wm_softc *sc = ifp->if_softc;
   9964 
   9965 	ether_mediastatus(ifp, ifmr);
   9966 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9967 	    | sc->sc_flowflags;
   9968 }
   9969 
   9970 #define	MDI_IO		CTRL_SWDPIN(2)
   9971 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9972 #define	MDI_CLK		CTRL_SWDPIN(3)
   9973 
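/*
 * The i82543 has no MDIC register, so MII management frames are
 * bit-banged through the software-definable pins above.  A clause 22
 * read frame, as built by wm_gmii_i82543_readreg(), looks like
 * (sent MSB first):
 *
 *	<PRE:32 ones><ST:01><OP:10=rd/01=wr><PHYAD:5><REGAD:5><TA><DATA:16>
 *
 * wm_i82543_mii_sendbits() shifts the preamble and the 14 header bits
 * out on MDI_IO, toggling MDI_CLK once per bit; wm_i82543_mii_recvbits()
 * then turns the data pin around and clocks the 16 data bits back in.
 */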
   9974 static void
   9975 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9976 {
   9977 	uint32_t i, v;
   9978 
   9979 	v = CSR_READ(sc, WMREG_CTRL);
   9980 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9981 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9982 
   9983 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9984 		if (data & i)
   9985 			v |= MDI_IO;
   9986 		else
   9987 			v &= ~MDI_IO;
   9988 		CSR_WRITE(sc, WMREG_CTRL, v);
   9989 		CSR_WRITE_FLUSH(sc);
   9990 		delay(10);
   9991 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9992 		CSR_WRITE_FLUSH(sc);
   9993 		delay(10);
   9994 		CSR_WRITE(sc, WMREG_CTRL, v);
   9995 		CSR_WRITE_FLUSH(sc);
   9996 		delay(10);
   9997 	}
   9998 }
   9999 
   10000 static uint32_t
   10001 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10002 {
   10003 	uint32_t v, i, data = 0;
   10004 
   10005 	v = CSR_READ(sc, WMREG_CTRL);
   10006 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10007 	v |= CTRL_SWDPIO(3);
   10008 
   10009 	CSR_WRITE(sc, WMREG_CTRL, v);
   10010 	CSR_WRITE_FLUSH(sc);
   10011 	delay(10);
   10012 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10013 	CSR_WRITE_FLUSH(sc);
   10014 	delay(10);
   10015 	CSR_WRITE(sc, WMREG_CTRL, v);
   10016 	CSR_WRITE_FLUSH(sc);
   10017 	delay(10);
   10018 
   10019 	for (i = 0; i < 16; i++) {
   10020 		data <<= 1;
   10021 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10022 		CSR_WRITE_FLUSH(sc);
   10023 		delay(10);
   10024 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10025 			data |= 1;
   10026 		CSR_WRITE(sc, WMREG_CTRL, v);
   10027 		CSR_WRITE_FLUSH(sc);
   10028 		delay(10);
   10029 	}
   10030 
   10031 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10032 	CSR_WRITE_FLUSH(sc);
   10033 	delay(10);
   10034 	CSR_WRITE(sc, WMREG_CTRL, v);
   10035 	CSR_WRITE_FLUSH(sc);
   10036 	delay(10);
   10037 
   10038 	return data;
   10039 }
   10040 
   10041 #undef MDI_IO
   10042 #undef MDI_DIR
   10043 #undef MDI_CLK
   10044 
   10045 /*
   10046  * wm_gmii_i82543_readreg:	[mii interface function]
   10047  *
   10048  *	Read a PHY register on the GMII (i82543 version).
   10049  */
   10050 static int
   10051 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10052 {
   10053 	struct wm_softc *sc = device_private(dev);
   10054 	int rv;
   10055 
   10056 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10057 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10058 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10059 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10060 
   10061 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10062 		device_xname(dev), phy, reg, rv));
   10063 
   10064 	return rv;
   10065 }
   10066 
   10067 /*
   10068  * wm_gmii_i82543_writereg:	[mii interface function]
   10069  *
   10070  *	Write a PHY register on the GMII (i82543 version).
   10071  */
   10072 static void
   10073 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10074 {
   10075 	struct wm_softc *sc = device_private(dev);
   10076 
   10077 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10078 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10079 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10080 	    (MII_COMMAND_START << 30), 32);
   10081 }
   10082 
   10083 /*
   10084  * wm_gmii_mdic_readreg:	[mii interface function]
   10085  *
   10086  *	Read a PHY register on the GMII.
   10087  */
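/*
 * Unlike the i82543 bit-bang path above, newer MACs expose a single
 * MDIC register: software writes the opcode, PHY address and register
 * address, the hardware runs the serial transaction, and software then
 * polls MDIC_READY (MDIC_E signals an error, e.g. no PHY present).
 */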
   10088 static int
   10089 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10090 {
   10091 	struct wm_softc *sc = device_private(dev);
   10092 	uint32_t mdic = 0;
   10093 	int i, rv;
   10094 
   10095 	if (reg > MII_ADDRMASK) {
   10096 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10097 		    __func__, sc->sc_phytype, reg);
   10098 		reg &= MII_ADDRMASK;
   10099 	}
   10100 
   10101 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10102 	    MDIC_REGADD(reg));
   10103 
   10104 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10105 		delay(50);
   10106 		mdic = CSR_READ(sc, WMREG_MDIC);
   10107 		if (mdic & MDIC_READY)
   10108 			break;
   10109 	}
   10110 
   10111 	if ((mdic & MDIC_READY) == 0) {
   10112 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10113 		    device_xname(dev), phy, reg);
   10114 		return 0;
   10115 	} else if (mdic & MDIC_E) {
   10116 #if 0 /* This is normal if no PHY is present. */
   10117 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10118 		    device_xname(dev), phy, reg);
   10119 #endif
   10120 		return 0;
   10121 	} else {
   10122 		rv = MDIC_DATA(mdic);
   10123 		if (rv == 0xffff)
   10124 			rv = 0;
   10125 	}
   10126 
   10127 	/*
   10128 	 * Allow some time after each MDIC transaction to avoid
   10129 	 * reading duplicate data in the next MDIC transaction.
   10130 	 */
   10131 	if (sc->sc_type == WM_T_PCH2)
   10132 		delay(100);
   10133 
   10134 	return rv;
   10135 }
   10136 
   10137 /*
   10138  * wm_gmii_mdic_writereg:	[mii interface function]
   10139  *
   10140  *	Write a PHY register on the GMII.
   10141  */
   10142 static void
   10143 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10144 {
   10145 	struct wm_softc *sc = device_private(dev);
   10146 	uint32_t mdic = 0;
   10147 	int i;
   10148 
   10149 	if (reg > MII_ADDRMASK) {
   10150 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10151 		    __func__, sc->sc_phytype, reg);
   10152 		reg &= MII_ADDRMASK;
   10153 	}
   10154 
   10155 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10156 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10157 
   10158 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10159 		delay(50);
   10160 		mdic = CSR_READ(sc, WMREG_MDIC);
   10161 		if (mdic & MDIC_READY)
   10162 			break;
   10163 	}
   10164 
   10165 	if ((mdic & MDIC_READY) == 0) {
   10166 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10167 		    device_xname(dev), phy, reg);
   10168 		return;
   10169 	} else if (mdic & MDIC_E) {
   10170 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10171 		    device_xname(dev), phy, reg);
   10172 		return;
   10173 	}
   10174 
   10175 	/*
   10176 	 * Allow some time after each MDIC transaction to avoid
   10177 	 * reading duplicate data in the next MDIC transaction.
   10178 	 */
   10179 	if (sc->sc_type == WM_T_PCH2)
   10180 		delay(100);
   10181 }
   10182 
   10183 /*
   10184  * wm_gmii_i82544_readreg:	[mii interface function]
   10185  *
   10186  *	Read a PHY register on the GMII.
   10187  */
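/*
 * For IGP PHYs, a register offset above 0x1f encodes a page, which is
 * written to the page select register before the masked 5-bit address
 * is used.  The other PHY types handled here have a flat register
 * space, so an out-of-range offset is only reported under WM_DEBUG.
 */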
   10188 static int
   10189 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10190 {
   10191 	struct wm_softc *sc = device_private(dev);
   10192 	int rv;
   10193 
   10194 	if (sc->phy.acquire(sc)) {
   10195 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10196 		return 0;
   10197 	}
   10198 
   10199 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10200 		switch (sc->sc_phytype) {
   10201 		case WMPHY_IGP:
   10202 		case WMPHY_IGP_2:
   10203 		case WMPHY_IGP_3:
   10204 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10205 			    reg);
   10206 			break;
   10207 		default:
   10208 #ifdef WM_DEBUG
   10209 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10210 			    __func__, sc->sc_phytype, reg);
   10211 #endif
   10212 			break;
   10213 		}
   10214 	}
   10215 
   10216 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10217 	sc->phy.release(sc);
   10218 
   10219 	return rv;
   10220 }
   10221 
   10222 /*
   10223  * wm_gmii_i82544_writereg:	[mii interface function]
   10224  *
   10225  *	Write a PHY register on the GMII.
   10226  */
   10227 static void
   10228 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10229 {
   10230 	struct wm_softc *sc = device_private(dev);
   10231 
   10232 	if (sc->phy.acquire(sc)) {
   10233 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10234 		return;
   10235 	}
   10236 
   10237 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10238 		switch (sc->sc_phytype) {
   10239 		case WMPHY_IGP:
   10240 		case WMPHY_IGP_2:
   10241 		case WMPHY_IGP_3:
   10242 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10243 			    reg);
   10244 			break;
   10245 		default:
   10246 #ifdef WM_DEBUG
   10247 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10248 			    __func__, sc->sc_phytype, reg);
   10249 #endif
   10250 			break;
   10251 		}
   10252 	}
   10253 
   10254 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10255 	sc->phy.release(sc);
   10256 }
   10257 
   10258 /*
   10259  * wm_gmii_i80003_readreg:	[mii interface function]
   10260  *
    10261  *	Read a PHY register on the kumeran interface (80003).
    10262  * This could be handled by the PHY layer if we didn't have to lock the
    10263  * resource ...
   10264  */
   10265 static int
   10266 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10267 {
   10268 	struct wm_softc *sc = device_private(dev);
   10269 	int page_select, temp;
   10270 	int rv;
   10271 
   10272 	if (phy != 1) /* only one PHY on kumeran bus */
   10273 		return 0;
   10274 
   10275 	if (sc->phy.acquire(sc)) {
   10276 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10277 		return 0;
   10278 	}
   10279 
   10280 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10281 		page_select = GG82563_PHY_PAGE_SELECT;
   10282 	else {
   10283 		/*
   10284 		 * Use Alternative Page Select register to access registers
   10285 		 * 30 and 31.
   10286 		 */
   10287 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10288 	}
   10289 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10290 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10291 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    10292 		/*
    10293 		 * Wait an additional 200us to work around a bug with the
    10294 		 * ready bit in the MDIC register.
    10295 		 */
   10296 		delay(200);
   10297 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10298 			device_printf(dev, "%s failed\n", __func__);
   10299 			rv = 0; /* XXX */
   10300 			goto out;
   10301 		}
   10302 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10303 		delay(200);
   10304 	} else
   10305 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10306 
   10307 out:
   10308 	sc->phy.release(sc);
   10309 	return rv;
   10310 }
   10311 
   10312 /*
   10313  * wm_gmii_i80003_writereg:	[mii interface function]
   10314  *
    10315  *	Write a PHY register on the kumeran interface (80003).
    10316  * This could be handled by the PHY layer if we didn't have to lock the
    10317  * resource ...
   10318  */
   10319 static void
   10320 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10321 {
   10322 	struct wm_softc *sc = device_private(dev);
   10323 	int page_select, temp;
   10324 
   10325 	if (phy != 1) /* only one PHY on kumeran bus */
   10326 		return;
   10327 
   10328 	if (sc->phy.acquire(sc)) {
   10329 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10330 		return;
   10331 	}
   10332 
   10333 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10334 		page_select = GG82563_PHY_PAGE_SELECT;
   10335 	else {
   10336 		/*
   10337 		 * Use Alternative Page Select register to access registers
   10338 		 * 30 and 31.
   10339 		 */
   10340 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10341 	}
   10342 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10343 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10344 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    10345 		/*
    10346 		 * Wait an additional 200us to work around a bug with the
    10347 		 * ready bit in the MDIC register.
    10348 		 */
   10349 		delay(200);
   10350 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10351 			device_printf(dev, "%s failed\n", __func__);
   10352 			goto out;
   10353 		}
   10354 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10355 		delay(200);
   10356 	} else
   10357 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10358 
   10359 out:
   10360 	sc->phy.release(sc);
   10361 }
   10362 
   10363 /*
   10364  * wm_gmii_bm_readreg:	[mii interface function]
   10365  *
    10366  *	Read a PHY register on the BM PHY.
    10367  * This could be handled by the PHY layer if we didn't have to lock the
    10368  * resource ...
   10369  */
   10370 static int
   10371 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10372 {
   10373 	struct wm_softc *sc = device_private(dev);
   10374 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10375 	uint16_t val;
   10376 	int rv;
   10377 
   10378 	if (sc->phy.acquire(sc)) {
   10379 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10380 		return 0;
   10381 	}
   10382 
   10383 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10384 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10385 		    || (reg == 31)) ? 1 : phy;
   10386 	/* Page 800 works differently than the rest so it has its own func */
   10387 	if (page == BM_WUC_PAGE) {
   10388 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10389 		rv = val;
   10390 		goto release;
   10391 	}
   10392 
   10393 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10394 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10395 		    && (sc->sc_type != WM_T_82583))
   10396 			wm_gmii_mdic_writereg(dev, phy,
   10397 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10398 		else
   10399 			wm_gmii_mdic_writereg(dev, phy,
   10400 			    BME1000_PHY_PAGE_SELECT, page);
   10401 	}
   10402 
   10403 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10404 
   10405 release:
   10406 	sc->phy.release(sc);
   10407 	return rv;
   10408 }
   10409 
   10410 /*
   10411  * wm_gmii_bm_writereg:	[mii interface function]
   10412  *
    10413  *	Write a PHY register on the BM PHY.
    10414  * This could be handled by the PHY layer if we didn't have to lock the
    10415  * resource ...
   10416  */
   10417 static void
   10418 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10419 {
   10420 	struct wm_softc *sc = device_private(dev);
   10421 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10422 
   10423 	if (sc->phy.acquire(sc)) {
   10424 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10425 		return;
   10426 	}
   10427 
   10428 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10429 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10430 		    || (reg == 31)) ? 1 : phy;
   10431 	/* Page 800 works differently than the rest so it has its own func */
   10432 	if (page == BM_WUC_PAGE) {
   10433 		uint16_t tmp;
   10434 
   10435 		tmp = val;
   10436 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10437 		goto release;
   10438 	}
   10439 
   10440 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10441 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10442 		    && (sc->sc_type != WM_T_82583))
   10443 			wm_gmii_mdic_writereg(dev, phy,
   10444 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10445 		else
   10446 			wm_gmii_mdic_writereg(dev, phy,
   10447 			    BME1000_PHY_PAGE_SELECT, page);
   10448 	}
   10449 
   10450 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10451 
   10452 release:
   10453 	sc->phy.release(sc);
   10454 }
   10455 
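/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register on page 800.
 * Wakeup registers need a three-step dance: enable access on the
 * enable page (769), perform the access through the address/data
 * opcode registers on page 800, then restore the saved WUCE value.
 */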
   10456 static void
    10457 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10458 {
   10459 	struct wm_softc *sc = device_private(dev);
   10460 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10461 	uint16_t wuce, reg;
   10462 
   10463 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10464 		device_xname(dev), __func__));
   10465 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10466 	if (sc->sc_type == WM_T_PCH) {
    10467 		/* XXX The e1000 driver does nothing here... why? */
   10468 	}
   10469 
   10470 	/*
   10471 	 * 1) Enable PHY wakeup register first.
   10472 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10473 	 */
   10474 
   10475 	/* Set page 769 */
   10476 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10477 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10478 
   10479 	/* Read WUCE and save it */
   10480 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10481 
   10482 	reg = wuce | BM_WUC_ENABLE_BIT;
   10483 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10484 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10485 
   10486 	/* Select page 800 */
   10487 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10488 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10489 
   10490 	/*
   10491 	 * 2) Access PHY wakeup register.
   10492 	 * See e1000_access_phy_wakeup_reg_bm.
   10493 	 */
   10494 
   10495 	/* Write page 800 */
   10496 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10497 
   10498 	if (rd)
   10499 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10500 	else
   10501 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10502 
   10503 	/*
   10504 	 * 3) Disable PHY wakeup register.
   10505 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10506 	 */
   10507 	/* Set page 769 */
   10508 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10509 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10510 
   10511 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10512 }
   10513 
   10514 /*
   10515  * wm_gmii_hv_readreg:	[mii interface function]
   10516  *
    10517  *	Read a PHY register on the HV (PCH and newer) PHY.
    10518  * This could be handled by the PHY layer if we didn't have to lock the
    10519  * resource ...
   10520  */
   10521 static int
   10522 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10523 {
   10524 	struct wm_softc *sc = device_private(dev);
   10525 	int rv;
   10526 
   10527 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10528 		device_xname(dev), __func__));
   10529 	if (sc->phy.acquire(sc)) {
   10530 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10531 		return 0;
   10532 	}
   10533 
   10534 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10535 	sc->phy.release(sc);
   10536 	return rv;
   10537 }
   10538 
   10539 static int
   10540 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10541 {
   10542 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10543 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10544 	uint16_t val;
   10545 	int rv;
   10546 
   10547 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10548 
   10549 	/* Page 800 works differently than the rest so it has its own func */
   10550 	if (page == BM_WUC_PAGE) {
   10551 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10552 		return val;
   10553 	}
   10554 
    10555 	/*
    10556 	 * Pages lower than 768 work differently than the rest, so they
    10557 	 * would need their own function (which is not implemented yet).
    10558 	 */
   10559 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10560 		printf("gmii_hv_readreg!!!\n");
   10561 		return 0;
   10562 	}
   10563 
   10564 	/*
   10565 	 * XXX I21[789] documents say that the SMBus Address register is at
   10566 	 * PHY address 01, Page 0 (not 768), Register 26.
   10567 	 */
   10568 	if (page == HV_INTC_FC_PAGE_START)
   10569 		page = 0;
   10570 
   10571 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10572 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10573 		    page << BME1000_PAGE_SHIFT);
   10574 	}
   10575 
   10576 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10577 	return rv;
   10578 }
   10579 
   10580 /*
   10581  * wm_gmii_hv_writereg:	[mii interface function]
   10582  *
    10583  *	Write a PHY register on the HV (PCH and newer) PHY.
    10584  * This could be handled by the PHY layer if we didn't have to lock the
    10585  * resource ...
   10586  */
   10587 static void
   10588 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10589 {
   10590 	struct wm_softc *sc = device_private(dev);
   10591 
   10592 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10593 		device_xname(dev), __func__));
   10594 
   10595 	if (sc->phy.acquire(sc)) {
   10596 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10597 		return;
   10598 	}
   10599 
   10600 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10601 	sc->phy.release(sc);
   10602 }
   10603 
   10604 static void
   10605 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10606 {
   10607 	struct wm_softc *sc = device_private(dev);
   10608 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10609 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10610 
   10611 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10612 
   10613 	/* Page 800 works differently than the rest so it has its own func */
   10614 	if (page == BM_WUC_PAGE) {
   10615 		uint16_t tmp;
   10616 
   10617 		tmp = val;
   10618 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10619 		return;
   10620 	}
   10621 
    10622 	/*
    10623 	 * Pages lower than 768 work differently than the rest, so they
    10624 	 * would need their own function (which is not implemented yet).
    10625 	 */
   10626 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10627 		printf("gmii_hv_writereg!!!\n");
   10628 		return;
   10629 	}
   10630 
   10631 	{
   10632 		/*
   10633 		 * XXX I21[789] documents say that the SMBus Address register
   10634 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10635 		 */
   10636 		if (page == HV_INTC_FC_PAGE_START)
   10637 			page = 0;
   10638 
   10639 		/*
   10640 		 * XXX Workaround MDIO accesses being disabled after entering
   10641 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10642 		 * register is set)
   10643 		 */
   10644 		if (sc->sc_phytype == WMPHY_82578) {
   10645 			struct mii_softc *child;
   10646 
   10647 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10648 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10649 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10650 			    && ((val & (1 << 11)) != 0)) {
   10651 				printf("XXX need workaround\n");
   10652 			}
   10653 		}
   10654 
   10655 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10656 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10657 			    page << BME1000_PAGE_SHIFT);
   10658 		}
   10659 	}
   10660 
   10661 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10662 }
   10663 
   10664 /*
   10665  * wm_gmii_82580_readreg:	[mii interface function]
   10666  *
   10667  *	Read a PHY register on the 82580 and I350.
   10668  * This could be handled by the PHY layer if we didn't have to lock the
    10669  * resource ...
   10670  */
   10671 static int
   10672 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10673 {
   10674 	struct wm_softc *sc = device_private(dev);
   10675 	int rv;
   10676 
   10677 	if (sc->phy.acquire(sc) != 0) {
   10678 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10679 		return 0;
   10680 	}
   10681 
   10682 #ifdef DIAGNOSTIC
   10683 	if (reg > MII_ADDRMASK) {
   10684 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10685 		    __func__, sc->sc_phytype, reg);
   10686 		reg &= MII_ADDRMASK;
   10687 	}
   10688 #endif
   10689 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10690 
   10691 	sc->phy.release(sc);
   10692 	return rv;
   10693 }
   10694 
   10695 /*
   10696  * wm_gmii_82580_writereg:	[mii interface function]
   10697  *
   10698  *	Write a PHY register on the 82580 and I350.
   10699  * This could be handled by the PHY layer if we didn't have to lock the
    10700  * resource ...
   10701  */
   10702 static void
   10703 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10704 {
   10705 	struct wm_softc *sc = device_private(dev);
   10706 
   10707 	if (sc->phy.acquire(sc) != 0) {
   10708 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10709 		return;
   10710 	}
   10711 
   10712 #ifdef DIAGNOSTIC
   10713 	if (reg > MII_ADDRMASK) {
   10714 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10715 		    __func__, sc->sc_phytype, reg);
   10716 		reg &= MII_ADDRMASK;
   10717 	}
   10718 #endif
   10719 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10720 
   10721 	sc->phy.release(sc);
   10722 }
   10723 
   10724 /*
   10725  * wm_gmii_gs40g_readreg:	[mii interface function]
   10726  *
    10727  *	Read a PHY register on the I210 and I211.
    10728  * This could be handled by the PHY layer if we didn't have to lock the
    10729  * resource ...
   10730  */
   10731 static int
   10732 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10733 {
   10734 	struct wm_softc *sc = device_private(dev);
   10735 	int page, offset;
   10736 	int rv;
   10737 
   10738 	/* Acquire semaphore */
   10739 	if (sc->phy.acquire(sc)) {
   10740 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10741 		return 0;
   10742 	}
   10743 
   10744 	/* Page select */
   10745 	page = reg >> GS40G_PAGE_SHIFT;
   10746 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10747 
   10748 	/* Read reg */
   10749 	offset = reg & GS40G_OFFSET_MASK;
   10750 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10751 
   10752 	sc->phy.release(sc);
   10753 	return rv;
   10754 }
   10755 
   10756 /*
   10757  * wm_gmii_gs40g_writereg:	[mii interface function]
   10758  *
   10759  *	Write a PHY register on the I210 and I211.
   10760  * This could be handled by the PHY layer if we didn't have to lock the
    10761  * resource ...
   10762  */
   10763 static void
   10764 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10765 {
   10766 	struct wm_softc *sc = device_private(dev);
   10767 	int page, offset;
   10768 
   10769 	/* Acquire semaphore */
   10770 	if (sc->phy.acquire(sc)) {
   10771 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10772 		return;
   10773 	}
   10774 
   10775 	/* Page select */
   10776 	page = reg >> GS40G_PAGE_SHIFT;
   10777 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10778 
   10779 	/* Write reg */
   10780 	offset = reg & GS40G_OFFSET_MASK;
   10781 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10782 
   10783 	/* Release semaphore */
   10784 	sc->phy.release(sc);
   10785 }
   10786 
   10787 /*
   10788  * wm_gmii_statchg:	[mii interface function]
   10789  *
   10790  *	Callback from MII layer when media changes.
   10791  */
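/*
 * The negotiated flow control result is folded back into CTRL
 * (TFCE/RFCE) and FCRTL (XONE), and the TCTL collision distance is set
 * for the resolved duplex.  The 80003 additionally retunes its kumeran
 * half-duplex control and the inter-packet gap for the link speed.
 */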
   10792 static void
   10793 wm_gmii_statchg(struct ifnet *ifp)
   10794 {
   10795 	struct wm_softc *sc = ifp->if_softc;
   10796 	struct mii_data *mii = &sc->sc_mii;
   10797 
   10798 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10799 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10800 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10801 
   10802 	/*
   10803 	 * Get flow control negotiation result.
   10804 	 */
   10805 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10806 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10807 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10808 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10809 	}
   10810 
   10811 	if (sc->sc_flowflags & IFM_FLOW) {
   10812 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10813 			sc->sc_ctrl |= CTRL_TFCE;
   10814 			sc->sc_fcrtl |= FCRTL_XONE;
   10815 		}
   10816 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10817 			sc->sc_ctrl |= CTRL_RFCE;
   10818 	}
   10819 
   10820 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10821 		DPRINTF(WM_DEBUG_LINK,
   10822 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10823 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10824 	} else {
   10825 		DPRINTF(WM_DEBUG_LINK,
   10826 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10827 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10828 	}
   10829 
   10830 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10831 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10832 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10833 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10834 	if (sc->sc_type == WM_T_80003) {
   10835 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10836 		case IFM_1000_T:
   10837 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10838 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10839 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10840 			break;
   10841 		default:
   10842 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10843 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10844 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10845 			break;
   10846 		}
   10847 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10848 	}
   10849 }
   10850 
   10851 /* kumeran related (80003, ICH* and PCH*) */
   10852 
   10853 /*
   10854  * wm_kmrn_readreg:
   10855  *
   10856  *	Read a kumeran register
   10857  */
   10858 static int
   10859 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10860 {
   10861 	int rv;
   10862 
   10863 	if (sc->sc_type == WM_T_80003)
   10864 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10865 	else
   10866 		rv = sc->phy.acquire(sc);
   10867 	if (rv != 0) {
   10868 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10869 		    __func__);
   10870 		return rv;
   10871 	}
   10872 
   10873 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10874 
   10875 	if (sc->sc_type == WM_T_80003)
   10876 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10877 	else
   10878 		sc->phy.release(sc);
   10879 
   10880 	return rv;
   10881 }
   10882 
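/*
 * The KUMCTRLSTA handshake used below: write the register offset with
 * KUMCTRLSTA_REN set, wait 2us, then read the data back from the low
 * bits of the same register.  A write simply combines the offset and
 * the data in a single register write, with no ready bit to poll.
 */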
   10883 static int
   10884 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10885 {
   10886 
   10887 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10888 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10889 	    KUMCTRLSTA_REN);
   10890 	CSR_WRITE_FLUSH(sc);
   10891 	delay(2);
   10892 
   10893 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10894 
   10895 	return 0;
   10896 }
   10897 
   10898 /*
   10899  * wm_kmrn_writereg:
   10900  *
   10901  *	Write a kumeran register
   10902  */
   10903 static int
   10904 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10905 {
   10906 	int rv;
   10907 
   10908 	if (sc->sc_type == WM_T_80003)
   10909 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10910 	else
   10911 		rv = sc->phy.acquire(sc);
   10912 	if (rv != 0) {
   10913 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10914 		    __func__);
   10915 		return rv;
   10916 	}
   10917 
   10918 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10919 
   10920 	if (sc->sc_type == WM_T_80003)
   10921 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10922 	else
   10923 		sc->phy.release(sc);
   10924 
   10925 	return rv;
   10926 }
   10927 
   10928 static int
   10929 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10930 {
   10931 
   10932 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10933 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10934 
   10935 	return 0;
   10936 }
   10937 
   10938 /* SGMII related */
   10939 
   10940 /*
   10941  * wm_sgmii_uses_mdio
   10942  *
   10943  * Check whether the transaction is to the internal PHY or the external
   10944  * MDIO interface. Return true if it's MDIO.
   10945  */
   10946 static bool
   10947 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10948 {
   10949 	uint32_t reg;
   10950 	bool ismdio = false;
   10951 
   10952 	switch (sc->sc_type) {
   10953 	case WM_T_82575:
   10954 	case WM_T_82576:
   10955 		reg = CSR_READ(sc, WMREG_MDIC);
   10956 		ismdio = ((reg & MDIC_DEST) != 0);
   10957 		break;
   10958 	case WM_T_82580:
   10959 	case WM_T_I350:
   10960 	case WM_T_I354:
   10961 	case WM_T_I210:
   10962 	case WM_T_I211:
   10963 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10964 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10965 		break;
   10966 	default:
   10967 		break;
   10968 	}
   10969 
   10970 	return ismdio;
   10971 }
   10972 
   10973 /*
   10974  * wm_sgmii_readreg:	[mii interface function]
   10975  *
   10976  *	Read a PHY register on the SGMII
   10977  * This could be handled by the PHY layer if we didn't have to lock the
    10978  * resource ...
   10979  */
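/*
 * On SGMII, PHY registers are reached through the I2CCMD interface.
 * The data travels on the wire MSB first, so the 16-bit value read
 * back below (and likewise the value written in wm_sgmii_writereg())
 * has to be byte-swapped.
 */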
   10980 static int
   10981 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10982 {
   10983 	struct wm_softc *sc = device_private(dev);
   10984 	uint32_t i2ccmd;
   10985 	int i, rv;
   10986 
   10987 	if (sc->phy.acquire(sc)) {
   10988 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10989 		return 0;
   10990 	}
   10991 
   10992 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10993 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10994 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10995 
   10996 	/* Poll the ready bit */
   10997 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10998 		delay(50);
   10999 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11000 		if (i2ccmd & I2CCMD_READY)
   11001 			break;
   11002 	}
   11003 	if ((i2ccmd & I2CCMD_READY) == 0)
   11004 		device_printf(dev, "I2CCMD Read did not complete\n");
   11005 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11006 		device_printf(dev, "I2CCMD Error bit set\n");
   11007 
   11008 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11009 
   11010 	sc->phy.release(sc);
   11011 	return rv;
   11012 }
   11013 
   11014 /*
   11015  * wm_sgmii_writereg:	[mii interface function]
   11016  *
   11017  *	Write a PHY register on the SGMII.
   11018  * This could be handled by the PHY layer if we didn't have to lock the
    11019  * resource ...
   11020  */
   11021 static void
   11022 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   11023 {
   11024 	struct wm_softc *sc = device_private(dev);
   11025 	uint32_t i2ccmd;
   11026 	int i;
   11027 	int swapdata;
   11028 
   11029 	if (sc->phy.acquire(sc) != 0) {
   11030 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11031 		return;
   11032 	}
   11033 	/* Swap the data bytes for the I2C interface */
   11034 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11035 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11036 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11037 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11038 
   11039 	/* Poll the ready bit */
   11040 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11041 		delay(50);
   11042 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11043 		if (i2ccmd & I2CCMD_READY)
   11044 			break;
   11045 	}
   11046 	if ((i2ccmd & I2CCMD_READY) == 0)
   11047 		device_printf(dev, "I2CCMD Write did not complete\n");
   11048 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11049 		device_printf(dev, "I2CCMD Error bit set\n");
   11050 
   11051 	sc->phy.release(sc);
   11052 }
   11053 
   11054 /* TBI related */
   11055 
   11056 static bool
   11057 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11058 {
   11059 	bool sig;
   11060 
   11061 	sig = ctrl & CTRL_SWDPIN(1);
   11062 
   11063 	/*
   11064 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11065 	 * detect a signal, 1 if they don't.
   11066 	 */
   11067 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11068 		sig = !sig;
   11069 
   11070 	return sig;
   11071 }
   11072 
   11073 /*
   11074  * wm_tbi_mediainit:
   11075  *
   11076  *	Initialize media for use on 1000BASE-X devices.
   11077  */
   11078 static void
   11079 wm_tbi_mediainit(struct wm_softc *sc)
   11080 {
   11081 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11082 	const char *sep = "";
   11083 
   11084 	if (sc->sc_type < WM_T_82543)
   11085 		sc->sc_tipg = TIPG_WM_DFLT;
   11086 	else
   11087 		sc->sc_tipg = TIPG_LG_DFLT;
   11088 
   11089 	sc->sc_tbi_serdes_anegticks = 5;
   11090 
   11091 	/* Initialize our media structures */
   11092 	sc->sc_mii.mii_ifp = ifp;
   11093 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11094 
   11095 	if ((sc->sc_type >= WM_T_82575)
   11096 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11097 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11098 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11099 	else
   11100 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11101 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11102 
   11103 	/*
   11104 	 * SWD Pins:
   11105 	 *
   11106 	 *	0 = Link LED (output)
   11107 	 *	1 = Loss Of Signal (input)
   11108 	 */
   11109 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11110 
   11111 	/* XXX Perhaps this is only for TBI */
   11112 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11113 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11114 
   11115 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11116 		sc->sc_ctrl &= ~CTRL_LRST;
   11117 
   11118 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11119 
   11120 #define	ADD(ss, mm, dd)							\
   11121 do {									\
   11122 	aprint_normal("%s%s", sep, ss);					\
   11123 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11124 	sep = ", ";							\
   11125 } while (/*CONSTCOND*/0)
   11126 
   11127 	aprint_normal_dev(sc->sc_dev, "");
   11128 
   11129 	if (sc->sc_type == WM_T_I354) {
   11130 		uint32_t status;
   11131 
   11132 		status = CSR_READ(sc, WMREG_STATUS);
   11133 		if (((status & STATUS_2P5_SKU) != 0)
   11134 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11135 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11136 		} else
   11137 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11138 	} else if (sc->sc_type == WM_T_82545) {
   11139 		/* Only 82545 is LX (XXX except SFP) */
   11140 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11141 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11142 	} else {
   11143 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11144 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11145 	}
   11146 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11147 	aprint_normal("\n");
   11148 
   11149 #undef ADD
   11150 
   11151 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11152 }
   11153 
   11154 /*
   11155  * wm_tbi_mediachange:	[ifmedia interface function]
   11156  *
   11157  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11158  */
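/*
 * For 1000BASE-X, autonegotiation is driven through the TXCW register,
 * which carries the 802.3z base-page code word: TXCW_ANE enables
 * autonegotiation, and the FD/HD and pause bits advertise our
 * abilities, much as ANAR does for a copper PHY.
 */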
   11159 static int
   11160 wm_tbi_mediachange(struct ifnet *ifp)
   11161 {
   11162 	struct wm_softc *sc = ifp->if_softc;
   11163 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11164 	uint32_t status, ctrl;
   11165 	bool signal;
   11166 	int i;
   11167 
   11168 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11169 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11170 		/* XXX need some work for >= 82571 and < 82575 */
   11171 		if (sc->sc_type < WM_T_82575)
   11172 			return 0;
   11173 	}
   11174 
   11175 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11176 	    || (sc->sc_type >= WM_T_82575))
   11177 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11178 
   11179 	sc->sc_ctrl &= ~CTRL_LRST;
   11180 	sc->sc_txcw = TXCW_ANE;
   11181 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11182 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11183 	else if (ife->ifm_media & IFM_FDX)
   11184 		sc->sc_txcw |= TXCW_FD;
   11185 	else
   11186 		sc->sc_txcw |= TXCW_HD;
   11187 
   11188 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11189 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11190 
   11191 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11192 		device_xname(sc->sc_dev), sc->sc_txcw));
   11193 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11194 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11195 	CSR_WRITE_FLUSH(sc);
   11196 	delay(1000);
   11197 
   11198 	ctrl =  CSR_READ(sc, WMREG_CTRL);
   11199 	signal = wm_tbi_havesignal(sc, ctrl);
   11200 
   11201 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11202 		signal));
   11203 
   11204 	if (signal) {
   11205 		/* Have signal; wait for the link to come up. */
   11206 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11207 			delay(10000);
   11208 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11209 				break;
   11210 		}
   11211 
   11212 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11213 			device_xname(sc->sc_dev),i));
   11214 
   11215 		status = CSR_READ(sc, WMREG_STATUS);
   11216 		DPRINTF(WM_DEBUG_LINK,
   11217 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11218 			device_xname(sc->sc_dev),status, STATUS_LU));
   11219 		if (status & STATUS_LU) {
   11220 			/* Link is up. */
   11221 			DPRINTF(WM_DEBUG_LINK,
   11222 			    ("%s: LINK: set media -> link up %s\n",
   11223 				device_xname(sc->sc_dev),
   11224 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11225 
   11226 			/*
   11227 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11228 			 * so we should update sc->sc_ctrl
   11229 			 */
   11230 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11231 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11232 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11233 			if (status & STATUS_FD)
   11234 				sc->sc_tctl |=
   11235 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11236 			else
   11237 				sc->sc_tctl |=
   11238 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11239 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11240 				sc->sc_fcrtl |= FCRTL_XONE;
   11241 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11242 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11243 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11244 			sc->sc_tbi_linkup = 1;
   11245 		} else {
   11246 			if (i == WM_LINKUP_TIMEOUT)
   11247 				wm_check_for_link(sc);
   11248 			/* Link is down. */
   11249 			DPRINTF(WM_DEBUG_LINK,
   11250 			    ("%s: LINK: set media -> link down\n",
   11251 				device_xname(sc->sc_dev)));
   11252 			sc->sc_tbi_linkup = 0;
   11253 		}
   11254 	} else {
   11255 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11256 			device_xname(sc->sc_dev)));
   11257 		sc->sc_tbi_linkup = 0;
   11258 	}
   11259 
   11260 	wm_tbi_serdes_set_linkled(sc);
   11261 
   11262 	return 0;
   11263 }
   11264 
   11265 /*
   11266  * wm_tbi_mediastatus:	[ifmedia interface function]
   11267  *
   11268  *	Get the current interface media status on a 1000BASE-X device.
   11269  */
   11270 static void
   11271 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11272 {
   11273 	struct wm_softc *sc = ifp->if_softc;
   11274 	uint32_t ctrl, status;
   11275 
   11276 	ifmr->ifm_status = IFM_AVALID;
   11277 	ifmr->ifm_active = IFM_ETHER;
   11278 
   11279 	status = CSR_READ(sc, WMREG_STATUS);
   11280 	if ((status & STATUS_LU) == 0) {
   11281 		ifmr->ifm_active |= IFM_NONE;
   11282 		return;
   11283 	}
   11284 
   11285 	ifmr->ifm_status |= IFM_ACTIVE;
   11286 	/* Only 82545 is LX */
   11287 	if (sc->sc_type == WM_T_82545)
   11288 		ifmr->ifm_active |= IFM_1000_LX;
   11289 	else
   11290 		ifmr->ifm_active |= IFM_1000_SX;
   11291 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11292 		ifmr->ifm_active |= IFM_FDX;
   11293 	else
   11294 		ifmr->ifm_active |= IFM_HDX;
   11295 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11296 	if (ctrl & CTRL_RFCE)
   11297 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11298 	if (ctrl & CTRL_TFCE)
   11299 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11300 }
   11301 
   11302 /* XXX TBI only */
   11303 static int
   11304 wm_check_for_link(struct wm_softc *sc)
   11305 {
   11306 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11307 	uint32_t rxcw;
   11308 	uint32_t ctrl;
   11309 	uint32_t status;
   11310 	bool signal;
   11311 
   11312 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11313 		device_xname(sc->sc_dev), __func__));
   11314 
   11315 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11316 		/* XXX need some work for >= 82571 */
   11317 		if (sc->sc_type >= WM_T_82571) {
   11318 			sc->sc_tbi_linkup = 1;
   11319 			return 0;
   11320 		}
   11321 	}
   11322 
   11323 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11324 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11325 	status = CSR_READ(sc, WMREG_STATUS);
   11326 	signal = wm_tbi_havesignal(sc, ctrl);
   11327 
   11328 	DPRINTF(WM_DEBUG_LINK,
   11329 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11330 		device_xname(sc->sc_dev), __func__, signal,
   11331 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11332 
   11333 	/*
   11334 	 * SWDPIN   LU RXCW
   11335 	 *	0    0	  0
   11336 	 *	0    0	  1	(should not happen)
   11337 	 *	0    1	  0	(should not happen)
   11338 	 *	0    1	  1	(should not happen)
    11339 	 *	1    0	  0	Disable autonegotiation and force link up
    11340 	 *	1    0	  1	Got /C/ but no link yet
    11341 	 *	1    1	  0	(link up)
    11342 	 *	1    1	  1	If IFM_AUTO, restart autonegotiation
   11343 	 *
   11344 	 */
   11345 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11346 		DPRINTF(WM_DEBUG_LINK,
   11347 		    ("%s: %s: force linkup and fullduplex\n",
   11348 			device_xname(sc->sc_dev), __func__));
   11349 		sc->sc_tbi_linkup = 0;
   11350 		/* Disable auto-negotiation in the TXCW register */
   11351 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11352 
   11353 		/*
   11354 		 * Force link-up and also force full-duplex.
   11355 		 *
    11356 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
    11357 		 * automatically, so we must refresh sc->sc_ctrl
   11358 		 */
   11359 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11360 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11361 	} else if (((status & STATUS_LU) != 0)
   11362 	    && ((rxcw & RXCW_C) != 0)
   11363 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11364 		sc->sc_tbi_linkup = 1;
   11365 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11366 			device_xname(sc->sc_dev),
   11367 			__func__));
   11368 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11369 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11370 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   11371 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   11372 			device_xname(sc->sc_dev), __func__));
   11373 	} else {
   11374 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11375 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11376 			status));
   11377 	}
   11378 
   11379 	return 0;
   11380 }
   11381 
   11382 /*
   11383  * wm_tbi_tick:
   11384  *
   11385  *	Check the link on TBI devices.
   11386  *	This function acts as mii_tick().
   11387  */
   11388 static void
   11389 wm_tbi_tick(struct wm_softc *sc)
   11390 {
   11391 	struct mii_data *mii = &sc->sc_mii;
   11392 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11393 	uint32_t status;
   11394 
   11395 	KASSERT(WM_CORE_LOCKED(sc));
   11396 
   11397 	status = CSR_READ(sc, WMREG_STATUS);
   11398 
   11399 	/* XXX is this needed? */
   11400 	(void)CSR_READ(sc, WMREG_RXCW);
   11401 	(void)CSR_READ(sc, WMREG_CTRL);
   11402 
   11403 	/* set link status */
   11404 	if ((status & STATUS_LU) == 0) {
   11405 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11406 			device_xname(sc->sc_dev)));
   11407 		sc->sc_tbi_linkup = 0;
   11408 	} else if (sc->sc_tbi_linkup == 0) {
   11409 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11410 			device_xname(sc->sc_dev),
   11411 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11412 		sc->sc_tbi_linkup = 1;
   11413 		sc->sc_tbi_serdes_ticks = 0;
   11414 	}
   11415 
   11416 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11417 		goto setled;
   11418 
   11419 	if ((status & STATUS_LU) == 0) {
   11420 		sc->sc_tbi_linkup = 0;
   11421 		/* If the timer expired, retry autonegotiation */
   11422 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11423 		    && (++sc->sc_tbi_serdes_ticks
   11424 			>= sc->sc_tbi_serdes_anegticks)) {
   11425 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11426 			sc->sc_tbi_serdes_ticks = 0;
   11427 			/*
   11428 			 * Reset the link, and let autonegotiation do
   11429 			 * its thing
   11430 			 */
   11431 			sc->sc_ctrl |= CTRL_LRST;
   11432 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11433 			CSR_WRITE_FLUSH(sc);
   11434 			delay(1000);
   11435 			sc->sc_ctrl &= ~CTRL_LRST;
   11436 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11437 			CSR_WRITE_FLUSH(sc);
   11438 			delay(1000);
   11439 			CSR_WRITE(sc, WMREG_TXCW,
   11440 			    sc->sc_txcw & ~TXCW_ANE);
   11441 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11442 		}
   11443 	}
   11444 
   11445 setled:
   11446 	wm_tbi_serdes_set_linkled(sc);
   11447 }
   11448 
   11449 /* SERDES related */
   11450 static void
   11451 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11452 {
   11453 	uint32_t reg;
   11454 
   11455 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11456 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11457 		return;
   11458 
   11459 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11460 	reg |= PCS_CFG_PCS_EN;
   11461 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11462 
   11463 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11464 	reg &= ~CTRL_EXT_SWDPIN(3);
   11465 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11466 	CSR_WRITE_FLUSH(sc);
   11467 }
   11468 
   11469 static int
   11470 wm_serdes_mediachange(struct ifnet *ifp)
   11471 {
   11472 	struct wm_softc *sc = ifp->if_softc;
   11473 	bool pcs_autoneg = true; /* XXX */
   11474 	uint32_t ctrl_ext, pcs_lctl, reg;
   11475 
   11476 	/* XXX Currently, this function is not called on 8257[12] */
   11477 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11478 	    || (sc->sc_type >= WM_T_82575))
   11479 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11480 
   11481 	wm_serdes_power_up_link_82575(sc);
   11482 
   11483 	sc->sc_ctrl |= CTRL_SLU;
   11484 
   11485 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11486 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11487 
   11488 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11489 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11490 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11491 	case CTRL_EXT_LINK_MODE_SGMII:
   11492 		pcs_autoneg = true;
   11493 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11494 		break;
   11495 	case CTRL_EXT_LINK_MODE_1000KX:
   11496 		pcs_autoneg = false;
   11497 		/* FALLTHROUGH */
   11498 	default:
   11499 		if ((sc->sc_type == WM_T_82575)
   11500 		    || (sc->sc_type == WM_T_82576)) {
   11501 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11502 				pcs_autoneg = false;
   11503 		}
   11504 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11505 		    | CTRL_FRCFDX;
   11506 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11507 	}
   11508 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11509 
   11510 	if (pcs_autoneg) {
   11511 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11512 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11513 
   11514 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11515 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11516 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11517 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11518 	} else
   11519 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11520 
   11521 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    11522 
   11524 	return 0;
   11525 }
   11526 
   11527 static void
   11528 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11529 {
   11530 	struct wm_softc *sc = ifp->if_softc;
   11531 	struct mii_data *mii = &sc->sc_mii;
   11532 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11533 	uint32_t pcs_adv, pcs_lpab, reg;
   11534 
   11535 	ifmr->ifm_status = IFM_AVALID;
   11536 	ifmr->ifm_active = IFM_ETHER;
   11537 
   11538 	/* Check PCS */
   11539 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11540 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11541 		ifmr->ifm_active |= IFM_NONE;
   11542 		sc->sc_tbi_linkup = 0;
   11543 		goto setled;
   11544 	}
   11545 
   11546 	sc->sc_tbi_linkup = 1;
   11547 	ifmr->ifm_status |= IFM_ACTIVE;
   11548 	if (sc->sc_type == WM_T_I354) {
   11549 		uint32_t status;
   11550 
   11551 		status = CSR_READ(sc, WMREG_STATUS);
   11552 		if (((status & STATUS_2P5_SKU) != 0)
   11553 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11554 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11555 		} else
   11556 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11557 	} else {
   11558 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11559 		case PCS_LSTS_SPEED_10:
   11560 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11561 			break;
   11562 		case PCS_LSTS_SPEED_100:
   11563 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11564 			break;
   11565 		case PCS_LSTS_SPEED_1000:
   11566 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11567 			break;
   11568 		default:
   11569 			device_printf(sc->sc_dev, "Unknown speed\n");
   11570 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11571 			break;
   11572 		}
   11573 	}
   11574 	if ((reg & PCS_LSTS_FDX) != 0)
   11575 		ifmr->ifm_active |= IFM_FDX;
   11576 	else
   11577 		ifmr->ifm_active |= IFM_HDX;
   11578 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11579 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11580 		/* Check flow */
   11581 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11582 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11583 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11584 			goto setled;
   11585 		}
   11586 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11587 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11588 		DPRINTF(WM_DEBUG_LINK,
   11589 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11590 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11591 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11592 			mii->mii_media_active |= IFM_FLOW
   11593 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11594 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11595 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11596 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11597 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11598 			mii->mii_media_active |= IFM_FLOW
   11599 			    | IFM_ETH_TXPAUSE;
   11600 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11601 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11602 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11603 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11604 			mii->mii_media_active |= IFM_FLOW
   11605 			    | IFM_ETH_RXPAUSE;
   11606 		}
   11607 	}
   11608 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11609 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11610 setled:
   11611 	wm_tbi_serdes_set_linkled(sc);
   11612 }
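
          /*
           * The three flow-control cases above implement the standard
           * IEEE 802.3 Annex 28B pause resolution from the advertised
           * (PCS_ANADV) and link partner (PCS_LPAB) ability bits.  A
           * compact summary of the same logic, for reference only:
           *
           *	Local		Partner
           *	SYM  ASYM	SYM  ASYM	Result
           *	 1    x		 1    x		TX and RX pause
           *	 0    1		 1    1		TX pause only
           *	 1    1		 0    1		RX pause only
           *	otherwise			no pause
           */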
   11613 
   11614 /*
   11615  * wm_serdes_tick:
   11616  *
   11617  *	Check the link on serdes devices.
   11618  */
   11619 static void
   11620 wm_serdes_tick(struct wm_softc *sc)
   11621 {
   11622 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11623 	struct mii_data *mii = &sc->sc_mii;
   11624 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11625 	uint32_t reg;
   11626 
   11627 	KASSERT(WM_CORE_LOCKED(sc));
   11628 
   11629 	mii->mii_media_status = IFM_AVALID;
   11630 	mii->mii_media_active = IFM_ETHER;
   11631 
   11632 	/* Check PCS */
   11633 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11634 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11635 		mii->mii_media_status |= IFM_ACTIVE;
   11636 		sc->sc_tbi_linkup = 1;
   11637 		sc->sc_tbi_serdes_ticks = 0;
   11638 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11639 		if ((reg & PCS_LSTS_FDX) != 0)
   11640 			mii->mii_media_active |= IFM_FDX;
   11641 		else
   11642 			mii->mii_media_active |= IFM_HDX;
   11643 	} else {
    11644 		mii->mii_media_active |= IFM_NONE;
   11645 		sc->sc_tbi_linkup = 0;
   11646 		/* If the timer expired, retry autonegotiation */
   11647 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11648 		    && (++sc->sc_tbi_serdes_ticks
   11649 			>= sc->sc_tbi_serdes_anegticks)) {
   11650 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11651 			sc->sc_tbi_serdes_ticks = 0;
   11652 			/* XXX */
   11653 			wm_serdes_mediachange(ifp);
   11654 		}
   11655 	}
   11656 
   11657 	wm_tbi_serdes_set_linkled(sc);
   11658 }
   11659 
   11660 /* SFP related */
   11661 
   11662 static int
   11663 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11664 {
   11665 	uint32_t i2ccmd;
   11666 	int i;
   11667 
   11668 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11669 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11670 
   11671 	/* Poll the ready bit */
   11672 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11673 		delay(50);
   11674 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11675 		if (i2ccmd & I2CCMD_READY)
   11676 			break;
   11677 	}
   11678 	if ((i2ccmd & I2CCMD_READY) == 0)
   11679 		return -1;
   11680 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11681 		return -1;
   11682 
   11683 	*data = i2ccmd & 0x00ff;
   11684 
   11685 	return 0;
   11686 }
   11687 
   11688 static uint32_t
   11689 wm_sfp_get_media_type(struct wm_softc *sc)
   11690 {
   11691 	uint32_t ctrl_ext;
   11692 	uint8_t val = 0;
   11693 	int timeout = 3;
   11694 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11695 	int rv = -1;
   11696 
   11697 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11698 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11699 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11700 	CSR_WRITE_FLUSH(sc);
   11701 
   11702 	/* Read SFP module data */
   11703 	while (timeout) {
   11704 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11705 		if (rv == 0)
   11706 			break;
   11707 		delay(100*1000); /* XXX too big */
   11708 		timeout--;
   11709 	}
   11710 	if (rv != 0)
   11711 		goto out;
   11712 	switch (val) {
   11713 	case SFF_SFP_ID_SFF:
   11714 		aprint_normal_dev(sc->sc_dev,
   11715 		    "Module/Connector soldered to board\n");
   11716 		break;
   11717 	case SFF_SFP_ID_SFP:
   11718 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11719 		break;
   11720 	case SFF_SFP_ID_UNKNOWN:
   11721 		goto out;
   11722 	default:
   11723 		break;
   11724 	}
   11725 
   11726 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11727 	if (rv != 0) {
   11728 		goto out;
   11729 	}
   11730 
   11731 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11732 		mediatype = WM_MEDIATYPE_SERDES;
   11733 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11734 		sc->sc_flags |= WM_F_SGMII;
   11735 		mediatype = WM_MEDIATYPE_COPPER;
   11736 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11737 		sc->sc_flags |= WM_F_SGMII;
   11738 		mediatype = WM_MEDIATYPE_SERDES;
   11739 	}
   11740 
   11741 out:
   11742 	/* Restore I2C interface setting */
   11743 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11744 
   11745 	return mediatype;
   11746 }
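
          /*
           * Summary of the mapping above, derived from the SFF Ethernet
           * compliance flags:
           *
           *	Compliance flag			mediatype	WM_F_SGMII
           *	1000BASE-SX or 1000BASE-LX	SERDES		unchanged
           *	1000BASE-T			COPPER		set
           *	100BASE-FX			SERDES		set
           *	anything else			UNKNOWN		unchanged
           */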
   11747 
   11748 /*
   11749  * NVM related.
   11750  * Microwire, SPI (w/wo EERD) and Flash.
    11751  * Microwire, SPI (with or without EERD) and Flash.
   11752 
   11753 /* Both spi and uwire */
   11754 
   11755 /*
   11756  * wm_eeprom_sendbits:
   11757  *
   11758  *	Send a series of bits to the EEPROM.
   11759  */
   11760 static void
   11761 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11762 {
   11763 	uint32_t reg;
   11764 	int x;
   11765 
   11766 	reg = CSR_READ(sc, WMREG_EECD);
   11767 
   11768 	for (x = nbits; x > 0; x--) {
   11769 		if (bits & (1U << (x - 1)))
   11770 			reg |= EECD_DI;
   11771 		else
   11772 			reg &= ~EECD_DI;
   11773 		CSR_WRITE(sc, WMREG_EECD, reg);
   11774 		CSR_WRITE_FLUSH(sc);
   11775 		delay(2);
   11776 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11777 		CSR_WRITE_FLUSH(sc);
   11778 		delay(2);
   11779 		CSR_WRITE(sc, WMREG_EECD, reg);
   11780 		CSR_WRITE_FLUSH(sc);
   11781 		delay(2);
   11782 	}
   11783 }
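
          /*
           * Usage sketch (illustration only): a Microwire READ of word N
           * is clocked out as the 3-bit READ opcode followed by the
           * address bits, i.e.
           *
           *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
           *	wm_eeprom_sendbits(sc, N, sc->sc_nvm_addrbits);
           *
           * wm_nvm_read_uwire() below does exactly this for each word.
           */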
   11784 
   11785 /*
   11786  * wm_eeprom_recvbits:
   11787  *
   11788  *	Receive a series of bits from the EEPROM.
   11789  */
   11790 static void
   11791 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11792 {
   11793 	uint32_t reg, val;
   11794 	int x;
   11795 
   11796 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11797 
   11798 	val = 0;
   11799 	for (x = nbits; x > 0; x--) {
   11800 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11801 		CSR_WRITE_FLUSH(sc);
   11802 		delay(2);
   11803 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11804 			val |= (1U << (x - 1));
   11805 		CSR_WRITE(sc, WMREG_EECD, reg);
   11806 		CSR_WRITE_FLUSH(sc);
   11807 		delay(2);
   11808 	}
   11809 	*valp = val;
   11810 }
   11811 
   11812 /* Microwire */
   11813 
   11814 /*
   11815  * wm_nvm_read_uwire:
   11816  *
   11817  *	Read a word from the EEPROM using the MicroWire protocol.
   11818  */
   11819 static int
   11820 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11821 {
   11822 	uint32_t reg, val;
   11823 	int i;
   11824 
   11825 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11826 		device_xname(sc->sc_dev), __func__));
   11827 
   11828 	if (sc->nvm.acquire(sc) != 0)
   11829 		return -1;
   11830 
   11831 	for (i = 0; i < wordcnt; i++) {
   11832 		/* Clear SK and DI. */
   11833 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11834 		CSR_WRITE(sc, WMREG_EECD, reg);
   11835 
   11836 		/*
   11837 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11838 		 * and Xen.
   11839 		 *
   11840 		 * We use this workaround only for 82540 because qemu's
   11841 		 * e1000 act as 82540.
   11842 		 */
   11843 		if (sc->sc_type == WM_T_82540) {
   11844 			reg |= EECD_SK;
   11845 			CSR_WRITE(sc, WMREG_EECD, reg);
   11846 			reg &= ~EECD_SK;
   11847 			CSR_WRITE(sc, WMREG_EECD, reg);
   11848 			CSR_WRITE_FLUSH(sc);
   11849 			delay(2);
   11850 		}
   11851 		/* XXX: end of workaround */
   11852 
   11853 		/* Set CHIP SELECT. */
   11854 		reg |= EECD_CS;
   11855 		CSR_WRITE(sc, WMREG_EECD, reg);
   11856 		CSR_WRITE_FLUSH(sc);
   11857 		delay(2);
   11858 
   11859 		/* Shift in the READ command. */
   11860 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11861 
   11862 		/* Shift in address. */
   11863 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11864 
   11865 		/* Shift out the data. */
   11866 		wm_eeprom_recvbits(sc, &val, 16);
   11867 		data[i] = val & 0xffff;
   11868 
   11869 		/* Clear CHIP SELECT. */
   11870 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11871 		CSR_WRITE(sc, WMREG_EECD, reg);
   11872 		CSR_WRITE_FLUSH(sc);
   11873 		delay(2);
   11874 	}
   11875 
   11876 	sc->nvm.release(sc);
   11877 	return 0;
   11878 }
   11879 
   11880 /* SPI */
   11881 
   11882 /*
   11883  * Set SPI and FLASH related information from the EECD register.
   11884  * For 82541 and 82547, the word size is taken from EEPROM.
   11885  */
   11886 static int
   11887 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11888 {
   11889 	int size;
   11890 	uint32_t reg;
   11891 	uint16_t data;
   11892 
   11893 	reg = CSR_READ(sc, WMREG_EECD);
   11894 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11895 
   11896 	/* Read the size of NVM from EECD by default */
   11897 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11898 	switch (sc->sc_type) {
   11899 	case WM_T_82541:
   11900 	case WM_T_82541_2:
   11901 	case WM_T_82547:
   11902 	case WM_T_82547_2:
   11903 		/* Set dummy value to access EEPROM */
   11904 		sc->sc_nvm_wordsize = 64;
   11905 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11906 			aprint_error_dev(sc->sc_dev,
   11907 			    "%s: failed to read EEPROM size\n", __func__);
   11908 		}
   11909 		reg = data;
   11910 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11911 		if (size == 0)
   11912 			size = 6; /* 64 word size */
   11913 		else
   11914 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11915 		break;
   11916 	case WM_T_80003:
   11917 	case WM_T_82571:
   11918 	case WM_T_82572:
   11919 	case WM_T_82573: /* SPI case */
   11920 	case WM_T_82574: /* SPI case */
   11921 	case WM_T_82583: /* SPI case */
   11922 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11923 		if (size > 14)
   11924 			size = 14;
   11925 		break;
   11926 	case WM_T_82575:
   11927 	case WM_T_82576:
   11928 	case WM_T_82580:
   11929 	case WM_T_I350:
   11930 	case WM_T_I354:
   11931 	case WM_T_I210:
   11932 	case WM_T_I211:
   11933 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11934 		if (size > 15)
   11935 			size = 15;
   11936 		break;
   11937 	default:
   11938 		aprint_error_dev(sc->sc_dev,
   11939 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    11940 		return -1;
   11942 	}
   11943 
   11944 	sc->sc_nvm_wordsize = 1 << size;
   11945 
   11946 	return 0;
   11947 }
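
          /*
           * Worked example of the 8254[17] special case above: if the
           * EEPROM size word reads back with a zero size field, size is
           * forced to 6, so sc->sc_nvm_wordsize becomes 1 << 6 = 64
           * words, matching the dummy value used to bootstrap the read.
           */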
   11948 
   11949 /*
   11950  * wm_nvm_ready_spi:
   11951  *
   11952  *	Wait for a SPI EEPROM to be ready for commands.
   11953  */
   11954 static int
   11955 wm_nvm_ready_spi(struct wm_softc *sc)
   11956 {
   11957 	uint32_t val;
   11958 	int usec;
   11959 
   11960 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11961 		device_xname(sc->sc_dev), __func__));
   11962 
   11963 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11964 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11965 		wm_eeprom_recvbits(sc, &val, 8);
   11966 		if ((val & SPI_SR_RDY) == 0)
   11967 			break;
   11968 	}
   11969 	if (usec >= SPI_MAX_RETRIES) {
    11970 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11971 		return -1;
   11972 	}
   11973 	return 0;
   11974 }
   11975 
   11976 /*
   11977  * wm_nvm_read_spi:
   11978  *
    11979  *	Read a word from the EEPROM using the SPI protocol.
   11980  */
   11981 static int
   11982 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11983 {
   11984 	uint32_t reg, val;
   11985 	int i;
   11986 	uint8_t opc;
   11987 	int rv = 0;
   11988 
   11989 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11990 		device_xname(sc->sc_dev), __func__));
   11991 
   11992 	if (sc->nvm.acquire(sc) != 0)
   11993 		return -1;
   11994 
   11995 	/* Clear SK and CS. */
   11996 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11997 	CSR_WRITE(sc, WMREG_EECD, reg);
   11998 	CSR_WRITE_FLUSH(sc);
   11999 	delay(2);
   12000 
   12001 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12002 		goto out;
   12003 
   12004 	/* Toggle CS to flush commands. */
   12005 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12006 	CSR_WRITE_FLUSH(sc);
   12007 	delay(2);
   12008 	CSR_WRITE(sc, WMREG_EECD, reg);
   12009 	CSR_WRITE_FLUSH(sc);
   12010 	delay(2);
   12011 
   12012 	opc = SPI_OPC_READ;
   12013 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12014 		opc |= SPI_OPC_A8;
   12015 
   12016 	wm_eeprom_sendbits(sc, opc, 8);
   12017 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12018 
   12019 	for (i = 0; i < wordcnt; i++) {
   12020 		wm_eeprom_recvbits(sc, &val, 16);
   12021 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12022 	}
   12023 
   12024 	/* Raise CS and clear SK. */
   12025 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12026 	CSR_WRITE(sc, WMREG_EECD, reg);
   12027 	CSR_WRITE_FLUSH(sc);
   12028 	delay(2);
   12029 
   12030 out:
   12031 	sc->nvm.release(sc);
   12032 	return rv;
   12033 }
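
          /*
           * Example of the addressing above: SPI parts are byte
           * addressed, so word N is requested as byte address N << 1.
           * On a part with 8 address bits, word 0x90 (byte address
           * 0x120) no longer fits, so the SPI_OPC_A8 opcode bit
           * supplies the ninth address bit.
           */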
   12034 
    12035 /* Reading with EERD */
   12036 
   12037 static int
   12038 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12039 {
   12040 	uint32_t attempts = 100000;
   12041 	uint32_t i, reg = 0;
   12042 	int32_t done = -1;
   12043 
   12044 	for (i = 0; i < attempts; i++) {
   12045 		reg = CSR_READ(sc, rw);
   12046 
   12047 		if (reg & EERD_DONE) {
   12048 			done = 0;
   12049 			break;
   12050 		}
   12051 		delay(5);
   12052 	}
   12053 
   12054 	return done;
   12055 }
   12056 
   12057 static int
   12058 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12059 {
   12060 	int i, eerd = 0;
   12061 	int rv = 0;
   12062 
   12063 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12064 		device_xname(sc->sc_dev), __func__));
   12065 
   12066 	if (sc->nvm.acquire(sc) != 0)
   12067 		return -1;
   12068 
   12069 	for (i = 0; i < wordcnt; i++) {
   12070 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12071 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12072 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12073 		if (rv != 0) {
   12074 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   12075 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   12076 			break;
   12077 		}
   12078 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12079 	}
   12080 
   12081 	sc->nvm.release(sc);
   12082 	return rv;
   12083 }
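
          /*
           * EERD is a simple mailbox register; the loop above amounts to
           * the following protocol (a sketch, not extra driver code):
           *
           *	EERD <- (addr << EERD_ADDR_SHIFT) | EERD_START
           *	poll until (EERD & EERD_DONE)
           *	data <- EERD >> EERD_DATA_SHIFT
           */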
   12084 
   12085 /* Flash */
   12086 
   12087 static int
   12088 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12089 {
   12090 	uint32_t eecd;
   12091 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12092 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12093 	uint32_t nvm_dword = 0;
   12094 	uint8_t sig_byte = 0;
   12095 	int rv;
   12096 
   12097 	switch (sc->sc_type) {
   12098 	case WM_T_PCH_SPT:
   12099 	case WM_T_PCH_CNP:
   12100 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12101 		act_offset = ICH_NVM_SIG_WORD * 2;
   12102 
   12103 		/* set bank to 0 in case flash read fails. */
   12104 		*bank = 0;
   12105 
   12106 		/* Check bank 0 */
   12107 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12108 		if (rv != 0)
   12109 			return rv;
   12110 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12111 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12112 			*bank = 0;
   12113 			return 0;
   12114 		}
   12115 
   12116 		/* Check bank 1 */
    12117 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12118 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    12119 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12120 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12121 			*bank = 1;
   12122 			return 0;
   12123 		}
   12124 		aprint_error_dev(sc->sc_dev,
   12125 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12126 		return -1;
   12127 	case WM_T_ICH8:
   12128 	case WM_T_ICH9:
   12129 		eecd = CSR_READ(sc, WMREG_EECD);
   12130 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12131 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12132 			return 0;
   12133 		}
   12134 		/* FALLTHROUGH */
   12135 	default:
   12136 		/* Default to 0 */
   12137 		*bank = 0;
   12138 
   12139 		/* Check bank 0 */
   12140 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12141 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12142 			*bank = 0;
   12143 			return 0;
   12144 		}
   12145 
   12146 		/* Check bank 1 */
   12147 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12148 		    &sig_byte);
   12149 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12150 			*bank = 1;
   12151 			return 0;
   12152 		}
   12153 	}
   12154 
   12155 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12156 		device_xname(sc->sc_dev)));
   12157 	return -1;
   12158 }
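
          /*
           * Note on the offsets above: the bank signature lives in the
           * high byte of word ICH_NVM_SIG_WORD.  The byte-access path
           * therefore reads byte (ICH_NVM_SIG_WORD * 2 + 1) directly,
           * while the SPT/CNP path reads the whole dword at
           * (ICH_NVM_SIG_WORD * 2) and picks bits 15:8 out of it.
           */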
   12159 
   12160 /******************************************************************************
   12161  * This function does initial flash setup so that a new read/write/erase cycle
   12162  * can be started.
   12163  *
   12164  * sc - The pointer to the hw structure
   12165  ****************************************************************************/
   12166 static int32_t
   12167 wm_ich8_cycle_init(struct wm_softc *sc)
   12168 {
   12169 	uint16_t hsfsts;
   12170 	int32_t error = 1;
   12171 	int32_t i     = 0;
   12172 
   12173 	if (sc->sc_type >= WM_T_PCH_SPT)
   12174 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12175 	else
   12176 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12177 
    12178 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   12179 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12180 		return error;
   12181 
    12182 	/* Clear FCERR and DAEL in HW status by writing 1 */
   12184 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12185 
   12186 	if (sc->sc_type >= WM_T_PCH_SPT)
   12187 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12188 	else
   12189 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12190 
    12191 	/*
    12192 	 * Either we should have a hardware SPI cycle-in-progress bit
    12193 	 * to check against before starting a new cycle, or the FDONE
    12194 	 * bit should be changed in the hardware so that it reads 1
    12195 	 * after a hardware reset, which could then be used to tell
    12196 	 * whether a cycle is in progress or completed.  We should also
    12197 	 * have a software semaphore guarding FDONE or the in-progress
    12198 	 * bit so that accesses by two threads are serialized, or a way
    12199 	 * to keep two threads from starting a cycle at the same time.
    12200 	 */
   12201 
   12202 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12203 		/*
   12204 		 * There is no cycle running at present, so we can start a
   12205 		 * cycle
   12206 		 */
   12207 
   12208 		/* Begin by setting Flash Cycle Done. */
   12209 		hsfsts |= HSFSTS_DONE;
   12210 		if (sc->sc_type >= WM_T_PCH_SPT)
   12211 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12212 			    hsfsts & 0xffffUL);
   12213 		else
   12214 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12215 		error = 0;
   12216 	} else {
   12217 		/*
    12218 		 * Otherwise, poll for some time so the current cycle has a
   12219 		 * chance to end before giving up.
   12220 		 */
   12221 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12222 			if (sc->sc_type >= WM_T_PCH_SPT)
   12223 				hsfsts = ICH8_FLASH_READ32(sc,
   12224 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12225 			else
   12226 				hsfsts = ICH8_FLASH_READ16(sc,
   12227 				    ICH_FLASH_HSFSTS);
   12228 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12229 				error = 0;
   12230 				break;
   12231 			}
   12232 			delay(1);
   12233 		}
   12234 		if (error == 0) {
   12235 			/*
    12236 			 * The previous cycle ended before the timeout;
    12237 			 * now set the Flash Cycle Done.
   12238 			 */
   12239 			hsfsts |= HSFSTS_DONE;
   12240 			if (sc->sc_type >= WM_T_PCH_SPT)
   12241 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12242 				    hsfsts & 0xffffUL);
   12243 			else
   12244 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12245 				    hsfsts);
   12246 		}
   12247 	}
   12248 	return error;
   12249 }
   12250 
   12251 /******************************************************************************
   12252  * This function starts a flash cycle and waits for its completion
   12253  *
   12254  * sc - The pointer to the hw structure
   12255  ****************************************************************************/
   12256 static int32_t
   12257 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12258 {
   12259 	uint16_t hsflctl;
   12260 	uint16_t hsfsts;
   12261 	int32_t error = 1;
   12262 	uint32_t i = 0;
   12263 
   12264 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12265 	if (sc->sc_type >= WM_T_PCH_SPT)
   12266 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12267 	else
   12268 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12269 	hsflctl |= HSFCTL_GO;
   12270 	if (sc->sc_type >= WM_T_PCH_SPT)
   12271 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12272 		    (uint32_t)hsflctl << 16);
   12273 	else
   12274 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12275 
   12276 	/* Wait till FDONE bit is set to 1 */
   12277 	do {
   12278 		if (sc->sc_type >= WM_T_PCH_SPT)
   12279 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12280 			    & 0xffffUL;
   12281 		else
   12282 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12283 		if (hsfsts & HSFSTS_DONE)
   12284 			break;
   12285 		delay(1);
   12286 		i++;
   12287 	} while (i < timeout);
    12288 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   12289 		error = 0;
   12290 
   12291 	return error;
   12292 }
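
          /*
           * Register layout assumed by the 32-bit accesses above: on
           * PCH_SPT and newer, the two 16-bit flash registers share one
           * 32-bit word,
           *
           *	bits 31:16	HSFCTL
           *	bits 15:0	HSFSTS
           *
           * hence the "<< 16" / ">> 16" shifts and the "& 0xffffUL"
           * masks.
           */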
   12293 
   12294 /******************************************************************************
   12295  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12296  *
   12297  * sc - The pointer to the hw structure
   12298  * index - The index of the byte or word to read.
   12299  * size - Size of data to read, 1=byte 2=word, 4=dword
   12300  * data - Pointer to the word to store the value read.
   12301  *****************************************************************************/
   12302 static int32_t
   12303 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12304     uint32_t size, uint32_t *data)
   12305 {
   12306 	uint16_t hsfsts;
   12307 	uint16_t hsflctl;
   12308 	uint32_t flash_linear_address;
   12309 	uint32_t flash_data = 0;
   12310 	int32_t error = 1;
   12311 	int32_t count = 0;
   12312 
    12313 	if (size < 1 || size > 4 || data == NULL ||
   12314 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12315 		return error;
   12316 
   12317 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12318 	    sc->sc_ich8_flash_base;
   12319 
   12320 	do {
   12321 		delay(1);
   12322 		/* Steps */
   12323 		error = wm_ich8_cycle_init(sc);
   12324 		if (error)
   12325 			break;
   12326 
   12327 		if (sc->sc_type >= WM_T_PCH_SPT)
   12328 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12329 			    >> 16;
   12330 		else
   12331 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12332 		/* The BCOUNT field holds size - 1: 0 = 1 byte ... 3 = 4 bytes */
   12333 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12334 		    & HSFCTL_BCOUNT_MASK;
   12335 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12336 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12337 			/*
    12338 			 * On SPT, this register is in LAN memory space, not
    12339 			 * Flash.  Therefore, only 32-bit access is supported.
   12340 			 */
   12341 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12342 			    (uint32_t)hsflctl << 16);
   12343 		} else
   12344 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12345 
    12346 		/*
    12347 		 * Write the low 24 bits of the index into the Flash
    12348 		 * Linear Address field of the Flash Address register.
    12349 		 */
    12350 		/* TODO: maybe check the index against the size of the flash */
   12351 
   12352 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12353 
   12354 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12355 
    12356 		/*
    12357 		 * If FCERR is set, clear it and retry the whole sequence a
    12358 		 * few more times; otherwise read the data out of Flash
    12359 		 * Data0, least significant byte first.
    12360 		 */
   12362 		if (error == 0) {
   12363 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12364 			if (size == 1)
   12365 				*data = (uint8_t)(flash_data & 0x000000FF);
   12366 			else if (size == 2)
   12367 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12368 			else if (size == 4)
   12369 				*data = (uint32_t)flash_data;
   12370 			break;
   12371 		} else {
   12372 			/*
   12373 			 * If we've gotten here, then things are probably
   12374 			 * completely hosed, but if the error condition is
   12375 			 * detected, it won't hurt to give it another try...
   12376 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12377 			 */
   12378 			if (sc->sc_type >= WM_T_PCH_SPT)
   12379 				hsfsts = ICH8_FLASH_READ32(sc,
   12380 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12381 			else
   12382 				hsfsts = ICH8_FLASH_READ16(sc,
   12383 				    ICH_FLASH_HSFSTS);
   12384 
   12385 			if (hsfsts & HSFSTS_ERR) {
   12386 				/* Repeat for some time before giving up. */
   12387 				continue;
   12388 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12389 				break;
   12390 		}
   12391 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12392 
   12393 	return error;
   12394 }
   12395 
   12396 /******************************************************************************
   12397  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12398  *
   12399  * sc - pointer to wm_hw structure
   12400  * index - The index of the byte to read.
   12401  * data - Pointer to a byte to store the value read.
   12402  *****************************************************************************/
   12403 static int32_t
   12404 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12405 {
   12406 	int32_t status;
   12407 	uint32_t word = 0;
   12408 
   12409 	status = wm_read_ich8_data(sc, index, 1, &word);
   12410 	if (status == 0)
   12411 		*data = (uint8_t)word;
   12412 	else
   12413 		*data = 0;
   12414 
   12415 	return status;
   12416 }
   12417 
   12418 /******************************************************************************
   12419  * Reads a word from the NVM using the ICH8 flash access registers.
   12420  *
   12421  * sc - pointer to wm_hw structure
   12422  * index - The starting byte index of the word to read.
   12423  * data - Pointer to a word to store the value read.
   12424  *****************************************************************************/
   12425 static int32_t
   12426 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12427 {
   12428 	int32_t status;
   12429 	uint32_t word = 0;
   12430 
   12431 	status = wm_read_ich8_data(sc, index, 2, &word);
   12432 	if (status == 0)
   12433 		*data = (uint16_t)word;
   12434 	else
   12435 		*data = 0;
   12436 
   12437 	return status;
   12438 }
   12439 
   12440 /******************************************************************************
   12441  * Reads a dword from the NVM using the ICH8 flash access registers.
   12442  *
   12443  * sc - pointer to wm_hw structure
   12444  * index - The starting byte index of the word to read.
   12445  * data - Pointer to a word to store the value read.
   12446  *****************************************************************************/
   12447 static int32_t
   12448 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12449 {
   12450 	int32_t status;
   12451 
   12452 	status = wm_read_ich8_data(sc, index, 4, data);
   12453 	return status;
   12454 }
   12455 
   12456 /******************************************************************************
   12457  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12458  * register.
   12459  *
   12460  * sc - Struct containing variables accessed by shared code
   12461  * offset - offset of word in the EEPROM to read
   12462  * data - word read from the EEPROM
   12463  * words - number of words to read
   12464  *****************************************************************************/
   12465 static int
   12466 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12467 {
   12468 	int32_t	 rv = 0;
   12469 	uint32_t flash_bank = 0;
   12470 	uint32_t act_offset = 0;
   12471 	uint32_t bank_offset = 0;
   12472 	uint16_t word = 0;
   12473 	uint16_t i = 0;
   12474 
   12475 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12476 		device_xname(sc->sc_dev), __func__));
   12477 
   12478 	if (sc->nvm.acquire(sc) != 0)
   12479 		return -1;
   12480 
   12481 	/*
   12482 	 * We need to know which is the valid flash bank.  In the event
   12483 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12484 	 * managing flash_bank. So it cannot be trusted and needs
   12485 	 * to be updated with each read.
   12486 	 */
   12487 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12488 	if (rv) {
   12489 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12490 			device_xname(sc->sc_dev)));
   12491 		flash_bank = 0;
   12492 	}
   12493 
   12494 	/*
   12495 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12496 	 * size
   12497 	 */
   12498 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12499 
   12500 	for (i = 0; i < words; i++) {
   12501 		/* The NVM part needs a byte offset, hence * 2 */
   12502 		act_offset = bank_offset + ((offset + i) * 2);
   12503 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12504 		if (rv) {
   12505 			aprint_error_dev(sc->sc_dev,
   12506 			    "%s: failed to read NVM\n", __func__);
   12507 			break;
   12508 		}
   12509 		data[i] = word;
   12510 	}
   12511 
   12512 	sc->nvm.release(sc);
   12513 	return rv;
   12514 }
   12515 
   12516 /******************************************************************************
   12517  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12518  * register.
   12519  *
   12520  * sc - Struct containing variables accessed by shared code
   12521  * offset - offset of word in the EEPROM to read
   12522  * data - word read from the EEPROM
   12523  * words - number of words to read
   12524  *****************************************************************************/
   12525 static int
   12526 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12527 {
   12528 	int32_t	 rv = 0;
   12529 	uint32_t flash_bank = 0;
   12530 	uint32_t act_offset = 0;
   12531 	uint32_t bank_offset = 0;
   12532 	uint32_t dword = 0;
   12533 	uint16_t i = 0;
   12534 
   12535 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12536 		device_xname(sc->sc_dev), __func__));
   12537 
   12538 	if (sc->nvm.acquire(sc) != 0)
   12539 		return -1;
   12540 
   12541 	/*
   12542 	 * We need to know which is the valid flash bank.  In the event
   12543 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12544 	 * managing flash_bank. So it cannot be trusted and needs
   12545 	 * to be updated with each read.
   12546 	 */
   12547 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12548 	if (rv) {
   12549 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12550 			device_xname(sc->sc_dev)));
   12551 		flash_bank = 0;
   12552 	}
   12553 
   12554 	/*
   12555 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12556 	 * size
   12557 	 */
   12558 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12559 
   12560 	for (i = 0; i < words; i++) {
   12561 		/* The NVM part needs a byte offset, hence * 2 */
   12562 		act_offset = bank_offset + ((offset + i) * 2);
   12563 		/* but we must read dword aligned, so mask ... */
   12564 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12565 		if (rv) {
   12566 			aprint_error_dev(sc->sc_dev,
   12567 			    "%s: failed to read NVM\n", __func__);
   12568 			break;
   12569 		}
   12570 		/* ... and pick out low or high word */
   12571 		if ((act_offset & 0x2) == 0)
   12572 			data[i] = (uint16_t)(dword & 0xFFFF);
   12573 		else
   12574 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12575 	}
   12576 
   12577 	sc->nvm.release(sc);
   12578 	return rv;
   12579 }
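
          /*
           * Worked example of the alignment dance above: word offset
           * 0x21 in bank 0 gives act_offset 0x42.  The dword at byte
           * offset 0x40 is read and, because bit 1 of act_offset is
           * set, the high 16 bits are returned.
           */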
   12580 
   12581 /* iNVM */
   12582 
   12583 static int
   12584 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12585 {
    12586 	int32_t	 rv = -1;	/* -1 = word not found */
   12587 	uint32_t invm_dword;
   12588 	uint16_t i;
   12589 	uint8_t record_type, word_address;
   12590 
   12591 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12592 		device_xname(sc->sc_dev), __func__));
   12593 
   12594 	for (i = 0; i < INVM_SIZE; i++) {
   12595 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12596 		/* Get record type */
   12597 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12598 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12599 			break;
   12600 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12601 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12602 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12603 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12604 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12605 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12606 			if (word_address == address) {
   12607 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12608 				rv = 0;
   12609 				break;
   12610 			}
   12611 		}
   12612 	}
   12613 
   12614 	return rv;
   12615 }
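
          /*
           * Each 32-bit iNVM entry encodes a record type in its low
           * bits; a word-autoload record also carries the target word
           * address and 16 bits of data (see the INVM_DWORD_TO_*
           * macros).  The loop above walks the records, skipping over
           * the multi-dword CSR-autoload and RSA-key structures, until
           * it finds the requested word address.
           */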
   12616 
   12617 static int
   12618 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12619 {
   12620 	int rv = 0;
   12621 	int i;
   12622 
   12623 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12624 		device_xname(sc->sc_dev), __func__));
   12625 
   12626 	if (sc->nvm.acquire(sc) != 0)
   12627 		return -1;
   12628 
   12629 	for (i = 0; i < words; i++) {
   12630 		switch (offset + i) {
   12631 		case NVM_OFF_MACADDR:
   12632 		case NVM_OFF_MACADDR1:
   12633 		case NVM_OFF_MACADDR2:
   12634 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12635 			if (rv != 0) {
   12636 				data[i] = 0xffff;
   12637 				rv = -1;
   12638 			}
   12639 			break;
   12640 		case NVM_OFF_CFG2:
   12641 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12642 			if (rv != 0) {
   12643 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12644 				rv = 0;
   12645 			}
   12646 			break;
   12647 		case NVM_OFF_CFG4:
   12648 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12649 			if (rv != 0) {
   12650 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12651 				rv = 0;
   12652 			}
   12653 			break;
   12654 		case NVM_OFF_LED_1_CFG:
   12655 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12656 			if (rv != 0) {
   12657 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12658 				rv = 0;
   12659 			}
   12660 			break;
   12661 		case NVM_OFF_LED_0_2_CFG:
   12662 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12663 			if (rv != 0) {
   12664 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12665 				rv = 0;
   12666 			}
   12667 			break;
   12668 		case NVM_OFF_ID_LED_SETTINGS:
   12669 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12670 			if (rv != 0) {
   12671 				*data = ID_LED_RESERVED_FFFF;
   12672 				rv = 0;
   12673 			}
   12674 			break;
   12675 		default:
   12676 			DPRINTF(WM_DEBUG_NVM,
   12677 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12678 			*data = NVM_RESERVED_WORD;
   12679 			break;
   12680 		}
   12681 	}
   12682 
   12683 	sc->nvm.release(sc);
   12684 	return rv;
   12685 }
   12686 
    12687 /* Locking, NVM type detection, checksum validation, version and read */
   12688 
   12689 static int
   12690 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12691 {
   12692 	uint32_t eecd = 0;
   12693 
   12694 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12695 	    || sc->sc_type == WM_T_82583) {
   12696 		eecd = CSR_READ(sc, WMREG_EECD);
   12697 
   12698 		/* Isolate bits 15 & 16 */
   12699 		eecd = ((eecd >> 15) & 0x03);
   12700 
   12701 		/* If both bits are set, device is Flash type */
   12702 		if (eecd == 0x03)
   12703 			return 0;
   12704 	}
   12705 	return 1;
   12706 }
   12707 
   12708 static int
   12709 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12710 {
   12711 	uint32_t eec;
   12712 
   12713 	eec = CSR_READ(sc, WMREG_EEC);
   12714 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12715 		return 1;
   12716 
   12717 	return 0;
   12718 }
   12719 
   12720 /*
   12721  * wm_nvm_validate_checksum
   12722  *
   12723  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12724  */
   12725 static int
   12726 wm_nvm_validate_checksum(struct wm_softc *sc)
   12727 {
   12728 	uint16_t checksum;
   12729 	uint16_t eeprom_data;
   12730 #ifdef WM_DEBUG
   12731 	uint16_t csum_wordaddr, valid_checksum;
   12732 #endif
   12733 	int i;
   12734 
   12735 	checksum = 0;
   12736 
   12737 	/* Don't check for I211 */
   12738 	if (sc->sc_type == WM_T_I211)
   12739 		return 0;
   12740 
   12741 #ifdef WM_DEBUG
   12742 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12743 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12744 		csum_wordaddr = NVM_OFF_COMPAT;
   12745 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12746 	} else {
   12747 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12748 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12749 	}
   12750 
   12751 	/* Dump EEPROM image for debug */
   12752 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12753 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12754 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12755 		/* XXX PCH_SPT? */
   12756 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12757 		if ((eeprom_data & valid_checksum) == 0) {
   12758 			DPRINTF(WM_DEBUG_NVM,
    12759 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12760 				device_xname(sc->sc_dev), eeprom_data,
   12761 				    valid_checksum));
   12762 		}
   12763 	}
   12764 
   12765 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12766 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12767 		for (i = 0; i < NVM_SIZE; i++) {
   12768 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12769 				printf("XXXX ");
   12770 			else
   12771 				printf("%04hx ", eeprom_data);
   12772 			if (i % 8 == 7)
   12773 				printf("\n");
   12774 		}
   12775 	}
   12776 
   12777 #endif /* WM_DEBUG */
   12778 
   12779 	for (i = 0; i < NVM_SIZE; i++) {
   12780 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12781 			return 1;
   12782 		checksum += eeprom_data;
   12783 	}
   12784 
   12785 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12786 #ifdef WM_DEBUG
   12787 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12788 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12789 #endif
   12790 	}
   12791 
   12792 	return 0;
   12793 }
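
          /*
           * A sketch of how the invariant checked above is established
           * (assuming the usual Intel convention; this is not driver
           * code): the checksum word, the last of the NVM_SIZE words,
           * is written by the image tool as
           *
           *	cksum_word = (uint16_t)(NVM_CHECKSUM - sum(word[0 .. NVM_SIZE - 2]))
           *
           * so that the sum of all NVM_SIZE words equals NVM_CHECKSUM.
           */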
   12794 
   12795 static void
   12796 wm_nvm_version_invm(struct wm_softc *sc)
   12797 {
   12798 	uint32_t dword;
   12799 
   12800 	/*
    12801 	 * Linux's code to decode the version is very strange, so we
    12802 	 * don't follow that algorithm and just use word 61 as the
    12803 	 * document describes.  Perhaps it's not perfect though...
   12804 	 *
   12805 	 * Example:
   12806 	 *
   12807 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12808 	 */
   12809 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12810 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12811 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12812 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12813 }
   12814 
   12815 static void
   12816 wm_nvm_version(struct wm_softc *sc)
   12817 {
   12818 	uint16_t major, minor, build, patch;
   12819 	uint16_t uid0, uid1;
   12820 	uint16_t nvm_data;
   12821 	uint16_t off;
   12822 	bool check_version = false;
   12823 	bool check_optionrom = false;
   12824 	bool have_build = false;
   12825 	bool have_uid = true;
   12826 
   12827 	/*
   12828 	 * Version format:
   12829 	 *
   12830 	 * XYYZ
   12831 	 * X0YZ
   12832 	 * X0YY
   12833 	 *
   12834 	 * Example:
   12835 	 *
   12836 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12837 	 *	82571	0x50a6	5.10.6?
   12838 	 *	82572	0x506a	5.6.10?
   12839 	 *	82572EI	0x5069	5.6.9?
   12840 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12841 	 *		0x2013	2.1.3?
    12842 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12843 	 */
   12844 
   12845 	/*
   12846 	 * XXX
    12847 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    12848 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12849 	 */
   12850 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12851 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12852 		have_uid = false;
   12853 
   12854 	switch (sc->sc_type) {
   12855 	case WM_T_82571:
   12856 	case WM_T_82572:
   12857 	case WM_T_82574:
   12858 	case WM_T_82583:
   12859 		check_version = true;
   12860 		check_optionrom = true;
   12861 		have_build = true;
   12862 		break;
   12863 	case WM_T_82575:
   12864 	case WM_T_82576:
   12865 	case WM_T_82580:
   12866 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12867 			check_version = true;
   12868 		break;
   12869 	case WM_T_I211:
   12870 		wm_nvm_version_invm(sc);
   12871 		have_uid = false;
   12872 		goto printver;
   12873 	case WM_T_I210:
   12874 		if (!wm_nvm_flash_presence_i210(sc)) {
   12875 			wm_nvm_version_invm(sc);
   12876 			have_uid = false;
   12877 			goto printver;
   12878 		}
   12879 		/* FALLTHROUGH */
   12880 	case WM_T_I350:
   12881 	case WM_T_I354:
   12882 		check_version = true;
   12883 		check_optionrom = true;
   12884 		break;
   12885 	default:
   12886 		return;
   12887 	}
   12888 	if (check_version
   12889 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12890 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12891 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12892 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12893 			build = nvm_data & NVM_BUILD_MASK;
   12894 			have_build = true;
   12895 		} else
   12896 			minor = nvm_data & 0x00ff;
   12897 
   12898 		/* Decimal */
   12899 		minor = (minor / 16) * 10 + (minor % 16);
   12900 		sc->sc_nvm_ver_major = major;
   12901 		sc->sc_nvm_ver_minor = minor;
   12902 
   12903 printver:
   12904 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12905 		    sc->sc_nvm_ver_minor);
   12906 		if (have_build) {
   12907 			sc->sc_nvm_ver_build = build;
   12908 			aprint_verbose(".%d", build);
   12909 		}
   12910 	}
   12911 
    12912 	/* Assume the Option ROM area is above NVM_SIZE */
   12913 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12914 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12915 		/* Option ROM Version */
		if ((off != 0x0000) && (off != 0xffff)) {
			uint16_t combo0, combo1;
			int rv;

			/* Don't clobber uid1 here; it is printed below. */
			off += NVM_COMBO_VER_OFF;
			rv = wm_nvm_read(sc, off + 1, 1, &combo1);
			rv |= wm_nvm_read(sc, off, 1, &combo0);
			if ((rv == 0) && (combo0 != 0) && (combo0 != 0xffff)
			    && (combo1 != 0) && (combo1 != 0xffff)) {
				/* 16 bits */
				major = combo0 >> 8;
				build = (combo0 << 8) | (combo1 >> 8);
				patch = combo1 & 0x00ff;
				aprint_verbose(", option ROM Version %d.%d.%d",
				    major, build, patch);
			}
		}
   12932 	}
   12933 
	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
		aprint_verbose(", Image Unique ID %08x",
		    ((uint32_t)uid1 << 16) | uid0);
   12936 }
   12937 
   12938 /*
   12939  * wm_nvm_read:
   12940  *
   12941  *	Read data from the serial EEPROM.
   12942  */
   12943 static int
   12944 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12945 {
   12946 	int rv;
   12947 
   12948 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12949 		device_xname(sc->sc_dev), __func__));
   12950 
   12951 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12952 		return -1;
   12953 
   12954 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12955 
   12956 	return rv;
   12957 }
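
/*
 * Typical use (sketch only; mirrors the callers in this file):
 *
 *	uint16_t val;
 *
 *	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &val) == 0) {
 *		... use val ...
 *	}
 *
 * A non-zero return means the word could not be read and must not be used.
 */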
   12958 
   12959 /*
   12960  * Hardware semaphores.
 * Very complex...
   12962  */
   12963 
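/*
 * In outline (a reader's summary of the routines below): wm_get/put_eecd
 * bit-bang the EECD request/grant handshake, wm_get/put_swsm_semaphore
 * take the SWSM SMBI and SWESMBI bits, wm_get/put_swfw_semaphore
 * arbitrate software/firmware ownership through SW_FW_SYNC, and the
 * ICH/PCH variants take EXTCNFCTR_MDIO_SW_OWNERSHIP under the driver's
 * sc_ich_phymtx/sc_ich_nvmmtx mutexes.
 */
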
   12964 static int
   12965 wm_get_null(struct wm_softc *sc)
   12966 {
   12967 
   12968 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12969 		device_xname(sc->sc_dev), __func__));
   12970 	return 0;
   12971 }
   12972 
   12973 static void
   12974 wm_put_null(struct wm_softc *sc)
   12975 {
   12976 
   12977 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12978 		device_xname(sc->sc_dev), __func__));
   12979 	return;
   12980 }
   12981 
   12982 static int
   12983 wm_get_eecd(struct wm_softc *sc)
   12984 {
   12985 	uint32_t reg;
   12986 	int x;
   12987 
   12988 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12989 		device_xname(sc->sc_dev), __func__));
   12990 
   12991 	reg = CSR_READ(sc, WMREG_EECD);
   12992 
   12993 	/* Request EEPROM access. */
   12994 	reg |= EECD_EE_REQ;
   12995 	CSR_WRITE(sc, WMREG_EECD, reg);
   12996 
   12997 	/* ..and wait for it to be granted. */
   12998 	for (x = 0; x < 1000; x++) {
   12999 		reg = CSR_READ(sc, WMREG_EECD);
   13000 		if (reg & EECD_EE_GNT)
   13001 			break;
   13002 		delay(5);
   13003 	}
   13004 	if ((reg & EECD_EE_GNT) == 0) {
   13005 		aprint_error_dev(sc->sc_dev,
   13006 		    "could not acquire EEPROM GNT\n");
   13007 		reg &= ~EECD_EE_REQ;
   13008 		CSR_WRITE(sc, WMREG_EECD, reg);
   13009 		return -1;
   13010 	}
   13011 
   13012 	return 0;
   13013 }
   13014 
   13015 static void
   13016 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13017 {
   13018 
   13019 	*eecd |= EECD_SK;
   13020 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13021 	CSR_WRITE_FLUSH(sc);
   13022 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13023 		delay(1);
   13024 	else
   13025 		delay(50);
   13026 }
   13027 
   13028 static void
   13029 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13030 {
   13031 
   13032 	*eecd &= ~EECD_SK;
   13033 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13034 	CSR_WRITE_FLUSH(sc);
   13035 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13036 		delay(1);
   13037 	else
   13038 		delay(50);
   13039 }
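
/*
 * Example (a sketch, not driver code): shifting one opcode/data bit out
 * to the serial EEPROM brackets the DI setup with a raise/lower pair:
 *
 *	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
 *	if (bit != 0)
 *		reg |= EECD_DI;
 *	CSR_WRITE(sc, WMREG_EECD, reg);
 *	CSR_WRITE_FLUSH(sc);
 *	wm_nvm_eec_clock_raise(sc, &reg);
 *	wm_nvm_eec_clock_lower(sc, &reg);
 */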
   13040 
   13041 static void
   13042 wm_put_eecd(struct wm_softc *sc)
   13043 {
   13044 	uint32_t reg;
   13045 
   13046 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13047 		device_xname(sc->sc_dev), __func__));
   13048 
   13049 	/* Stop nvm */
   13050 	reg = CSR_READ(sc, WMREG_EECD);
   13051 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13052 		/* Pull CS high */
   13053 		reg |= EECD_CS;
   13054 		wm_nvm_eec_clock_lower(sc, &reg);
   13055 	} else {
   13056 		/* CS on Microwire is active-high */
   13057 		reg &= ~(EECD_CS | EECD_DI);
   13058 		CSR_WRITE(sc, WMREG_EECD, reg);
   13059 		wm_nvm_eec_clock_raise(sc, &reg);
   13060 		wm_nvm_eec_clock_lower(sc, &reg);
   13061 	}
   13062 
   13063 	reg = CSR_READ(sc, WMREG_EECD);
   13064 	reg &= ~EECD_EE_REQ;
   13065 	CSR_WRITE(sc, WMREG_EECD, reg);
   13066 
   13067 	return;
   13068 }
   13069 
   13070 /*
   13071  * Get hardware semaphore.
   13072  * Same as e1000_get_hw_semaphore_generic()
   13073  */
   13074 static int
   13075 wm_get_swsm_semaphore(struct wm_softc *sc)
   13076 {
   13077 	int32_t timeout;
   13078 	uint32_t swsm;
   13079 
   13080 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13081 		device_xname(sc->sc_dev), __func__));
   13082 	KASSERT(sc->sc_nvm_wordsize > 0);
   13083 
   13084 retry:
   13085 	/* Get the SW semaphore. */
   13086 	timeout = sc->sc_nvm_wordsize + 1;
   13087 	while (timeout) {
   13088 		swsm = CSR_READ(sc, WMREG_SWSM);
   13089 
   13090 		if ((swsm & SWSM_SMBI) == 0)
   13091 			break;
   13092 
   13093 		delay(50);
   13094 		timeout--;
   13095 	}
   13096 
   13097 	if (timeout == 0) {
   13098 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13099 			/*
   13100 			 * In rare circumstances, the SW semaphore may already
   13101 			 * be held unintentionally. Clear the semaphore once
   13102 			 * before giving up.
   13103 			 */
   13104 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13105 			wm_put_swsm_semaphore(sc);
   13106 			goto retry;
   13107 		}
   13108 		aprint_error_dev(sc->sc_dev,
   13109 		    "could not acquire SWSM SMBI\n");
   13110 		return 1;
   13111 	}
   13112 
   13113 	/* Get the FW semaphore. */
   13114 	timeout = sc->sc_nvm_wordsize + 1;
   13115 	while (timeout) {
   13116 		swsm = CSR_READ(sc, WMREG_SWSM);
   13117 		swsm |= SWSM_SWESMBI;
   13118 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13119 		/* If we managed to set the bit we got the semaphore. */
   13120 		swsm = CSR_READ(sc, WMREG_SWSM);
   13121 		if (swsm & SWSM_SWESMBI)
   13122 			break;
   13123 
   13124 		delay(50);
   13125 		timeout--;
   13126 	}
   13127 
   13128 	if (timeout == 0) {
   13129 		aprint_error_dev(sc->sc_dev,
   13130 		    "could not acquire SWSM SWESMBI\n");
   13131 		/* Release semaphores */
   13132 		wm_put_swsm_semaphore(sc);
   13133 		return 1;
   13134 	}
   13135 	return 0;
   13136 }
   13137 
   13138 /*
   13139  * Put hardware semaphore.
   13140  * Same as e1000_put_hw_semaphore_generic()
   13141  */
   13142 static void
   13143 wm_put_swsm_semaphore(struct wm_softc *sc)
   13144 {
   13145 	uint32_t swsm;
   13146 
   13147 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13148 		device_xname(sc->sc_dev), __func__));
   13149 
   13150 	swsm = CSR_READ(sc, WMREG_SWSM);
   13151 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13152 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13153 }
   13154 
   13155 /*
   13156  * Get SW/FW semaphore.
   13157  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13158  */
   13159 static int
   13160 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13161 {
   13162 	uint32_t swfw_sync;
   13163 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13164 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13165 	int timeout;
   13166 
   13167 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13168 		device_xname(sc->sc_dev), __func__));
   13169 
   13170 	if (sc->sc_type == WM_T_80003)
   13171 		timeout = 50;
   13172 	else
   13173 		timeout = 200;
   13174 
   13175 	while (timeout) {
   13176 		if (wm_get_swsm_semaphore(sc)) {
   13177 			aprint_error_dev(sc->sc_dev,
   13178 			    "%s: failed to get semaphore\n",
   13179 			    __func__);
   13180 			return 1;
   13181 		}
   13182 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13183 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13184 			swfw_sync |= swmask;
   13185 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13186 			wm_put_swsm_semaphore(sc);
   13187 			return 0;
   13188 		}
   13189 		wm_put_swsm_semaphore(sc);
   13190 		delay(5000);
   13191 		timeout--;
   13192 	}
   13193 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13194 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13195 	return 1;
   13196 }
   13197 
   13198 static void
   13199 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13200 {
   13201 	uint32_t swfw_sync;
   13202 
   13203 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13204 		device_xname(sc->sc_dev), __func__));
   13205 
   13206 	while (wm_get_swsm_semaphore(sc) != 0)
   13207 		continue;
   13208 
   13209 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13210 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13211 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13212 
   13213 	wm_put_swsm_semaphore(sc);
   13214 }
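
/*
 * Typical use (sketch): bracket access to the shared resource with the
 * matching get/put pair and honor the error return:
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) != 0)
 *		return error;
 *	... access the NVM ...
 *	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 */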
   13215 
   13216 static int
   13217 wm_get_nvm_80003(struct wm_softc *sc)
   13218 {
   13219 	int rv;
   13220 
   13221 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13222 		device_xname(sc->sc_dev), __func__));
   13223 
   13224 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13225 		aprint_error_dev(sc->sc_dev,
   13226 		    "%s: failed to get semaphore(SWFW)\n",
   13227 		    __func__);
   13228 		return rv;
   13229 	}
   13230 
   13231 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13232 	    && (rv = wm_get_eecd(sc)) != 0) {
   13233 		aprint_error_dev(sc->sc_dev,
   13234 		    "%s: failed to get semaphore(EECD)\n",
   13235 		    __func__);
   13236 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13237 		return rv;
   13238 	}
   13239 
   13240 	return 0;
   13241 }
   13242 
   13243 static void
   13244 wm_put_nvm_80003(struct wm_softc *sc)
   13245 {
   13246 
   13247 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13248 		device_xname(sc->sc_dev), __func__));
   13249 
   13250 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13251 		wm_put_eecd(sc);
   13252 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13253 }
   13254 
   13255 static int
   13256 wm_get_nvm_82571(struct wm_softc *sc)
   13257 {
   13258 	int rv;
   13259 
   13260 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13261 		device_xname(sc->sc_dev), __func__));
   13262 
   13263 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13264 		return rv;
   13265 
   13266 	switch (sc->sc_type) {
   13267 	case WM_T_82573:
   13268 		break;
   13269 	default:
   13270 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13271 			rv = wm_get_eecd(sc);
   13272 		break;
   13273 	}
   13274 
   13275 	if (rv != 0) {
   13276 		aprint_error_dev(sc->sc_dev,
   13277 		    "%s: failed to get semaphore\n",
   13278 		    __func__);
   13279 		wm_put_swsm_semaphore(sc);
   13280 	}
   13281 
   13282 	return rv;
   13283 }
   13284 
   13285 static void
   13286 wm_put_nvm_82571(struct wm_softc *sc)
   13287 {
   13288 
   13289 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13290 		device_xname(sc->sc_dev), __func__));
   13291 
   13292 	switch (sc->sc_type) {
   13293 	case WM_T_82573:
   13294 		break;
   13295 	default:
   13296 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13297 			wm_put_eecd(sc);
   13298 		break;
   13299 	}
   13300 
   13301 	wm_put_swsm_semaphore(sc);
   13302 }
   13303 
   13304 static int
   13305 wm_get_phy_82575(struct wm_softc *sc)
   13306 {
   13307 
   13308 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13309 		device_xname(sc->sc_dev), __func__));
   13310 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13311 }
   13312 
   13313 static void
   13314 wm_put_phy_82575(struct wm_softc *sc)
   13315 {
   13316 
   13317 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13318 		device_xname(sc->sc_dev), __func__));
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13320 }
   13321 
   13322 static int
   13323 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13324 {
   13325 	uint32_t ext_ctrl;
	int timeout;
   13327 
   13328 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13329 		device_xname(sc->sc_dev), __func__));
   13330 
   13331 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13332 	for (timeout = 0; timeout < 200; timeout++) {
   13333 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13334 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13335 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13336 
   13337 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13338 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13339 			return 0;
   13340 		delay(5000);
   13341 	}
   13342 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13343 	    device_xname(sc->sc_dev), ext_ctrl);
   13344 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13345 	return 1;
   13346 }
   13347 
   13348 static void
   13349 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13350 {
   13351 	uint32_t ext_ctrl;
   13352 
   13353 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13354 		device_xname(sc->sc_dev), __func__));
   13355 
   13356 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13357 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13358 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13359 
   13360 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13361 }
   13362 
   13363 static int
   13364 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13365 {
   13366 	uint32_t ext_ctrl;
   13367 	int timeout;
   13368 
   13369 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13370 		device_xname(sc->sc_dev), __func__));
   13371 	mutex_enter(sc->sc_ich_phymtx);
   13372 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13373 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13374 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13375 			break;
   13376 		delay(1000);
   13377 	}
   13378 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13379 		printf("%s: SW has already locked the resource\n",
   13380 		    device_xname(sc->sc_dev));
   13381 		goto out;
   13382 	}
   13383 
   13384 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13385 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13386 	for (timeout = 0; timeout < 1000; timeout++) {
   13387 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13388 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13389 			break;
   13390 		delay(1000);
   13391 	}
   13392 	if (timeout >= 1000) {
   13393 		printf("%s: failed to acquire semaphore\n",
   13394 		    device_xname(sc->sc_dev));
   13395 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13396 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13397 		goto out;
   13398 	}
   13399 	return 0;
   13400 
   13401 out:
   13402 	mutex_exit(sc->sc_ich_phymtx);
   13403 	return 1;
   13404 }
   13405 
   13406 static void
   13407 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13408 {
   13409 	uint32_t ext_ctrl;
   13410 
   13411 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13412 		device_xname(sc->sc_dev), __func__));
   13413 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13414 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13415 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13416 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13417 	} else {
   13418 		printf("%s: Semaphore unexpectedly released\n",
   13419 		    device_xname(sc->sc_dev));
   13420 	}
   13421 
   13422 	mutex_exit(sc->sc_ich_phymtx);
   13423 }
   13424 
   13425 static int
   13426 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13427 {
   13428 
   13429 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13430 		device_xname(sc->sc_dev), __func__));
   13431 	mutex_enter(sc->sc_ich_nvmmtx);
   13432 
   13433 	return 0;
   13434 }
   13435 
   13436 static void
   13437 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13438 {
   13439 
   13440 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13441 		device_xname(sc->sc_dev), __func__));
   13442 	mutex_exit(sc->sc_ich_nvmmtx);
   13443 }
   13444 
   13445 static int
   13446 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13447 {
   13448 	int i = 0;
   13449 	uint32_t reg;
   13450 
   13451 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13452 		device_xname(sc->sc_dev), __func__));
   13453 
   13454 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13455 	do {
   13456 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13457 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13458 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13459 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13460 			break;
   13461 		delay(2*1000);
   13462 		i++;
   13463 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13464 
   13465 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13466 		wm_put_hw_semaphore_82573(sc);
   13467 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13468 		    device_xname(sc->sc_dev));
   13469 		return -1;
   13470 	}
   13471 
   13472 	return 0;
   13473 }
   13474 
   13475 static void
   13476 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13477 {
   13478 	uint32_t reg;
   13479 
   13480 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13481 		device_xname(sc->sc_dev), __func__));
   13482 
   13483 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13484 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13485 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13486 }
   13487 
   13488 /*
   13489  * Management mode and power management related subroutines.
   13490  * BMC, AMT, suspend/resume and EEE.
   13491  */
   13492 
   13493 #ifdef WM_WOL
   13494 static int
   13495 wm_check_mng_mode(struct wm_softc *sc)
   13496 {
   13497 	int rv;
   13498 
   13499 	switch (sc->sc_type) {
   13500 	case WM_T_ICH8:
   13501 	case WM_T_ICH9:
   13502 	case WM_T_ICH10:
   13503 	case WM_T_PCH:
   13504 	case WM_T_PCH2:
   13505 	case WM_T_PCH_LPT:
   13506 	case WM_T_PCH_SPT:
   13507 	case WM_T_PCH_CNP:
   13508 		rv = wm_check_mng_mode_ich8lan(sc);
   13509 		break;
   13510 	case WM_T_82574:
   13511 	case WM_T_82583:
   13512 		rv = wm_check_mng_mode_82574(sc);
   13513 		break;
   13514 	case WM_T_82571:
   13515 	case WM_T_82572:
   13516 	case WM_T_82573:
   13517 	case WM_T_80003:
   13518 		rv = wm_check_mng_mode_generic(sc);
   13519 		break;
   13520 	default:
		/* nothing to do */
   13522 		rv = 0;
   13523 		break;
   13524 	}
   13525 
   13526 	return rv;
   13527 }
   13528 
   13529 static int
   13530 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13531 {
   13532 	uint32_t fwsm;
   13533 
   13534 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13535 
   13536 	if (((fwsm & FWSM_FW_VALID) != 0)
   13537 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13538 		return 1;
   13539 
   13540 	return 0;
   13541 }
   13542 
   13543 static int
   13544 wm_check_mng_mode_82574(struct wm_softc *sc)
   13545 {
   13546 	uint16_t data;
   13547 
   13548 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13549 
   13550 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13551 		return 1;
   13552 
   13553 	return 0;
   13554 }
   13555 
   13556 static int
   13557 wm_check_mng_mode_generic(struct wm_softc *sc)
   13558 {
   13559 	uint32_t fwsm;
   13560 
   13561 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13562 
   13563 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13564 		return 1;
   13565 
   13566 	return 0;
   13567 }
   13568 #endif /* WM_WOL */
   13569 
   13570 static int
   13571 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13572 {
   13573 	uint32_t manc, fwsm, factps;
   13574 
   13575 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13576 		return 0;
   13577 
   13578 	manc = CSR_READ(sc, WMREG_MANC);
   13579 
   13580 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13581 		device_xname(sc->sc_dev), manc));
   13582 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13583 		return 0;
   13584 
   13585 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13586 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13587 		factps = CSR_READ(sc, WMREG_FACTPS);
   13588 		if (((factps & FACTPS_MNGCG) == 0)
   13589 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13590 			return 1;
   13591 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13592 		uint16_t data;
   13593 
   13594 		factps = CSR_READ(sc, WMREG_FACTPS);
   13595 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13596 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13597 			device_xname(sc->sc_dev), factps, data));
   13598 		if (((factps & FACTPS_MNGCG) == 0)
   13599 		    && ((data & NVM_CFG2_MNGM_MASK)
   13600 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13601 			return 1;
   13602 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13603 	    && ((manc & MANC_ASF_EN) == 0))
   13604 		return 1;
   13605 
   13606 	return 0;
   13607 }
   13608 
   13609 static bool
   13610 wm_phy_resetisblocked(struct wm_softc *sc)
   13611 {
   13612 	bool blocked = false;
   13613 	uint32_t reg;
   13614 	int i = 0;
   13615 
   13616 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13617 		device_xname(sc->sc_dev), __func__));
   13618 
   13619 	switch (sc->sc_type) {
   13620 	case WM_T_ICH8:
   13621 	case WM_T_ICH9:
   13622 	case WM_T_ICH10:
   13623 	case WM_T_PCH:
   13624 	case WM_T_PCH2:
   13625 	case WM_T_PCH_LPT:
   13626 	case WM_T_PCH_SPT:
   13627 	case WM_T_PCH_CNP:
   13628 		do {
   13629 			reg = CSR_READ(sc, WMREG_FWSM);
   13630 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13631 				blocked = true;
   13632 				delay(10*1000);
   13633 				continue;
   13634 			}
   13635 			blocked = false;
   13636 		} while (blocked && (i++ < 30));
   13637 		return blocked;
   13638 		break;
   13639 	case WM_T_82571:
   13640 	case WM_T_82572:
   13641 	case WM_T_82573:
   13642 	case WM_T_82574:
   13643 	case WM_T_82583:
   13644 	case WM_T_80003:
   13645 		reg = CSR_READ(sc, WMREG_MANC);
   13646 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13647 			return true;
   13648 		else
   13649 			return false;
   13650 		break;
   13651 	default:
   13652 		/* no problem */
   13653 		break;
   13654 	}
   13655 
   13656 	return false;
   13657 }
   13658 
   13659 static void
   13660 wm_get_hw_control(struct wm_softc *sc)
   13661 {
   13662 	uint32_t reg;
   13663 
   13664 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13665 		device_xname(sc->sc_dev), __func__));
   13666 
   13667 	if (sc->sc_type == WM_T_82573) {
   13668 		reg = CSR_READ(sc, WMREG_SWSM);
   13669 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13670 	} else if (sc->sc_type >= WM_T_82571) {
   13671 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13672 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13673 	}
   13674 }
   13675 
   13676 static void
   13677 wm_release_hw_control(struct wm_softc *sc)
   13678 {
   13679 	uint32_t reg;
   13680 
   13681 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13682 		device_xname(sc->sc_dev), __func__));
   13683 
   13684 	if (sc->sc_type == WM_T_82573) {
   13685 		reg = CSR_READ(sc, WMREG_SWSM);
   13686 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13687 	} else if (sc->sc_type >= WM_T_82571) {
   13688 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13689 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13690 	}
   13691 }
   13692 
   13693 static void
   13694 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13695 {
   13696 	uint32_t reg;
   13697 
   13698 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13699 		device_xname(sc->sc_dev), __func__));
   13700 
   13701 	if (sc->sc_type < WM_T_PCH2)
   13702 		return;
   13703 
   13704 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13705 
   13706 	if (gate)
   13707 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13708 	else
   13709 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13710 
   13711 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13712 }
   13713 
   13714 static void
   13715 wm_smbustopci(struct wm_softc *sc)
   13716 {
   13717 	uint32_t fwsm, reg;
   13718 	int rv = 0;
   13719 
   13720 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13721 		device_xname(sc->sc_dev), __func__));
   13722 
   13723 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13724 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13725 
   13726 	/* Disable ULP */
   13727 	wm_ulp_disable(sc);
   13728 
   13729 	/* Acquire PHY semaphore */
   13730 	sc->phy.acquire(sc);
   13731 
   13732 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13733 	switch (sc->sc_type) {
   13734 	case WM_T_PCH_LPT:
   13735 	case WM_T_PCH_SPT:
   13736 	case WM_T_PCH_CNP:
   13737 		if (wm_phy_is_accessible_pchlan(sc))
   13738 			break;
   13739 
   13740 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13741 		reg |= CTRL_EXT_FORCE_SMBUS;
   13742 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13743 #if 0
   13744 		/* XXX Isn't this required??? */
   13745 		CSR_WRITE_FLUSH(sc);
   13746 #endif
   13747 		delay(50 * 1000);
   13748 		/* FALLTHROUGH */
   13749 	case WM_T_PCH2:
   13750 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13751 			break;
   13752 		/* FALLTHROUGH */
   13753 	case WM_T_PCH:
   13754 		if (sc->sc_type == WM_T_PCH)
   13755 			if ((fwsm & FWSM_FW_VALID) != 0)
   13756 				break;
   13757 
   13758 		if (wm_phy_resetisblocked(sc) == true) {
   13759 			printf("XXX reset is blocked(3)\n");
   13760 			break;
   13761 		}
   13762 
   13763 		wm_toggle_lanphypc_pch_lpt(sc);
   13764 
   13765 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13766 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13767 				break;
   13768 
   13769 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13770 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13771 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13772 
   13773 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13774 				break;
   13775 			rv = -1;
   13776 		}
   13777 		break;
   13778 	default:
   13779 		break;
   13780 	}
   13781 
   13782 	/* Release semaphore */
   13783 	sc->phy.release(sc);
   13784 
   13785 	if (rv == 0) {
   13786 		if (wm_phy_resetisblocked(sc)) {
   13787 			printf("XXX reset is blocked(4)\n");
   13788 			goto out;
   13789 		}
   13790 		wm_reset_phy(sc);
   13791 		if (wm_phy_resetisblocked(sc))
   13792 			printf("XXX reset is blocked(4)\n");
   13793 	}
   13794 
   13795 out:
   13796 	/*
   13797 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13798 	 */
   13799 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13800 		delay(10*1000);
   13801 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13802 	}
   13803 }
   13804 
   13805 static void
   13806 wm_init_manageability(struct wm_softc *sc)
   13807 {
   13808 
   13809 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13810 		device_xname(sc->sc_dev), __func__));
   13811 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13812 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13813 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13814 
   13815 		/* Disable hardware interception of ARP */
   13816 		manc &= ~MANC_ARP_EN;
   13817 
   13818 		/* Enable receiving management packets to the host */
   13819 		if (sc->sc_type >= WM_T_82571) {
   13820 			manc |= MANC_EN_MNG2HOST;
   13821 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13822 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13823 		}
   13824 
   13825 		CSR_WRITE(sc, WMREG_MANC, manc);
   13826 	}
   13827 }
   13828 
   13829 static void
   13830 wm_release_manageability(struct wm_softc *sc)
   13831 {
   13832 
   13833 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13834 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13835 
   13836 		manc |= MANC_ARP_EN;
   13837 		if (sc->sc_type >= WM_T_82571)
   13838 			manc &= ~MANC_EN_MNG2HOST;
   13839 
   13840 		CSR_WRITE(sc, WMREG_MANC, manc);
   13841 	}
   13842 }
   13843 
   13844 static void
   13845 wm_get_wakeup(struct wm_softc *sc)
   13846 {
   13847 
   13848 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13849 	switch (sc->sc_type) {
   13850 	case WM_T_82573:
   13851 	case WM_T_82583:
   13852 		sc->sc_flags |= WM_F_HAS_AMT;
   13853 		/* FALLTHROUGH */
   13854 	case WM_T_80003:
   13855 	case WM_T_82575:
   13856 	case WM_T_82576:
   13857 	case WM_T_82580:
   13858 	case WM_T_I350:
   13859 	case WM_T_I354:
   13860 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13861 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13862 		/* FALLTHROUGH */
   13863 	case WM_T_82541:
   13864 	case WM_T_82541_2:
   13865 	case WM_T_82547:
   13866 	case WM_T_82547_2:
   13867 	case WM_T_82571:
   13868 	case WM_T_82572:
   13869 	case WM_T_82574:
   13870 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13871 		break;
   13872 	case WM_T_ICH8:
   13873 	case WM_T_ICH9:
   13874 	case WM_T_ICH10:
   13875 	case WM_T_PCH:
   13876 	case WM_T_PCH2:
   13877 	case WM_T_PCH_LPT:
   13878 	case WM_T_PCH_SPT:
   13879 	case WM_T_PCH_CNP:
   13880 		sc->sc_flags |= WM_F_HAS_AMT;
   13881 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13882 		break;
   13883 	default:
   13884 		break;
   13885 	}
   13886 
   13887 	/* 1: HAS_MANAGE */
   13888 	if (wm_enable_mng_pass_thru(sc) != 0)
   13889 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13890 
   13891 	/*
	 * Note that the WOL flags are set after the EEPROM settings have
	 * been reset.
   13894 	 */
   13895 }
   13896 
   13897 /*
   13898  * Unconfigure Ultra Low Power mode.
   13899  * Only for I217 and newer (see below).
   13900  */
   13901 static void
   13902 wm_ulp_disable(struct wm_softc *sc)
   13903 {
   13904 	uint32_t reg;
   13905 	int i = 0;
   13906 
   13907 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13908 		device_xname(sc->sc_dev), __func__));
   13909 	/* Exclude old devices */
   13910 	if ((sc->sc_type < WM_T_PCH_LPT)
   13911 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13912 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13913 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13914 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13915 		return;
   13916 
   13917 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13918 		/* Request ME un-configure ULP mode in the PHY */
   13919 		reg = CSR_READ(sc, WMREG_H2ME);
   13920 		reg &= ~H2ME_ULP;
   13921 		reg |= H2ME_ENFORCE_SETTINGS;
   13922 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13923 
   13924 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13925 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13926 			if (i++ == 30) {
   13927 				printf("%s timed out\n", __func__);
   13928 				return;
   13929 			}
   13930 			delay(10 * 1000);
   13931 		}
   13932 		reg = CSR_READ(sc, WMREG_H2ME);
   13933 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13934 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13935 
   13936 		return;
   13937 	}
   13938 
   13939 	/* Acquire semaphore */
   13940 	sc->phy.acquire(sc);
   13941 
   13942 	/* Toggle LANPHYPC */
   13943 	wm_toggle_lanphypc_pch_lpt(sc);
   13944 
   13945 	/* Unforce SMBus mode in PHY */
   13946 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13947 	if (reg == 0x0000 || reg == 0xffff) {
   13948 		uint32_t reg2;
   13949 
   13950 		printf("%s: Force SMBus first.\n", __func__);
   13951 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13952 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13953 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13954 		delay(50 * 1000);
   13955 
   13956 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13957 	}
   13958 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13959 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13960 
   13961 	/* Unforce SMBus mode in MAC */
   13962 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13963 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13964 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13965 
   13966 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13967 	reg |= HV_PM_CTRL_K1_ENA;
   13968 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13969 
   13970 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13971 	reg &= ~(I218_ULP_CONFIG1_IND
   13972 	    | I218_ULP_CONFIG1_STICKY_ULP
   13973 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13974 	    | I218_ULP_CONFIG1_WOL_HOST
   13975 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13976 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13977 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13978 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13979 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13980 	reg |= I218_ULP_CONFIG1_START;
   13981 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13982 
   13983 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13984 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13985 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13986 
   13987 	/* Release semaphore */
   13988 	sc->phy.release(sc);
   13989 	wm_gmii_reset(sc);
   13990 	delay(50 * 1000);
   13991 }
   13992 
   13993 /* WOL in the newer chipset interfaces (pchlan) */
   13994 static void
   13995 wm_enable_phy_wakeup(struct wm_softc *sc)
   13996 {
   13997 #if 0
   13998 	uint16_t preg;
   13999 
   14000 	/* Copy MAC RARs to PHY RARs */
   14001 
   14002 	/* Copy MAC MTA to PHY MTA */
   14003 
   14004 	/* Configure PHY Rx Control register */
   14005 
   14006 	/* Enable PHY wakeup in MAC register */
   14007 
   14008 	/* Configure and enable PHY wakeup in PHY registers */
   14009 
   14010 	/* Activate PHY wakeup */
   14011 
   14012 	/* XXX */
   14013 #endif
   14014 }
   14015 
   14016 /* Power down workaround on D3 */
   14017 static void
   14018 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14019 {
   14020 	uint32_t reg;
   14021 	int i;
   14022 
   14023 	for (i = 0; i < 2; i++) {
   14024 		/* Disable link */
   14025 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14026 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14027 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14028 
   14029 		/*
   14030 		 * Call gig speed drop workaround on Gig disable before
   14031 		 * accessing any PHY registers
   14032 		 */
   14033 		if (sc->sc_type == WM_T_ICH8)
   14034 			wm_gig_downshift_workaround_ich8lan(sc);
   14035 
   14036 		/* Write VR power-down enable */
   14037 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14038 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14039 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14040 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   14041 
   14042 		/* Read it back and test */
   14043 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14044 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14045 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14046 			break;
   14047 
   14048 		/* Issue PHY reset and repeat at most one more time */
   14049 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14050 	}
   14051 }
   14052 
   14053 static void
   14054 wm_enable_wakeup(struct wm_softc *sc)
   14055 {
   14056 	uint32_t reg, pmreg;
   14057 	pcireg_t pmode;
   14058 
   14059 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14060 		device_xname(sc->sc_dev), __func__));
   14061 
   14062 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14063 		&pmreg, NULL) == 0)
   14064 		return;
   14065 
   14066 	/* Advertise the wakeup capability */
   14067 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14068 	    | CTRL_SWDPIN(3));
   14069 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   14070 
   14071 	/* ICH workaround */
   14072 	switch (sc->sc_type) {
   14073 	case WM_T_ICH8:
   14074 	case WM_T_ICH9:
   14075 	case WM_T_ICH10:
   14076 	case WM_T_PCH:
   14077 	case WM_T_PCH2:
   14078 	case WM_T_PCH_LPT:
   14079 	case WM_T_PCH_SPT:
   14080 	case WM_T_PCH_CNP:
   14081 		/* Disable gig during WOL */
   14082 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14083 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   14084 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14085 		if (sc->sc_type == WM_T_PCH)
   14086 			wm_gmii_reset(sc);
   14087 
   14088 		/* Power down workaround */
   14089 		if (sc->sc_phytype == WMPHY_82577) {
   14090 			struct mii_softc *child;
   14091 
   14092 			/* Assume that the PHY is copper */
   14093 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14094 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   14095 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   14096 				    (768 << 5) | 25, 0x0444); /* magic num */
   14097 		}
   14098 		break;
   14099 	default:
   14100 		break;
   14101 	}
   14102 
   14103 	/* Keep the laser running on fiber adapters */
   14104 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14105 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14106 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14107 		reg |= CTRL_EXT_SWDPIN(3);
   14108 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14109 	}
   14110 
   14111 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14112 #if 0	/* for the multicast packet */
   14113 	reg |= WUFC_MC;
   14114 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14115 #endif
   14116 
   14117 	if (sc->sc_type >= WM_T_PCH)
   14118 		wm_enable_phy_wakeup(sc);
   14119 	else {
   14120 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14121 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14122 	}
   14123 
   14124 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14125 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14126 		|| (sc->sc_type == WM_T_PCH2))
   14127 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14128 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14129 
   14130 	/* Request PME */
   14131 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14132 #if 0
   14133 	/* Disable WOL */
   14134 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14135 #else
   14136 	/* For WOL */
   14137 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14138 #endif
   14139 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14140 }
   14141 
   14142 /* Disable ASPM L0s and/or L1 for workaround */
   14143 static void
   14144 wm_disable_aspm(struct wm_softc *sc)
   14145 {
   14146 	pcireg_t reg, mask = 0;
	const char *str = "";
   14148 
   14149 	/*
	 * Only for PCIe devices which have the PCIe capability in their
	 * PCI config space.
   14152 	 */
   14153 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14154 		return;
   14155 
   14156 	switch (sc->sc_type) {
   14157 	case WM_T_82571:
   14158 	case WM_T_82572:
   14159 		/*
   14160 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14161 		 * State Power management L1 State (ASPM L1).
   14162 		 */
   14163 		mask = PCIE_LCSR_ASPM_L1;
   14164 		str = "L1 is";
   14165 		break;
   14166 	case WM_T_82573:
   14167 	case WM_T_82574:
   14168 	case WM_T_82583:
   14169 		/*
   14170 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14171 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The 82574 and 82583 documents say that
		 * disabling L0s on those specific chipsets is sufficient,
		 * but we follow what the Intel em driver does.
   14176 		 *
   14177 		 * References:
   14178 		 * Errata 8 of the Specification Update of i82573.
   14179 		 * Errata 20 of the Specification Update of i82574.
   14180 		 * Errata 9 of the Specification Update of i82583.
   14181 		 */
   14182 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14183 		str = "L0s and L1 are";
   14184 		break;
   14185 	default:
   14186 		return;
   14187 	}
   14188 
   14189 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14190 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14191 	reg &= ~mask;
   14192 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14193 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14194 
   14195 	/* Print only in wm_attach() */
   14196 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14197 		aprint_verbose_dev(sc->sc_dev,
   14198 		    "ASPM %s disabled to workaround the errata.\n", str);
   14199 }
   14200 
   14201 /* LPLU */
   14202 
   14203 static void
   14204 wm_lplu_d0_disable(struct wm_softc *sc)
   14205 {
   14206 	struct mii_data *mii = &sc->sc_mii;
   14207 	uint32_t reg;
   14208 
   14209 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14210 		device_xname(sc->sc_dev), __func__));
   14211 
   14212 	if (sc->sc_phytype == WMPHY_IFE)
   14213 		return;
   14214 
   14215 	switch (sc->sc_type) {
   14216 	case WM_T_82571:
   14217 	case WM_T_82572:
   14218 	case WM_T_82573:
   14219 	case WM_T_82575:
   14220 	case WM_T_82576:
   14221 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14222 		reg &= ~PMR_D0_LPLU;
   14223 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14224 		break;
   14225 	case WM_T_82580:
   14226 	case WM_T_I350:
   14227 	case WM_T_I210:
   14228 	case WM_T_I211:
   14229 		reg = CSR_READ(sc, WMREG_PHPM);
   14230 		reg &= ~PHPM_D0A_LPLU;
   14231 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14232 		break;
   14233 	case WM_T_82574:
   14234 	case WM_T_82583:
   14235 	case WM_T_ICH8:
   14236 	case WM_T_ICH9:
   14237 	case WM_T_ICH10:
   14238 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14239 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14240 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14241 		CSR_WRITE_FLUSH(sc);
   14242 		break;
   14243 	case WM_T_PCH:
   14244 	case WM_T_PCH2:
   14245 	case WM_T_PCH_LPT:
   14246 	case WM_T_PCH_SPT:
   14247 	case WM_T_PCH_CNP:
   14248 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14249 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14250 		if (wm_phy_resetisblocked(sc) == false)
   14251 			reg |= HV_OEM_BITS_ANEGNOW;
   14252 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14253 		break;
   14254 	default:
   14255 		break;
   14256 	}
   14257 }
   14258 
   14259 /* EEE */
   14260 
   14261 static void
   14262 wm_set_eee_i350(struct wm_softc *sc)
   14263 {
   14264 	uint32_t ipcnfg, eeer;
   14265 
   14266 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14267 	eeer = CSR_READ(sc, WMREG_EEER);
   14268 
   14269 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14270 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14271 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14272 		    | EEER_LPI_FC);
   14273 	} else {
   14274 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14275 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14276 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14277 		    | EEER_LPI_FC);
   14278 	}
   14279 
   14280 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14281 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14282 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14283 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14284 }
   14285 
   14286 /*
   14287  * Workarounds (mainly PHY related).
   14288  * Basically, PHY's workarounds are in the PHY drivers.
   14289  */
   14290 
   14291 /* Work-around for 82566 Kumeran PCS lock loss */
   14292 static void
   14293 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14294 {
   14295 	struct mii_data *mii = &sc->sc_mii;
   14296 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14297 	int i;
   14298 	int reg;
   14299 
   14300 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14301 		device_xname(sc->sc_dev), __func__));
   14302 
   14303 	/* If the link is not up, do nothing */
   14304 	if ((status & STATUS_LU) == 0)
   14305 		return;
   14306 
   14307 	/* Nothing to do if the link is other than 1Gbps */
   14308 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14309 		return;
   14310 
   14311 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14312 	for (i = 0; i < 10; i++) {
   14313 		/* read twice */
   14314 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14315 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14316 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14317 			goto out;	/* GOOD! */
   14318 
   14319 		/* Reset the PHY */
   14320 		wm_reset_phy(sc);
   14321 		delay(5*1000);
   14322 	}
   14323 
   14324 	/* Disable GigE link negotiation */
   14325 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14326 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14327 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14328 
   14329 	/*
   14330 	 * Call gig speed drop workaround on Gig disable before accessing
   14331 	 * any PHY registers.
   14332 	 */
   14333 	wm_gig_downshift_workaround_ich8lan(sc);
   14334 
   14335 out:
   14336 	return;
   14337 }
   14338 
   14339 /* WOL from S5 stops working */
   14340 static void
   14341 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14342 {
   14343 	uint16_t kmreg;
   14344 
   14345 	/* Only for igp3 */
   14346 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14347 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14348 			return;
   14349 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14350 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14351 			return;
   14352 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14353 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14354 	}
   14355 }
   14356 
   14357 /*
   14358  * Workaround for pch's PHYs
   14359  * XXX should be moved to new PHY driver?
   14360  */
   14361 static void
   14362 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14363 {
   14364 
   14365 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14366 		device_xname(sc->sc_dev), __func__));
   14367 	KASSERT(sc->sc_type == WM_T_PCH);
   14368 
   14369 	if (sc->sc_phytype == WMPHY_82577)
   14370 		wm_set_mdio_slow_mode_hv(sc);
   14371 
   14372 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14373 
   14374 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14375 
   14376 	/* 82578 */
   14377 	if (sc->sc_phytype == WMPHY_82578) {
   14378 		struct mii_softc *child;
   14379 
   14380 		/*
   14381 		 * Return registers to default by doing a soft reset then
   14382 		 * writing 0x3140 to the control register
   14383 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14384 		 */
   14385 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14386 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14387 			PHY_RESET(child);
   14388 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14389 			    0x3140);
   14390 		}
   14391 	}
   14392 
   14393 	/* Select page 0 */
   14394 	sc->phy.acquire(sc);
   14395 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14396 	sc->phy.release(sc);
   14397 
   14398 	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there is
	 * link, so that K1 is disabled when the link runs at 1Gbps.
   14401 	 */
   14402 	wm_k1_gig_workaround_hv(sc, 1);
   14403 }
   14404 
   14405 static void
   14406 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14407 {
   14408 
   14409 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14410 		device_xname(sc->sc_dev), __func__));
   14411 	KASSERT(sc->sc_type == WM_T_PCH2);
   14412 
   14413 	wm_set_mdio_slow_mode_hv(sc);
   14414 }
   14415 
   14416 /**
   14417  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   14418  *  @link: link up bool flag
   14419  *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
 *  indications, preventing further DMA write requests.  Work around the
 *  issue by disabling the de-assertion of the clock request when in
 *  1Gbps mode.
   14423  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   14424  *  speeds in order to avoid Tx hangs.
   14425  **/
   14426 static int
   14427 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   14428 {
   14429 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   14430 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14431 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   14432 	uint16_t phyreg;
   14433 
   14434 	if (link && (speed == STATUS_SPEED_1000)) {
   14435 		sc->phy.acquire(sc);
   14436 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14437 		    &phyreg);
   14438 		if (rv != 0)
   14439 			goto release;
   14440 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14441 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   14442 		if (rv != 0)
   14443 			goto release;
   14444 		delay(20);
   14445 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   14446 
   14447 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14448 		    &phyreg);
   14449 release:
   14450 		sc->phy.release(sc);
   14451 		return rv;
   14452 	}
   14453 
   14454 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   14455 
   14456 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14457 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   14458 	    || !link
   14459 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   14460 		goto update_fextnvm6;
   14461 
   14462 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL);
   14463 
   14464 	/* Clear link status transmit timeout */
   14465 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   14466 	if (speed == STATUS_SPEED_100) {
   14467 		/* Set inband Tx timeout to 5x10us for 100Half */
   14468 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14469 
   14470 		/* Do not extend the K1 entry latency for 100Half */
   14471 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14472 	} else {
   14473 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   14474 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14475 
   14476 		/* Extend the K1 entry latency for 10 Mbps */
   14477 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14478 	}
   14479 
   14480 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   14481 
   14482 update_fextnvm6:
   14483 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   14484 	return 0;
   14485 }
   14486 
   14487 static int
   14488 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14489 {
   14490 	int k1_enable = sc->sc_nvm_k1_enabled;
   14491 
   14492 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14493 		device_xname(sc->sc_dev), __func__));
   14494 
   14495 	if (sc->phy.acquire(sc) != 0)
   14496 		return -1;
   14497 
   14498 	if (link) {
   14499 		k1_enable = 0;
   14500 
   14501 		/* Link stall fix for link up */
   14502 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14503 		    0x0100);
   14504 	} else {
   14505 		/* Link stall fix for link down */
   14506 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14507 		    0x4100);
   14508 	}
   14509 
   14510 	wm_configure_k1_ich8lan(sc, k1_enable);
   14511 	sc->phy.release(sc);
   14512 
   14513 	return 0;
   14514 }
   14515 
   14516 static void
   14517 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14518 {
   14519 	uint32_t reg;
   14520 
   14521 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14522 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14523 	    reg | HV_KMRN_MDIO_SLOW);
   14524 }
   14525 
   14526 static void
   14527 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14528 {
   14529 	uint32_t ctrl, ctrl_ext, tmp;
   14530 	uint16_t kmreg;
   14531 	int rv;
   14532 
   14533 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14534 	if (rv != 0)
   14535 		return;
   14536 
   14537 	if (k1_enable)
   14538 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14539 	else
   14540 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14541 
   14542 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14543 	if (rv != 0)
   14544 		return;
   14545 
   14546 	delay(20);
   14547 
   14548 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14549 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14550 
   14551 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14552 	tmp |= CTRL_FRCSPD;
   14553 
   14554 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14555 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14556 	CSR_WRITE_FLUSH(sc);
   14557 	delay(20);
   14558 
   14559 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14560 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14561 	CSR_WRITE_FLUSH(sc);
   14562 	delay(20);
   14563 
   14564 	return;
   14565 }
   14566 
   14567 /* special case - for 82575 - need to do manual init ... */
   14568 static void
   14569 wm_reset_init_script_82575(struct wm_softc *sc)
   14570 {
   14571 	/*
	 * Remark: this is untested code; we have no board without an EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   14574 	 */
   14575 
   14576 	/* SerDes configuration via SERDESCTRL */
   14577 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14578 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14579 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14580 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14581 
   14582 	/* CCM configuration via CCMCTL register */
   14583 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14584 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14585 
   14586 	/* PCIe lanes configuration */
   14587 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14588 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14589 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14590 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14591 
   14592 	/* PCIe PLL Configuration */
   14593 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14594 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14595 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14596 }
   14597 
   14598 static void
   14599 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14600 {
   14601 	uint32_t reg;
   14602 	uint16_t nvmword;
   14603 	int rv;
   14604 
   14605 	if (sc->sc_type != WM_T_82580)
   14606 		return;
   14607 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14608 		return;
   14609 
   14610 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14611 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14612 	if (rv != 0) {
   14613 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14614 		    __func__);
   14615 		return;
   14616 	}
   14617 
   14618 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14619 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14620 		reg |= MDICNFG_DEST;
   14621 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14622 		reg |= MDICNFG_COM_MDIO;
   14623 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14624 }
   14625 
   14626 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14627 
   14628 static bool
   14629 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14630 {
   14631 	int i;
   14632 	uint32_t reg;
   14633 	uint16_t id1, id2;
   14634 
   14635 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14636 		device_xname(sc->sc_dev), __func__));
   14637 	id1 = id2 = 0xffff;
   14638 	for (i = 0; i < 2; i++) {
   14639 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14640 		if (MII_INVALIDID(id1))
   14641 			continue;
   14642 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14643 		if (MII_INVALIDID(id2))
   14644 			continue;
   14645 		break;
   14646 	}
   14647 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   14648 		goto out;
   14649 
   14650 	if (sc->sc_type < WM_T_PCH_LPT) {
   14651 		sc->phy.release(sc);
   14652 		wm_set_mdio_slow_mode_hv(sc);
   14653 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14654 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14655 		sc->phy.acquire(sc);
   14656 	}
   14657 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14658 		printf("XXX return with false\n");
   14659 		return false;
   14660 	}
   14661 out:
   14662 	if (sc->sc_type >= WM_T_PCH_LPT) {
   14663 		/* Only unforce SMBus if ME is not active */
   14664 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14665 			/* Unforce SMBus mode in PHY */
   14666 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14667 			    CV_SMB_CTRL);
   14668 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14669 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14670 			    CV_SMB_CTRL, reg);
   14671 
   14672 			/* Unforce SMBus mode in MAC */
   14673 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14674 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14675 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14676 		}
   14677 	}
   14678 	return true;
   14679 }
   14680 
   14681 static void
   14682 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14683 {
   14684 	uint32_t reg;
   14685 	int i;
   14686 
   14687 	/* Set PHY Config Counter to 50msec */
   14688 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14689 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14690 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14691 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14692 
   14693 	/* Toggle LANPHYPC */
   14694 	reg = CSR_READ(sc, WMREG_CTRL);
   14695 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14696 	reg &= ~CTRL_LANPHYPC_VALUE;
   14697 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14698 	CSR_WRITE_FLUSH(sc);
   14699 	delay(1000);
   14700 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14701 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14702 	CSR_WRITE_FLUSH(sc);
   14703 
	if (sc->sc_type < WM_T_PCH_LPT) {
		delay(50 * 1000);
	} else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
   14716 }
   14717 
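/*
 * wm_platform_pm_pch_lpt:
 *
 *	Program the LTR (Latency Tolerance Reporting) values and the OBFF
 *	(Optimized Buffer Flush/Fill) high water mark, derived from the
 *	link speed and the Rx packet buffer allocation.
 */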
   14718 static int
   14719 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14720 {
   14721 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14722 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14723 	uint32_t rxa;
   14724 	uint16_t scale = 0, lat_enc = 0;
   14725 	int32_t obff_hwm = 0;
   14726 	int64_t lat_ns, value;
   14727 
   14728 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14729 		device_xname(sc->sc_dev), __func__));
   14730 
   14731 	if (link) {
   14732 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14733 		uint32_t status;
   14734 		uint16_t speed;
   14735 		pcireg_t preg;
   14736 
   14737 		status = CSR_READ(sc, WMREG_STATUS);
   14738 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14739 		case STATUS_SPEED_10:
   14740 			speed = 10;
   14741 			break;
   14742 		case STATUS_SPEED_100:
   14743 			speed = 100;
   14744 			break;
   14745 		case STATUS_SPEED_1000:
   14746 			speed = 1000;
   14747 			break;
   14748 		default:
   14749 			device_printf(sc->sc_dev, "Unknown speed "
   14750 			    "(status = %08x)\n", status);
   14751 			return -1;
   14752 		}
   14753 
   14754 		/* Rx Packet Buffer Allocation size (KB) */
   14755 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14756 
		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latency is encoded as
		 * a 10-bit value (0-1023) scaled by a power of two,
		 * 2^(5*scale), where the 3-bit scale field may only be
		 * 0-5 (0=2^0 ns, 1=2^5 ns, 2=2^10 ns, ..., 5=2^25 ns).
		 * This provides a range from 1 ns to 2^25*(2^10-1) ns.
		 */
   14766 		lat_ns = ((int64_t)rxa * 1024 -
   14767 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14768 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14769 		if (lat_ns < 0)
   14770 			lat_ns = 0;
   14771 		else
   14772 			lat_ns /= speed;
   14773 		value = lat_ns;
   14774 
   14775 		while (value > LTRV_VALUE) {
			scale++;
   14777 			value = howmany(value, __BIT(5));
   14778 		}
   14779 		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
   14782 			return -1;
   14783 		}
   14784 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
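		/*
		 * Worked example, assuming LTRV_VALUE is the 10-bit value
		 * field (max 1023) and the granularity step is
		 * __BIT(5) = 32: for lat_ns = 66000, the loop yields
		 * scale=1/value=2063 (still too large), then
		 * scale=2/value=howmany(2063, 32)=65, encoding
		 * 65 * 2^(5*2) = 66560 ns, the smallest representable
		 * latency >= 66000 ns.
		 */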
   14785 
   14786 		/* Determine the maximum latency tolerated by the platform */
   14787 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14788 		    WM_PCI_LTR_CAP_LPT);
   14789 		max_snoop = preg & 0xffff;
   14790 		max_nosnoop = preg >> 16;
   14791 
   14792 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14793 
   14794 		if (lat_enc > max_ltr_enc) {
   14795 			lat_enc = max_ltr_enc;
   14796 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14797 			    * PCI_LTR_SCALETONS(
   14798 				    __SHIFTOUT(lat_enc,
   14799 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14800 		}
   14801 
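		/*
		 * Convert the tolerated latency into the amount of Rx
		 * buffer (in KB) that drains at line rate during that
		 * time; the remainder is the OBFF high water mark.
		 */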
   14802 		if (lat_ns) {
   14803 			lat_ns *= speed * 1000;
   14804 			lat_ns /= 8;
   14805 			lat_ns /= 1000000000;
   14806 			obff_hwm = (int32_t)(rxa - lat_ns);
   14807 		}
   14808 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
   14811 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14812 			return -1;
   14813 		}
   14814 	}
	/* Use the same latency for both snoop and no-snoop requests */
   14816 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14817 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14818 
   14819 	/* Set OBFF high water mark */
   14820 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14821 	reg |= obff_hwm;
   14822 	CSR_WRITE(sc, WMREG_SVT, reg);
   14823 
   14824 	/* Enable OBFF */
   14825 	reg = CSR_READ(sc, WMREG_SVCR);
   14826 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14827 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14828 
   14829 	return 0;
   14830 }
   14831 
/*
 * I210 Errata 25 and I211 Errata 10: Slow System Clock.
 *
 * The internal PLL can occasionally provide the wrong clock frequency
 * after power up; reset the PHY and cycle the device through D3hot
 * until the PLL reports a sane configuration.
 */
   14836 static void
   14837 wm_pll_workaround_i210(struct wm_softc *sc)
   14838 {
   14839 	uint32_t mdicnfg, wuc;
   14840 	uint32_t reg;
   14841 	pcireg_t pcireg;
   14842 	uint32_t pmreg;
   14843 	uint16_t nvmword, tmp_nvmword;
   14844 	int phyval;
   14845 	bool wa_done = false;
   14846 	int i;
   14847 
	/*
	 * Get the Power Management cap offset first, so that an early
	 * return does not leave MDICNFG modified.
	 */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14863 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14864 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14865 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14866 
   14867 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14868 			break; /* OK */
   14869 		}
   14870 
   14871 		wa_done = true;
   14872 		/* Directly reset the internal PHY */
   14873 		reg = CSR_READ(sc, WMREG_CTRL);
   14874 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14875 
   14876 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14877 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14878 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14879 
   14880 		CSR_WRITE(sc, WMREG_WUC, 0);
   14881 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14882 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14883 
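		/*
		 * Cycle the device through D3hot and back to D0 so that
		 * the PLL restarts with the patched autoload word.
		 */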
   14884 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14885 		    pmreg + PCI_PMCSR);
   14886 		pcireg |= PCI_PMCSR_STATE_D3;
   14887 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14888 		    pmreg + PCI_PMCSR, pcireg);
   14889 		delay(1000);
   14890 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14891 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14892 		    pmreg + PCI_PMCSR, pcireg);
   14893 
   14894 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14895 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14896 
   14897 		/* Restore WUC register */
   14898 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14899 	}
   14900 
   14901 	/* Restore MDICNFG setting */
   14902 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14903 	if (wa_done)
   14904 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14905 }
   14906 
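/*
 * wm_legacy_irq_quirk_spt:
 *
 *	Apply the FEXTNVM7/FEXTNVM9 clock gating settings that PCH_SPT
 *	and PCH_CNP devices require when running with legacy (INTx)
 *	interrupts.
 */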
   14907 static void
   14908 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14909 {
   14910 	uint32_t reg;
   14911 
   14912 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14913 		device_xname(sc->sc_dev), __func__));
   14914 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   14915 	    || (sc->sc_type == WM_T_PCH_CNP));
   14916 
   14917 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14918 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14919 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14920 
   14921 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14922 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14923 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14924 }
   14925