/*	$NetBSD: if_wm.c,v 1.593 2018/11/02 08:09:21 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.593 2018/11/02 08:09:21 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
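
/*
 * For illustration (not part of the driver interface): with WM_DEBUG
 * defined, a call such as
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 *
 * prints only when the WM_DEBUG_LINK bit is set in wm_debug; without
 * WM_DEBUG the whole statement compiles away to nothing.
 */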

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts that this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
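
/*
 * Ring indices advance by power-of-two masking, so no modulo or branch
 * is needed.  For illustration: with WM_NTXDESC(txq) == 256,
 * WM_NEXTTX(txq, 254) == 255 and WM_NEXTTX(txq, 255) == (256 & 255) == 0,
 * wrapping back to the start of the descriptor ring.
 */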

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
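
/*
 * The same masking trick works in both directions.  For illustration:
 * WM_NEXTRX(255) == 0 and WM_PREVRX(0) == ((-1) & 255) == 255, so the
 * Rx ring wraps cleanly at both ends.
 */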

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)
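
/*
 * For illustration: WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname,
 * EVCNT_TYPE_INTR) formats the counter name "txq00txdw" into
 * q->txq_txdw_evcnt_name and attaches q->txq_ev_txdw under that name,
 * so each queue's counters are distinguishable in vmstat -e output.
 */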

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
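
/*
 * A usage sketch: WM_CORE_LOCKED() evaluates true either when the core
 * lock is held or when no core lock was created (non-MPSAFE
 * configuration), so it is suitable for assertions such as
 *
 *	KASSERT(WM_CORE_LOCKED(sc));
 *
 * at the top of functions that require the core lock.
 */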

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
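
/*
 * For illustration: after WM_RXCHAIN_RESET(rxq), linking two mbufs with
 * WM_RXCHAIN_LINK(rxq, m1) and WM_RXCHAIN_LINK(rxq, m2) leaves
 * rxq_head == m1, m1->m_next == m2 and rxq_tail == m2, with rxq_tailp
 * pointing at m2->m_next ready for the next fragment of a jumbo frame.
 */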

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
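
/*
 * For illustration (hypothetical numbers): if txq_desc_dma were
 * 0x123456000 and txq_descsize 16, WM_CDTXADDR(txq, 4) would be
 * 0x123456040; WM_CDTXADDR_LO() yields 0x23456040 and, on a 64-bit
 * bus_addr_t, WM_CDTXADDR_HI() yields 0x1 -- the two halves the
 * hardware expects in the descriptor base address registers.
 */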

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1334 	  WM_T_82575,		WMP_F_SERDES },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1336 	  "82575GB quad-1000baseT Ethernet",
   1337 	  WM_T_82575,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1339 	  "82575GB quad-1000baseT Ethernet (PM)",
   1340 	  WM_T_82575,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1342 	  "82576 1000BaseT Ethernet",
   1343 	  WM_T_82576,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1345 	  "82576 1000BaseX Ethernet",
   1346 	  WM_T_82576,		WMP_F_FIBER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1349 	  "82576 gigabit Ethernet (SERDES)",
   1350 	  WM_T_82576,		WMP_F_SERDES },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1353 	  "82576 quad-1000BaseT Ethernet",
   1354 	  WM_T_82576,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1357 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1358 	  WM_T_82576,		WMP_F_COPPER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1361 	  "82576 gigabit Ethernet",
   1362 	  WM_T_82576,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1365 	  "82576 gigabit Ethernet (SERDES)",
   1366 	  WM_T_82576,		WMP_F_SERDES },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1368 	  "82576 quad-gigabit Ethernet (SERDES)",
   1369 	  WM_T_82576,		WMP_F_SERDES },
   1370 
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1372 	  "82580 1000BaseT Ethernet",
   1373 	  WM_T_82580,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1375 	  "82580 1000BaseX Ethernet",
   1376 	  WM_T_82580,		WMP_F_FIBER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1379 	  "82580 1000BaseT Ethernet (SERDES)",
   1380 	  WM_T_82580,		WMP_F_SERDES },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1383 	  "82580 gigabit Ethernet (SGMII)",
   1384 	  WM_T_82580,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1386 	  "82580 dual-1000BaseT Ethernet",
   1387 	  WM_T_82580,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1390 	  "82580 quad-1000BaseX Ethernet",
   1391 	  WM_T_82580,		WMP_F_FIBER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1394 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1395 	  WM_T_82580,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1398 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1399 	  WM_T_82580,		WMP_F_SERDES },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1402 	  "DH89XXCC 1000BASE-KX Ethernet",
   1403 	  WM_T_82580,		WMP_F_SERDES },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1406 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1407 	  WM_T_82580,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1410 	  "I350 Gigabit Network Connection",
   1411 	  WM_T_I350,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1414 	  "I350 Gigabit Fiber Network Connection",
   1415 	  WM_T_I350,		WMP_F_FIBER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1418 	  "I350 Gigabit Backplane Connection",
   1419 	  WM_T_I350,		WMP_F_SERDES },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1422 	  "I350 Quad Port Gigabit Ethernet",
   1423 	  WM_T_I350,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1426 	  "I350 Gigabit Connection",
   1427 	  WM_T_I350,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1430 	  "I354 Gigabit Ethernet (KX)",
   1431 	  WM_T_I354,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1434 	  "I354 Gigabit Ethernet (SGMII)",
   1435 	  WM_T_I354,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1438 	  "I354 Gigabit Ethernet (2.5G)",
   1439 	  WM_T_I354,		WMP_F_COPPER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1442 	  "I210-T1 Ethernet Server Adapter",
   1443 	  WM_T_I210,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1446 	  "I210 Ethernet (Copper OEM)",
   1447 	  WM_T_I210,		WMP_F_COPPER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1450 	  "I210 Ethernet (Copper IT)",
   1451 	  WM_T_I210,		WMP_F_COPPER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1454 	  "I210 Ethernet (FLASH less)",
   1455 	  WM_T_I210,		WMP_F_COPPER },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1458 	  "I210 Gigabit Ethernet (Fiber)",
   1459 	  WM_T_I210,		WMP_F_FIBER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1462 	  "I210 Gigabit Ethernet (SERDES)",
   1463 	  WM_T_I210,		WMP_F_SERDES },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1466 	  "I210 Gigabit Ethernet (FLASH less)",
   1467 	  WM_T_I210,		WMP_F_SERDES },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1470 	  "I210 Gigabit Ethernet (SGMII)",
   1471 	  WM_T_I210,		WMP_F_COPPER },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1474 	  "I211 Ethernet (COPPER)",
   1475 	  WM_T_I211,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1477 	  "I217 V Ethernet Connection",
   1478 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1480 	  "I217 LM Ethernet Connection",
   1481 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1483 	  "I218 V Ethernet Connection",
   1484 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1486 	  "I218 V Ethernet Connection",
   1487 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1489 	  "I218 V Ethernet Connection",
   1490 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1492 	  "I218 LM Ethernet Connection",
   1493 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1495 	  "I218 LM Ethernet Connection",
   1496 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1498 	  "I218 LM Ethernet Connection",
   1499 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1501 	  "I219 V Ethernet Connection",
   1502 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1504 	  "I219 V Ethernet Connection",
   1505 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1507 	  "I219 V Ethernet Connection",
   1508 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1510 	  "I219 V Ethernet Connection",
   1511 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1513 	  "I219 LM Ethernet Connection",
   1514 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1516 	  "I219 LM Ethernet Connection",
   1517 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1519 	  "I219 LM Ethernet Connection",
   1520 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1522 	  "I219 LM Ethernet Connection",
   1523 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1525 	  "I219 LM Ethernet Connection",
   1526 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1528 	  "I219 V Ethernet Connection",
   1529 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1531 	  "I219 V Ethernet Connection",
   1532 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1534 	  "I219 LM Ethernet Connection",
   1535 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1537 	  "I219 LM Ethernet Connection",
   1538 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1539 	{ 0,			0,
   1540 	  NULL,
   1541 	  0,			0 },
   1542 };
   1543 
   1544 /*
   1545  * Register read/write functions.
   1546  * Other than CSR_{READ|WRITE}().
   1547  */
   1548 
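         /*
          * The I/O-space functions below access CSRs indirectly: the
          * register offset is first written to the address window at BAR
          * offset 0, and the data is then transferred through the data
          * window at BAR offset 4.
          */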
   1549 #if 0 /* Not currently used */
   1550 static inline uint32_t
   1551 wm_io_read(struct wm_softc *sc, int reg)
   1552 {
   1553 
   1554 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1555 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1556 }
   1557 #endif
   1558 
   1559 static inline void
   1560 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1561 {
   1562 
   1563 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1564 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1565 }
   1566 
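         /*
          * Write an 8-bit value to a controller register that is reached
          * through a serial control register: the target offset and the
          * data are packed into a single 32-bit write, and the register is
          * then polled in 5us steps until the hardware acknowledges the
          * transaction by setting SCTL_CTL_READY.
          */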
   1567 static inline void
   1568 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1569     uint32_t data)
   1570 {
   1571 	uint32_t regval;
   1572 	int i;
   1573 
   1574 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1575 
   1576 	CSR_WRITE(sc, reg, regval);
   1577 
   1578 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1579 		delay(5);
   1580 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1581 			break;
   1582 	}
   1583 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1584 		aprint_error("%s: WARNING:"
   1585 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1586 		    device_xname(sc->sc_dev), reg);
   1587 	}
   1588 }
   1589 
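         /*
          * Store a bus address in a two-word (low/high) descriptor address
          * field, in the little-endian format the hardware expects; on
          * 32-bit platforms the high word is simply zero.
          */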
   1590 static inline void
   1591 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1592 {
   1593 	wa->wa_low = htole32(v & 0xffffffffU);
   1594 	if (sizeof(bus_addr_t) == 8)
   1595 		wa->wa_high = htole32((uint64_t) v >> 32);
   1596 	else
   1597 		wa->wa_high = 0;
   1598 }
   1599 
   1600 /*
   1601  * Descriptor sync/init functions.
   1602  */
   1603 static inline void
   1604 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1605 {
   1606 	struct wm_softc *sc = txq->txq_sc;
   1607 
   1608 	/* If it will wrap around, sync to the end of the ring. */
   1609 	if ((start + num) > WM_NTXDESC(txq)) {
   1610 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1611 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1612 		    (WM_NTXDESC(txq) - start), ops);
   1613 		num -= (WM_NTXDESC(txq) - start);
   1614 		start = 0;
   1615 	}
   1616 
   1617 	/* Now sync whatever is left. */
   1618 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1619 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1620 }
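
         /*
          * A sketch of the intended use (the index names are only
          * illustrative): a Tx completion path that examines descriptors
          * idx .. idx + num - 1 would bracket the read:
          *
          *	wm_cdtxsync(txq, idx, num, BUS_DMASYNC_POSTREAD);
          *	... inspect the descriptors' status fields ...
          *	wm_cdtxsync(txq, idx, num, BUS_DMASYNC_PREREAD);
          *
          * The wrap handling above makes a single call safe even when the
          * range crosses the end of the ring.
          */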
   1621 
   1622 static inline void
   1623 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1624 {
   1625 	struct wm_softc *sc = rxq->rxq_sc;
   1626 
   1627 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1628 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1629 }
   1630 
   1631 static inline void
   1632 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1633 {
   1634 	struct wm_softc *sc = rxq->rxq_sc;
   1635 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1636 	struct mbuf *m = rxs->rxs_mbuf;
   1637 
   1638 	/*
   1639 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1640 	 * so that the payload after the Ethernet header is aligned
   1641 	 * to a 4-byte boundary.
    1642 	 *
   1643 	 * XXX BRAINDAMAGE ALERT!
   1644 	 * The stupid chip uses the same size for every buffer, which
   1645 	 * is set in the Receive Control register.  We are using the 2K
   1646 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1647 	 * reason, we can't "scoot" packets longer than the standard
   1648 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1649 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1650 	 * the upper layer copy the headers.
   1651 	 */
   1652 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1653 
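         	/*
         	 * Three receive descriptor layouts are in use: the 82574
         	 * takes extended descriptors, WM_F_NEWQUEUE (82575 and
         	 * later) chips take the "nq" layout, and everything else
         	 * takes the legacy wiseman layout.
         	 */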
   1654 	if (sc->sc_type == WM_T_82574) {
   1655 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1656 		rxd->erx_data.erxd_addr =
   1657 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1658 		rxd->erx_data.erxd_dd = 0;
   1659 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1660 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1661 
   1662 		rxd->nqrx_data.nrxd_paddr =
   1663 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1664 		/* Currently, split header is not supported. */
   1665 		rxd->nqrx_data.nrxd_haddr = 0;
   1666 	} else {
   1667 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1668 
   1669 		wm_set_dma_addr(&rxd->wrx_addr,
   1670 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1671 		rxd->wrx_len = 0;
   1672 		rxd->wrx_cksum = 0;
   1673 		rxd->wrx_status = 0;
   1674 		rxd->wrx_errors = 0;
   1675 		rxd->wrx_special = 0;
   1676 	}
   1677 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1678 
   1679 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1680 }
   1681 
   1682 /*
   1683  * Device driver interface functions and commonly used functions.
   1684  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1685  */
   1686 
    1687 /* Look up the device in the supported device table */
   1688 static const struct wm_product *
   1689 wm_lookup(const struct pci_attach_args *pa)
   1690 {
   1691 	const struct wm_product *wmp;
   1692 
   1693 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1694 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1695 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1696 			return wmp;
   1697 	}
   1698 	return NULL;
   1699 }
   1700 
   1701 /* The match function (ca_match) */
   1702 static int
   1703 wm_match(device_t parent, cfdata_t cf, void *aux)
   1704 {
   1705 	struct pci_attach_args *pa = aux;
   1706 
   1707 	if (wm_lookup(pa) != NULL)
   1708 		return 1;
   1709 
   1710 	return 0;
   1711 }
   1712 
   1713 /* The attach function (ca_attach) */
   1714 static void
   1715 wm_attach(device_t parent, device_t self, void *aux)
   1716 {
   1717 	struct wm_softc *sc = device_private(self);
   1718 	struct pci_attach_args *pa = aux;
   1719 	prop_dictionary_t dict;
   1720 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1721 	pci_chipset_tag_t pc = pa->pa_pc;
   1722 	int counts[PCI_INTR_TYPE_SIZE];
   1723 	pci_intr_type_t max_type;
   1724 	const char *eetype, *xname;
   1725 	bus_space_tag_t memt;
   1726 	bus_space_handle_t memh;
   1727 	bus_size_t memsize;
   1728 	int memh_valid;
   1729 	int i, error;
   1730 	const struct wm_product *wmp;
   1731 	prop_data_t ea;
   1732 	prop_number_t pn;
   1733 	uint8_t enaddr[ETHER_ADDR_LEN];
   1734 	char buf[256];
   1735 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1736 	pcireg_t preg, memtype;
   1737 	uint16_t eeprom_data, apme_mask;
   1738 	bool force_clear_smbi;
   1739 	uint32_t link_mode;
   1740 	uint32_t reg;
   1741 
   1742 	sc->sc_dev = self;
   1743 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1744 	sc->sc_core_stopping = false;
   1745 
   1746 	wmp = wm_lookup(pa);
   1747 #ifdef DIAGNOSTIC
   1748 	if (wmp == NULL) {
   1749 		printf("\n");
   1750 		panic("wm_attach: impossible");
   1751 	}
   1752 #endif
   1753 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1754 
   1755 	sc->sc_pc = pa->pa_pc;
   1756 	sc->sc_pcitag = pa->pa_tag;
   1757 
   1758 	if (pci_dma64_available(pa))
   1759 		sc->sc_dmat = pa->pa_dmat64;
   1760 	else
   1761 		sc->sc_dmat = pa->pa_dmat;
   1762 
   1763 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1764 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1765 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1766 
   1767 	sc->sc_type = wmp->wmp_type;
   1768 
   1769 	/* Set default function pointers */
   1770 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1771 	sc->phy.release = sc->nvm.release = wm_put_null;
   1772 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1773 
   1774 	if (sc->sc_type < WM_T_82543) {
   1775 		if (sc->sc_rev < 2) {
   1776 			aprint_error_dev(sc->sc_dev,
   1777 			    "i82542 must be at least rev. 2\n");
   1778 			return;
   1779 		}
   1780 		if (sc->sc_rev < 3)
   1781 			sc->sc_type = WM_T_82542_2_0;
   1782 	}
   1783 
   1784 	/*
   1785 	 * Disable MSI for Errata:
   1786 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1787 	 *
   1788 	 *  82544: Errata 25
   1789 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1790 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1791 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1792 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1793 	 *
   1794 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1795 	 *
   1796 	 *  82571 & 82572: Errata 63
   1797 	 */
   1798 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1799 	    || (sc->sc_type == WM_T_82572))
   1800 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1801 
   1802 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1803 	    || (sc->sc_type == WM_T_82580)
   1804 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1805 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1806 		sc->sc_flags |= WM_F_NEWQUEUE;
   1807 
   1808 	/* Set device properties (mactype) */
   1809 	dict = device_properties(sc->sc_dev);
   1810 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1811 
   1812 	/*
    1813 	 * Map the device.  All devices support memory-mapped access,
   1814 	 * and it is really required for normal operation.
   1815 	 */
   1816 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1817 	switch (memtype) {
   1818 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1819 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1820 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1821 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1822 		break;
   1823 	default:
   1824 		memh_valid = 0;
   1825 		break;
   1826 	}
   1827 
   1828 	if (memh_valid) {
   1829 		sc->sc_st = memt;
   1830 		sc->sc_sh = memh;
   1831 		sc->sc_ss = memsize;
   1832 	} else {
   1833 		aprint_error_dev(sc->sc_dev,
   1834 		    "unable to map device registers\n");
   1835 		return;
   1836 	}
   1837 
   1838 	/*
   1839 	 * In addition, i82544 and later support I/O mapped indirect
   1840 	 * register access.  It is not desirable (nor supported in
   1841 	 * this driver) to use it for normal operation, though it is
   1842 	 * required to work around bugs in some chip versions.
   1843 	 */
   1844 	if (sc->sc_type >= WM_T_82544) {
   1845 		/* First we have to find the I/O BAR. */
   1846 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1847 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1848 			if (memtype == PCI_MAPREG_TYPE_IO)
   1849 				break;
   1850 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1851 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1852 				i += 4;	/* skip high bits, too */
   1853 		}
   1854 		if (i < PCI_MAPREG_END) {
   1855 			/*
    1856 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1857 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1858 			 * That's not a problem, because newer chips don't
    1859 			 * have this bug.
    1860 			 *
    1861 			 * The i8254x apparently doesn't respond when the
    1862 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1863 			 * been configured.
   1864 			 */
   1865 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1866 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1867 				aprint_error_dev(sc->sc_dev,
   1868 				    "WARNING: I/O BAR at zero.\n");
   1869 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1870 					0, &sc->sc_iot, &sc->sc_ioh,
   1871 					NULL, &sc->sc_ios) == 0) {
   1872 				sc->sc_flags |= WM_F_IOH_VALID;
   1873 			} else {
   1874 				aprint_error_dev(sc->sc_dev,
   1875 				    "WARNING: unable to map I/O space\n");
   1876 			}
   1877 		}
   1878 
   1879 	}
   1880 
   1881 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1882 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1883 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1884 	if (sc->sc_type < WM_T_82542_2_1)
   1885 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1886 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1887 
   1888 	/* power up chip */
   1889 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1890 	    && error != EOPNOTSUPP) {
   1891 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1892 		return;
   1893 	}
   1894 
   1895 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1896 	/*
    1897 	 * To save interrupt resources, don't use MSI-X if we can use
    1898 	 * only one queue.
   1899 	 */
   1900 	if (sc->sc_nqueues > 1) {
   1901 		max_type = PCI_INTR_TYPE_MSIX;
   1902 		/*
    1903 		 * The 82583 has an MSI-X capability in its PCI configuration
    1904 		 * space, but the device doesn't actually support MSI-X. At
    1905 		 * least the documentation doesn't say anything about it.
   1906 		 */
   1907 		counts[PCI_INTR_TYPE_MSIX]
   1908 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1909 	} else {
   1910 		max_type = PCI_INTR_TYPE_MSI;
   1911 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1912 	}
   1913 
   1914 	/* Allocation settings */
   1915 	counts[PCI_INTR_TYPE_MSI] = 1;
   1916 	counts[PCI_INTR_TYPE_INTX] = 1;
   1917 	/* overridden by disable flags */
   1918 	if (wm_disable_msi != 0) {
   1919 		counts[PCI_INTR_TYPE_MSI] = 0;
   1920 		if (wm_disable_msix != 0) {
   1921 			max_type = PCI_INTR_TYPE_INTX;
   1922 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1923 		}
   1924 	} else if (wm_disable_msix != 0) {
   1925 		max_type = PCI_INTR_TYPE_MSI;
   1926 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1927 	}
   1928 
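         	/*
         	 * Interrupt allocation falls back in stages: if MSI-X setup
         	 * fails, the vectors are released and allocation is retried
         	 * with MSI; if MSI setup fails, it is retried once more with
         	 * INTx.
         	 */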
   1929 alloc_retry:
   1930 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1931 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1932 		return;
   1933 	}
   1934 
   1935 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1936 		error = wm_setup_msix(sc);
   1937 		if (error) {
   1938 			pci_intr_release(pc, sc->sc_intrs,
   1939 			    counts[PCI_INTR_TYPE_MSIX]);
   1940 
   1941 			/* Setup for MSI: Disable MSI-X */
   1942 			max_type = PCI_INTR_TYPE_MSI;
   1943 			counts[PCI_INTR_TYPE_MSI] = 1;
   1944 			counts[PCI_INTR_TYPE_INTX] = 1;
   1945 			goto alloc_retry;
   1946 		}
   1947 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1948 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1949 		error = wm_setup_legacy(sc);
   1950 		if (error) {
   1951 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1952 			    counts[PCI_INTR_TYPE_MSI]);
   1953 
   1954 			/* The next try is for INTx: Disable MSI */
   1955 			max_type = PCI_INTR_TYPE_INTX;
   1956 			counts[PCI_INTR_TYPE_INTX] = 1;
   1957 			goto alloc_retry;
   1958 		}
   1959 	} else {
   1960 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1961 		error = wm_setup_legacy(sc);
   1962 		if (error) {
   1963 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1964 			    counts[PCI_INTR_TYPE_INTX]);
   1965 			return;
   1966 		}
   1967 	}
   1968 
   1969 	/*
   1970 	 * Check the function ID (unit number of the chip).
   1971 	 */
   1972 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1973 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1974 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1975 	    || (sc->sc_type == WM_T_82580)
   1976 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1977 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1978 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1979 	else
   1980 		sc->sc_funcid = 0;
   1981 
   1982 	/*
   1983 	 * Determine a few things about the bus we're connected to.
   1984 	 */
   1985 	if (sc->sc_type < WM_T_82543) {
   1986 		/* We don't really know the bus characteristics here. */
   1987 		sc->sc_bus_speed = 33;
   1988 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1989 		/*
    1990 		 * CSA (Communication Streaming Architecture) is about as fast
    1991 		 * as a 32-bit 66MHz PCI bus.
   1992 		 */
   1993 		sc->sc_flags |= WM_F_CSA;
   1994 		sc->sc_bus_speed = 66;
   1995 		aprint_verbose_dev(sc->sc_dev,
   1996 		    "Communication Streaming Architecture\n");
   1997 		if (sc->sc_type == WM_T_82547) {
   1998 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1999 			callout_setfunc(&sc->sc_txfifo_ch,
   2000 			    wm_82547_txfifo_stall, sc);
   2001 			aprint_verbose_dev(sc->sc_dev,
   2002 			    "using 82547 Tx FIFO stall work-around\n");
   2003 		}
   2004 	} else if (sc->sc_type >= WM_T_82571) {
   2005 		sc->sc_flags |= WM_F_PCIE;
   2006 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2007 		    && (sc->sc_type != WM_T_ICH10)
   2008 		    && (sc->sc_type != WM_T_PCH)
   2009 		    && (sc->sc_type != WM_T_PCH2)
   2010 		    && (sc->sc_type != WM_T_PCH_LPT)
   2011 		    && (sc->sc_type != WM_T_PCH_SPT)
   2012 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2013 			/* ICH* and PCH* have no PCIe capability registers */
   2014 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2015 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2016 				NULL) == 0)
   2017 				aprint_error_dev(sc->sc_dev,
   2018 				    "unable to find PCIe capability\n");
   2019 		}
   2020 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2021 	} else {
   2022 		reg = CSR_READ(sc, WMREG_STATUS);
   2023 		if (reg & STATUS_BUS64)
   2024 			sc->sc_flags |= WM_F_BUS64;
   2025 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2026 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2027 
   2028 			sc->sc_flags |= WM_F_PCIX;
   2029 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2030 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2031 				aprint_error_dev(sc->sc_dev,
   2032 				    "unable to find PCIX capability\n");
   2033 			else if (sc->sc_type != WM_T_82545_3 &&
   2034 				 sc->sc_type != WM_T_82546_3) {
   2035 				/*
   2036 				 * Work around a problem caused by the BIOS
   2037 				 * setting the max memory read byte count
   2038 				 * incorrectly.
   2039 				 */
   2040 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2041 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2042 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2043 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2044 
   2045 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2046 				    PCIX_CMD_BYTECNT_SHIFT;
   2047 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2048 				    PCIX_STATUS_MAXB_SHIFT;
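         				/*
         				 * Both fields encode the byte count as
         				 * a power of two, 512 << n bytes (so
         				 * n == 3 means 4096 bytes); clamp the
         				 * command value to the maximum the
         				 * device reports.
         				 */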
   2049 				if (bytecnt > maxb) {
   2050 					aprint_verbose_dev(sc->sc_dev,
   2051 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2052 					    512 << bytecnt, 512 << maxb);
   2053 					pcix_cmd = (pcix_cmd &
   2054 					    ~PCIX_CMD_BYTECNT_MASK) |
   2055 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2056 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2057 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2058 					    pcix_cmd);
   2059 				}
   2060 			}
   2061 		}
   2062 		/*
   2063 		 * The quad port adapter is special; it has a PCIX-PCIX
   2064 		 * bridge on the board, and can run the secondary bus at
   2065 		 * a higher speed.
   2066 		 */
   2067 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2068 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2069 								      : 66;
   2070 		} else if (sc->sc_flags & WM_F_PCIX) {
   2071 			switch (reg & STATUS_PCIXSPD_MASK) {
   2072 			case STATUS_PCIXSPD_50_66:
   2073 				sc->sc_bus_speed = 66;
   2074 				break;
   2075 			case STATUS_PCIXSPD_66_100:
   2076 				sc->sc_bus_speed = 100;
   2077 				break;
   2078 			case STATUS_PCIXSPD_100_133:
   2079 				sc->sc_bus_speed = 133;
   2080 				break;
   2081 			default:
   2082 				aprint_error_dev(sc->sc_dev,
   2083 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2084 				    reg & STATUS_PCIXSPD_MASK);
   2085 				sc->sc_bus_speed = 66;
   2086 				break;
   2087 			}
   2088 		} else
   2089 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2090 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2091 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2092 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2093 	}
   2094 
   2095 	/* Disable ASPM L0s and/or L1 for workaround */
   2096 	wm_disable_aspm(sc);
   2097 
   2098 	/* clear interesting stat counters */
   2099 	CSR_READ(sc, WMREG_COLC);
   2100 	CSR_READ(sc, WMREG_RXERRC);
   2101 
   2102 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2103 	    || (sc->sc_type >= WM_T_ICH8))
   2104 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2105 	if (sc->sc_type >= WM_T_ICH8)
   2106 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2107 
   2108 	/* Set PHY, NVM mutex related stuff */
   2109 	switch (sc->sc_type) {
   2110 	case WM_T_82542_2_0:
   2111 	case WM_T_82542_2_1:
   2112 	case WM_T_82543:
   2113 	case WM_T_82544:
   2114 		/* Microwire */
   2115 		sc->nvm.read = wm_nvm_read_uwire;
   2116 		sc->sc_nvm_wordsize = 64;
   2117 		sc->sc_nvm_addrbits = 6;
   2118 		break;
   2119 	case WM_T_82540:
   2120 	case WM_T_82545:
   2121 	case WM_T_82545_3:
   2122 	case WM_T_82546:
   2123 	case WM_T_82546_3:
   2124 		/* Microwire */
   2125 		sc->nvm.read = wm_nvm_read_uwire;
   2126 		reg = CSR_READ(sc, WMREG_EECD);
   2127 		if (reg & EECD_EE_SIZE) {
   2128 			sc->sc_nvm_wordsize = 256;
   2129 			sc->sc_nvm_addrbits = 8;
   2130 		} else {
   2131 			sc->sc_nvm_wordsize = 64;
   2132 			sc->sc_nvm_addrbits = 6;
   2133 		}
   2134 		sc->sc_flags |= WM_F_LOCK_EECD;
   2135 		sc->nvm.acquire = wm_get_eecd;
   2136 		sc->nvm.release = wm_put_eecd;
   2137 		break;
   2138 	case WM_T_82541:
   2139 	case WM_T_82541_2:
   2140 	case WM_T_82547:
   2141 	case WM_T_82547_2:
   2142 		reg = CSR_READ(sc, WMREG_EECD);
   2143 		/*
    2144 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2145 		 * 8254[17], so set the flags and functions before calling it.
   2146 		 */
   2147 		sc->sc_flags |= WM_F_LOCK_EECD;
   2148 		sc->nvm.acquire = wm_get_eecd;
   2149 		sc->nvm.release = wm_put_eecd;
   2150 		if (reg & EECD_EE_TYPE) {
   2151 			/* SPI */
   2152 			sc->nvm.read = wm_nvm_read_spi;
   2153 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2154 			wm_nvm_set_addrbits_size_eecd(sc);
   2155 		} else {
   2156 			/* Microwire */
   2157 			sc->nvm.read = wm_nvm_read_uwire;
   2158 			if ((reg & EECD_EE_ABITS) != 0) {
   2159 				sc->sc_nvm_wordsize = 256;
   2160 				sc->sc_nvm_addrbits = 8;
   2161 			} else {
   2162 				sc->sc_nvm_wordsize = 64;
   2163 				sc->sc_nvm_addrbits = 6;
   2164 			}
   2165 		}
   2166 		break;
   2167 	case WM_T_82571:
   2168 	case WM_T_82572:
   2169 		/* SPI */
   2170 		sc->nvm.read = wm_nvm_read_eerd;
    2171 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2172 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2173 		wm_nvm_set_addrbits_size_eecd(sc);
   2174 		sc->phy.acquire = wm_get_swsm_semaphore;
   2175 		sc->phy.release = wm_put_swsm_semaphore;
   2176 		sc->nvm.acquire = wm_get_nvm_82571;
   2177 		sc->nvm.release = wm_put_nvm_82571;
   2178 		break;
   2179 	case WM_T_82573:
   2180 	case WM_T_82574:
   2181 	case WM_T_82583:
   2182 		sc->nvm.read = wm_nvm_read_eerd;
    2183 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2184 		if (sc->sc_type == WM_T_82573) {
   2185 			sc->phy.acquire = wm_get_swsm_semaphore;
   2186 			sc->phy.release = wm_put_swsm_semaphore;
   2187 			sc->nvm.acquire = wm_get_nvm_82571;
   2188 			sc->nvm.release = wm_put_nvm_82571;
   2189 		} else {
   2190 			/* Both PHY and NVM use the same semaphore. */
   2191 			sc->phy.acquire = sc->nvm.acquire
   2192 			    = wm_get_swfwhw_semaphore;
   2193 			sc->phy.release = sc->nvm.release
   2194 			    = wm_put_swfwhw_semaphore;
   2195 		}
   2196 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2197 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2198 			sc->sc_nvm_wordsize = 2048;
   2199 		} else {
   2200 			/* SPI */
   2201 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2202 			wm_nvm_set_addrbits_size_eecd(sc);
   2203 		}
   2204 		break;
   2205 	case WM_T_82575:
   2206 	case WM_T_82576:
   2207 	case WM_T_82580:
   2208 	case WM_T_I350:
   2209 	case WM_T_I354:
   2210 	case WM_T_80003:
   2211 		/* SPI */
   2212 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2213 		wm_nvm_set_addrbits_size_eecd(sc);
   2214 		if ((sc->sc_type == WM_T_80003)
   2215 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2216 			sc->nvm.read = wm_nvm_read_eerd;
   2217 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2218 		} else {
   2219 			sc->nvm.read = wm_nvm_read_spi;
   2220 			sc->sc_flags |= WM_F_LOCK_EECD;
   2221 		}
   2222 		sc->phy.acquire = wm_get_phy_82575;
   2223 		sc->phy.release = wm_put_phy_82575;
   2224 		sc->nvm.acquire = wm_get_nvm_80003;
   2225 		sc->nvm.release = wm_put_nvm_80003;
   2226 		break;
   2227 	case WM_T_ICH8:
   2228 	case WM_T_ICH9:
   2229 	case WM_T_ICH10:
   2230 	case WM_T_PCH:
   2231 	case WM_T_PCH2:
   2232 	case WM_T_PCH_LPT:
   2233 		sc->nvm.read = wm_nvm_read_ich8;
   2234 		/* FLASH */
   2235 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2236 		sc->sc_nvm_wordsize = 2048;
   2237 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2238 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2239 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2240 			aprint_error_dev(sc->sc_dev,
   2241 			    "can't map FLASH registers\n");
   2242 			goto out;
   2243 		}
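         		/*
         		 * GFPREG describes the NVM region of the flash in
         		 * units of ICH_FLASH_SECTOR_SIZE: the low field is the
         		 * first sector and the field at bit 16 the last.  The
         		 * region holds two banks, so the per-bank size in
         		 * 16-bit words below is
         		 * (last + 1 - first) * sector / (2 * sizeof(uint16_t)).
         		 */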
   2244 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2245 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2246 		    ICH_FLASH_SECTOR_SIZE;
   2247 		sc->sc_ich8_flash_bank_size =
   2248 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2249 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2250 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2251 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2252 		sc->sc_flashreg_offset = 0;
   2253 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2254 		sc->phy.release = wm_put_swflag_ich8lan;
   2255 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2256 		sc->nvm.release = wm_put_nvm_ich8lan;
   2257 		break;
   2258 	case WM_T_PCH_SPT:
   2259 	case WM_T_PCH_CNP:
   2260 		sc->nvm.read = wm_nvm_read_spt;
   2261 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2262 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2263 		sc->sc_flasht = sc->sc_st;
   2264 		sc->sc_flashh = sc->sc_sh;
   2265 		sc->sc_ich8_flash_base = 0;
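         		/*
         		 * The NVM size is strapped: bits [5:1] of WMREG_STRAP,
         		 * plus one, times NVM_SIZE_MULTIPLIER gives the size
         		 * in bytes.
         		 */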
   2266 		sc->sc_nvm_wordsize =
   2267 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2268 		    * NVM_SIZE_MULTIPLIER;
    2269 		/* The size is in bytes; we want words */
   2270 		sc->sc_nvm_wordsize /= 2;
   2271 		/* assume 2 banks */
   2272 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2273 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2274 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2275 		sc->phy.release = wm_put_swflag_ich8lan;
   2276 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2277 		sc->nvm.release = wm_put_nvm_ich8lan;
   2278 		break;
   2279 	case WM_T_I210:
   2280 	case WM_T_I211:
    2281 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2282 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2283 		if (wm_nvm_flash_presence_i210(sc)) {
   2284 			sc->nvm.read = wm_nvm_read_eerd;
   2285 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2286 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2287 			wm_nvm_set_addrbits_size_eecd(sc);
   2288 		} else {
   2289 			sc->nvm.read = wm_nvm_read_invm;
   2290 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2291 			sc->sc_nvm_wordsize = INVM_SIZE;
   2292 		}
   2293 		sc->phy.acquire = wm_get_phy_82575;
   2294 		sc->phy.release = wm_put_phy_82575;
   2295 		sc->nvm.acquire = wm_get_nvm_80003;
   2296 		sc->nvm.release = wm_put_nvm_80003;
   2297 		break;
   2298 	default:
   2299 		break;
   2300 	}
   2301 
   2302 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2303 	switch (sc->sc_type) {
   2304 	case WM_T_82571:
   2305 	case WM_T_82572:
   2306 		reg = CSR_READ(sc, WMREG_SWSM2);
   2307 		if ((reg & SWSM2_LOCK) == 0) {
   2308 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2309 			force_clear_smbi = true;
   2310 		} else
   2311 			force_clear_smbi = false;
   2312 		break;
   2313 	case WM_T_82573:
   2314 	case WM_T_82574:
   2315 	case WM_T_82583:
   2316 		force_clear_smbi = true;
   2317 		break;
   2318 	default:
   2319 		force_clear_smbi = false;
   2320 		break;
   2321 	}
   2322 	if (force_clear_smbi) {
   2323 		reg = CSR_READ(sc, WMREG_SWSM);
   2324 		if ((reg & SWSM_SMBI) != 0)
   2325 			aprint_error_dev(sc->sc_dev,
   2326 			    "Please update the Bootagent\n");
   2327 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2328 	}
   2329 
   2330 	/*
    2331 	 * Defer printing the EEPROM type until after verifying the checksum.
   2332 	 * This allows the EEPROM type to be printed correctly in the case
   2333 	 * that no EEPROM is attached.
   2334 	 */
   2335 	/*
   2336 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2337 	 * this for later, so we can fail future reads from the EEPROM.
   2338 	 */
   2339 	if (wm_nvm_validate_checksum(sc)) {
   2340 		/*
    2341 		 * Check the checksum again, because some PCIe parts fail
    2342 		 * the first check due to the link being in a sleep state.
   2343 		 */
   2344 		if (wm_nvm_validate_checksum(sc))
   2345 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2346 	}
   2347 
   2348 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2349 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2350 	else {
   2351 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2352 		    sc->sc_nvm_wordsize);
   2353 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2354 			aprint_verbose("iNVM");
   2355 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2356 			aprint_verbose("FLASH(HW)");
   2357 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2358 			aprint_verbose("FLASH");
   2359 		else {
   2360 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2361 				eetype = "SPI";
   2362 			else
   2363 				eetype = "MicroWire";
   2364 			aprint_verbose("(%d address bits) %s EEPROM",
   2365 			    sc->sc_nvm_addrbits, eetype);
   2366 		}
   2367 	}
   2368 	wm_nvm_version(sc);
   2369 	aprint_verbose("\n");
   2370 
   2371 	/*
    2372 	 * XXX This is the first call to wm_gmii_setup_phytype. The result
    2373 	 * might be incorrect.
   2374 	 */
   2375 	wm_gmii_setup_phytype(sc, 0, 0);
   2376 
   2377 	/* Reset the chip to a known state. */
   2378 	wm_reset(sc);
   2379 
   2380 	/*
   2381 	 * Check for I21[01] PLL workaround.
   2382 	 *
   2383 	 * Three cases:
   2384 	 * a) Chip is I211.
   2385 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2386 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2387 	 */
   2388 	if (sc->sc_type == WM_T_I211)
   2389 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2390 	if (sc->sc_type == WM_T_I210) {
   2391 		if (!wm_nvm_flash_presence_i210(sc))
   2392 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2393 		else if ((sc->sc_nvm_ver_major < 3)
   2394 		    || ((sc->sc_nvm_ver_major == 3)
   2395 			&& (sc->sc_nvm_ver_minor < 25))) {
   2396 			aprint_verbose_dev(sc->sc_dev,
   2397 			    "ROM image version %d.%d is older than 3.25\n",
   2398 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2399 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2400 		}
   2401 	}
   2402 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2403 		wm_pll_workaround_i210(sc);
   2404 
   2405 	wm_get_wakeup(sc);
   2406 
   2407 	/* Non-AMT based hardware can now take control from firmware */
   2408 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2409 		wm_get_hw_control(sc);
   2410 
   2411 	/*
    2412 	 * Read the Ethernet address from the EEPROM, unless it is found
    2413 	 * first in the device properties.
   2414 	 */
   2415 	ea = prop_dictionary_get(dict, "mac-address");
   2416 	if (ea != NULL) {
   2417 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2418 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2419 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2420 	} else {
   2421 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2422 			aprint_error_dev(sc->sc_dev,
   2423 			    "unable to read Ethernet address\n");
   2424 			goto out;
   2425 		}
   2426 	}
   2427 
   2428 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2429 	    ether_sprintf(enaddr));
   2430 
   2431 	/*
   2432 	 * Read the config info from the EEPROM, and set up various
   2433 	 * bits in the control registers based on their contents.
   2434 	 */
   2435 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2436 	if (pn != NULL) {
   2437 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2438 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2439 	} else {
   2440 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2441 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2442 			goto out;
   2443 		}
   2444 	}
   2445 
   2446 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2447 	if (pn != NULL) {
   2448 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2449 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2450 	} else {
   2451 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2452 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2453 			goto out;
   2454 		}
   2455 	}
   2456 
   2457 	/* check for WM_F_WOL */
   2458 	switch (sc->sc_type) {
   2459 	case WM_T_82542_2_0:
   2460 	case WM_T_82542_2_1:
   2461 	case WM_T_82543:
   2462 		/* dummy? */
   2463 		eeprom_data = 0;
   2464 		apme_mask = NVM_CFG3_APME;
   2465 		break;
   2466 	case WM_T_82544:
   2467 		apme_mask = NVM_CFG2_82544_APM_EN;
   2468 		eeprom_data = cfg2;
   2469 		break;
   2470 	case WM_T_82546:
   2471 	case WM_T_82546_3:
   2472 	case WM_T_82571:
   2473 	case WM_T_82572:
   2474 	case WM_T_82573:
   2475 	case WM_T_82574:
   2476 	case WM_T_82583:
   2477 	case WM_T_80003:
   2478 	default:
   2479 		apme_mask = NVM_CFG3_APME;
   2480 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2481 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2482 		break;
   2483 	case WM_T_82575:
   2484 	case WM_T_82576:
   2485 	case WM_T_82580:
   2486 	case WM_T_I350:
   2487 	case WM_T_I354: /* XXX ok? */
   2488 	case WM_T_ICH8:
   2489 	case WM_T_ICH9:
   2490 	case WM_T_ICH10:
   2491 	case WM_T_PCH:
   2492 	case WM_T_PCH2:
   2493 	case WM_T_PCH_LPT:
   2494 	case WM_T_PCH_SPT:
   2495 	case WM_T_PCH_CNP:
   2496 		/* XXX The funcid should be checked on some devices */
   2497 		apme_mask = WUC_APME;
   2498 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2499 		break;
   2500 	}
   2501 
    2502 	/* Check for the WM_F_WOL flag after the EEPROM data has been read */
   2503 	if ((eeprom_data & apme_mask) != 0)
   2504 		sc->sc_flags |= WM_F_WOL;
   2505 
   2506 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2507 		/* Check NVM for autonegotiation */
   2508 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2509 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2510 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2511 		}
   2512 	}
   2513 
   2514 	/*
    2515 	 * XXX need special handling for some multiple-port cards
    2516 	 * to disable a particular port.
   2517 	 */
   2518 
   2519 	if (sc->sc_type >= WM_T_82544) {
   2520 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2521 		if (pn != NULL) {
   2522 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2523 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2524 		} else {
   2525 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2526 				aprint_error_dev(sc->sc_dev,
   2527 				    "unable to read SWDPIN\n");
   2528 				goto out;
   2529 			}
   2530 		}
   2531 	}
   2532 
   2533 	if (cfg1 & NVM_CFG1_ILOS)
   2534 		sc->sc_ctrl |= CTRL_ILOS;
   2535 
   2536 	/*
   2537 	 * XXX
    2538 	 * This code isn't correct because pins 2 and 3 are located at
    2539 	 * different positions on newer chips. Check all the datasheets.
    2540 	 *
    2541 	 * Until this is resolved, only run it on chips up to the 82580.
   2542 	 */
   2543 	if (sc->sc_type <= WM_T_82580) {
   2544 		if (sc->sc_type >= WM_T_82544) {
   2545 			sc->sc_ctrl |=
   2546 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2547 			    CTRL_SWDPIO_SHIFT;
   2548 			sc->sc_ctrl |=
   2549 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2550 			    CTRL_SWDPINS_SHIFT;
   2551 		} else {
   2552 			sc->sc_ctrl |=
   2553 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2554 			    CTRL_SWDPIO_SHIFT;
   2555 		}
   2556 	}
   2557 
    2558 	/* XXX Should this also apply to chips other than the 82580? */
   2559 	if (sc->sc_type == WM_T_82580) {
   2560 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2561 		if (nvmword & __BIT(13))
   2562 			sc->sc_ctrl |= CTRL_ILOS;
   2563 	}
   2564 
   2565 #if 0
   2566 	if (sc->sc_type >= WM_T_82544) {
   2567 		if (cfg1 & NVM_CFG1_IPS0)
   2568 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2569 		if (cfg1 & NVM_CFG1_IPS1)
   2570 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2571 		sc->sc_ctrl_ext |=
   2572 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2573 		    CTRL_EXT_SWDPIO_SHIFT;
   2574 		sc->sc_ctrl_ext |=
   2575 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2576 		    CTRL_EXT_SWDPINS_SHIFT;
   2577 	} else {
   2578 		sc->sc_ctrl_ext |=
   2579 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2580 		    CTRL_EXT_SWDPIO_SHIFT;
   2581 	}
   2582 #endif
   2583 
   2584 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2585 #if 0
   2586 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2587 #endif
   2588 
   2589 	if (sc->sc_type == WM_T_PCH) {
   2590 		uint16_t val;
   2591 
   2592 		/* Save the NVM K1 bit setting */
   2593 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2594 
   2595 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2596 			sc->sc_nvm_k1_enabled = 1;
   2597 		else
   2598 			sc->sc_nvm_k1_enabled = 0;
   2599 	}
   2600 
    2601 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2602 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2603 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2604 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2605 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2606 	    || sc->sc_type == WM_T_82573
   2607 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2608 		/* Copper only */
   2609 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2610 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2611 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2612 	    || (sc->sc_type == WM_T_I211)) {
   2613 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2614 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
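         		/*
         		 * The strapped link mode selects among 1000BASE-KX
         		 * (SERDES), SGMII, PCIe SERDES and plain GMII/copper;
         		 * for SGMII and SERDES the media type may be refined
         		 * below from the SFP module type.
         		 */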
   2615 		switch (link_mode) {
   2616 		case CTRL_EXT_LINK_MODE_1000KX:
   2617 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2618 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2619 			break;
   2620 		case CTRL_EXT_LINK_MODE_SGMII:
   2621 			if (wm_sgmii_uses_mdio(sc)) {
   2622 				aprint_verbose_dev(sc->sc_dev,
   2623 				    "SGMII(MDIO)\n");
   2624 				sc->sc_flags |= WM_F_SGMII;
   2625 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2626 				break;
   2627 			}
   2628 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2629 			/*FALLTHROUGH*/
   2630 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2631 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2632 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2633 				if (link_mode
   2634 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2635 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2636 					sc->sc_flags |= WM_F_SGMII;
   2637 				} else {
   2638 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2639 					aprint_verbose_dev(sc->sc_dev,
   2640 					    "SERDES\n");
   2641 				}
   2642 				break;
   2643 			}
   2644 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2645 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2646 
   2647 			/* Change current link mode setting */
   2648 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2649 			switch (sc->sc_mediatype) {
   2650 			case WM_MEDIATYPE_COPPER:
   2651 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2652 				break;
   2653 			case WM_MEDIATYPE_SERDES:
   2654 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2655 				break;
   2656 			default:
   2657 				break;
   2658 			}
   2659 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2660 			break;
   2661 		case CTRL_EXT_LINK_MODE_GMII:
   2662 		default:
   2663 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2664 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2665 			break;
   2666 		}
   2667 
    2669 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2670 			reg |= CTRL_EXT_I2C_ENA;
    2671 		else
    2672 			reg &= ~CTRL_EXT_I2C_ENA;
   2673 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2674 	} else if (sc->sc_type < WM_T_82543 ||
   2675 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2676 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2677 			aprint_error_dev(sc->sc_dev,
   2678 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2679 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2680 		}
   2681 	} else {
   2682 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2683 			aprint_error_dev(sc->sc_dev,
   2684 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2685 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2686 		}
   2687 	}
   2688 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2689 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2690 
   2691 	/* Set device properties (macflags) */
   2692 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2693 
   2694 	/* Initialize the media structures accordingly. */
   2695 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2696 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2697 	else
   2698 		wm_tbi_mediainit(sc); /* All others */
   2699 
   2700 	ifp = &sc->sc_ethercom.ec_if;
   2701 	xname = device_xname(sc->sc_dev);
   2702 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2703 	ifp->if_softc = sc;
   2704 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2705 #ifdef WM_MPSAFE
   2706 	ifp->if_extflags = IFEF_MPSAFE;
   2707 #endif
   2708 	ifp->if_ioctl = wm_ioctl;
   2709 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2710 		ifp->if_start = wm_nq_start;
   2711 		/*
    2712 		 * When there is only one CPU and the controller can use
    2713 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2714 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2715 		 * the other for link status changes.
    2716 		 * In this situation, wm_nq_transmit() is disadvantageous
    2717 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2718 		 */
   2719 		if (wm_is_using_multiqueue(sc))
   2720 			ifp->if_transmit = wm_nq_transmit;
   2721 	} else {
   2722 		ifp->if_start = wm_start;
   2723 		/*
    2724 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2725 		 */
   2726 		if (wm_is_using_multiqueue(sc))
   2727 			ifp->if_transmit = wm_transmit;
   2728 	}
    2729 	/* wm(4) does not use ifp->if_watchdog; wm_tick is the watchdog. */
   2730 	ifp->if_init = wm_init;
   2731 	ifp->if_stop = wm_stop;
   2732 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2733 	IFQ_SET_READY(&ifp->if_snd);
   2734 
   2735 	/* Check for jumbo frame */
   2736 	switch (sc->sc_type) {
   2737 	case WM_T_82573:
   2738 		/* XXX limited to 9234 if ASPM is disabled */
   2739 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2740 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2741 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2742 		break;
   2743 	case WM_T_82571:
   2744 	case WM_T_82572:
   2745 	case WM_T_82574:
   2746 	case WM_T_82583:
   2747 	case WM_T_82575:
   2748 	case WM_T_82576:
   2749 	case WM_T_82580:
   2750 	case WM_T_I350:
   2751 	case WM_T_I354:
   2752 	case WM_T_I210:
   2753 	case WM_T_I211:
   2754 	case WM_T_80003:
   2755 	case WM_T_ICH9:
   2756 	case WM_T_ICH10:
   2757 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2758 	case WM_T_PCH_LPT:
   2759 	case WM_T_PCH_SPT:
   2760 	case WM_T_PCH_CNP:
   2761 		/* XXX limited to 9234 */
   2762 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2763 		break;
   2764 	case WM_T_PCH:
   2765 		/* XXX limited to 4096 */
   2766 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2767 		break;
   2768 	case WM_T_82542_2_0:
   2769 	case WM_T_82542_2_1:
   2770 	case WM_T_ICH8:
   2771 		/* No support for jumbo frame */
   2772 		break;
   2773 	default:
   2774 		/* ETHER_MAX_LEN_JUMBO */
   2775 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2776 		break;
   2777 	}
   2778 
    2779 	/* If we're an i82543 or greater, we can support VLANs. */
   2780 	if (sc->sc_type >= WM_T_82543)
   2781 		sc->sc_ethercom.ec_capabilities |=
   2782 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2783 
   2784 	/*
    2785 	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
   2786 	 * on i82543 and later.
   2787 	 */
   2788 	if (sc->sc_type >= WM_T_82543) {
   2789 		ifp->if_capabilities |=
   2790 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2791 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2792 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2793 		    IFCAP_CSUM_TCPv6_Tx |
   2794 		    IFCAP_CSUM_UDPv6_Tx;
   2795 	}
   2796 
   2797 	/*
   2798 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2799 	 *
   2800 	 *	82541GI (8086:1076) ... no
   2801 	 *	82572EI (8086:10b9) ... yes
   2802 	 */
   2803 	if (sc->sc_type >= WM_T_82571) {
   2804 		ifp->if_capabilities |=
   2805 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2806 	}
   2807 
   2808 	/*
    2809 	 * If we're an i82544 or greater (except i82547), we can do
   2810 	 * TCP segmentation offload.
   2811 	 */
   2812 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2813 		ifp->if_capabilities |= IFCAP_TSOv4;
   2814 	}
   2815 
   2816 	if (sc->sc_type >= WM_T_82571) {
   2817 		ifp->if_capabilities |= IFCAP_TSOv6;
   2818 	}
   2819 
   2820 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2821 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2822 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2823 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2824 
   2825 #ifdef WM_MPSAFE
   2826 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2827 #else
   2828 	sc->sc_core_lock = NULL;
   2829 #endif
   2830 
   2831 	/* Attach the interface. */
   2832 	error = if_initialize(ifp);
   2833 	if (error != 0) {
   2834 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2835 		    error);
   2836 		return; /* Error */
   2837 	}
   2838 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2839 	ether_ifattach(ifp, enaddr);
   2840 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2841 	if_register(ifp);
   2842 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2843 	    RND_FLAG_DEFAULT);
   2844 
   2845 #ifdef WM_EVENT_COUNTERS
   2846 	/* Attach event counters. */
   2847 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2848 	    NULL, xname, "linkintr");
   2849 
   2850 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2851 	    NULL, xname, "tx_xoff");
   2852 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2853 	    NULL, xname, "tx_xon");
   2854 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2855 	    NULL, xname, "rx_xoff");
   2856 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2857 	    NULL, xname, "rx_xon");
   2858 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2859 	    NULL, xname, "rx_macctl");
   2860 #endif /* WM_EVENT_COUNTERS */
   2861 
   2862 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2863 		pmf_class_network_register(self, ifp);
   2864 	else
   2865 		aprint_error_dev(self, "couldn't establish power handler\n");
   2866 
   2867 	sc->sc_flags |= WM_F_ATTACHED;
   2868  out:
   2869 	return;
   2870 }
   2871 
   2872 /* The detach function (ca_detach) */
   2873 static int
   2874 wm_detach(device_t self, int flags __unused)
   2875 {
   2876 	struct wm_softc *sc = device_private(self);
   2877 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2878 	int i;
   2879 
   2880 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2881 		return 0;
   2882 
    2883 	/* Stop the interface; the callouts are stopped inside wm_stop(). */
   2884 	wm_stop(ifp, 1);
   2885 
   2886 	pmf_device_deregister(self);
   2887 
   2888 #ifdef WM_EVENT_COUNTERS
   2889 	evcnt_detach(&sc->sc_ev_linkintr);
   2890 
   2891 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2892 	evcnt_detach(&sc->sc_ev_tx_xon);
   2893 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2894 	evcnt_detach(&sc->sc_ev_rx_xon);
   2895 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2896 #endif /* WM_EVENT_COUNTERS */
   2897 
   2898 	/* Tell the firmware about the release */
   2899 	WM_CORE_LOCK(sc);
   2900 	wm_release_manageability(sc);
   2901 	wm_release_hw_control(sc);
   2902 	wm_enable_wakeup(sc);
   2903 	WM_CORE_UNLOCK(sc);
   2904 
   2905 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2906 
   2907 	/* Delete all remaining media. */
   2908 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2909 
   2910 	ether_ifdetach(ifp);
   2911 	if_detach(ifp);
   2912 	if_percpuq_destroy(sc->sc_ipq);
   2913 
   2914 	/* Unload RX dmamaps and free mbufs */
   2915 	for (i = 0; i < sc->sc_nqueues; i++) {
   2916 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2917 		mutex_enter(rxq->rxq_lock);
   2918 		wm_rxdrain(rxq);
   2919 		mutex_exit(rxq->rxq_lock);
   2920 	}
   2921 	/* Must unlock here */
   2922 
   2923 	/* Disestablish the interrupt handler */
   2924 	for (i = 0; i < sc->sc_nintrs; i++) {
   2925 		if (sc->sc_ihs[i] != NULL) {
   2926 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2927 			sc->sc_ihs[i] = NULL;
   2928 		}
   2929 	}
   2930 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2931 
   2932 	wm_free_txrx_queues(sc);
   2933 
   2934 	/* Unmap the registers */
   2935 	if (sc->sc_ss) {
   2936 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2937 		sc->sc_ss = 0;
   2938 	}
   2939 	if (sc->sc_ios) {
   2940 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2941 		sc->sc_ios = 0;
   2942 	}
   2943 	if (sc->sc_flashs) {
   2944 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2945 		sc->sc_flashs = 0;
   2946 	}
   2947 
   2948 	if (sc->sc_core_lock)
   2949 		mutex_obj_free(sc->sc_core_lock);
   2950 	if (sc->sc_ich_phymtx)
   2951 		mutex_obj_free(sc->sc_ich_phymtx);
   2952 	if (sc->sc_ich_nvmmtx)
   2953 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2954 
   2955 	return 0;
   2956 }
   2957 
   2958 static bool
   2959 wm_suspend(device_t self, const pmf_qual_t *qual)
   2960 {
   2961 	struct wm_softc *sc = device_private(self);
   2962 
   2963 	wm_release_manageability(sc);
   2964 	wm_release_hw_control(sc);
   2965 	wm_enable_wakeup(sc);
   2966 
   2967 	return true;
   2968 }
   2969 
   2970 static bool
   2971 wm_resume(device_t self, const pmf_qual_t *qual)
   2972 {
   2973 	struct wm_softc *sc = device_private(self);
   2974 
   2975 	/* Disable ASPM L0s and/or L1 for workaround */
   2976 	wm_disable_aspm(sc);
   2977 	wm_init_manageability(sc);
   2978 
   2979 	return true;
   2980 }
   2981 
   2982 /*
   2983  * wm_watchdog:		[ifnet interface function]
   2984  *
   2985  *	Watchdog timer handler.
   2986  */
   2987 static void
   2988 wm_watchdog(struct ifnet *ifp)
   2989 {
   2990 	int qid;
   2991 	struct wm_softc *sc = ifp->if_softc;
    2992 	uint16_t hang_queue = 0; /* Max queue count in wm(4) is 16 (82576). */
   2993 
   2994 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2995 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2996 
   2997 		wm_watchdog_txq(ifp, txq, &hang_queue);
   2998 	}
   2999 
    3000 	/*
    3001 	 * If any queue has hung up, reset the interface.
    3002 	 */
   3003 	if (hang_queue != 0) {
   3004 		(void) wm_init(ifp);
   3005 
    3006 		/*
    3007 		 * Some upper layer processing still calls ifp->if_start()
    3008 		 * directly, e.g. ALTQ or single-CPU systems.
    3009 		 */
   3010 		/* Try to get more packets going. */
   3011 		ifp->if_start(ifp);
   3012 	}
   3013 }
   3014 
   3015 
   3016 static void
   3017 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3018 {
   3019 
   3020 	mutex_enter(txq->txq_lock);
   3021 	if (txq->txq_sending &&
   3022 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3023 		wm_watchdog_txq_locked(ifp, txq, hang);
   3024 	}
   3025 	mutex_exit(txq->txq_lock);
   3026 }
   3027 
   3028 static void
   3029 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3030     uint16_t *hang)
   3031 {
   3032 	struct wm_softc *sc = ifp->if_softc;
   3033 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3034 
   3035 	KASSERT(mutex_owned(txq->txq_lock));
   3036 
   3037 	/*
   3038 	 * Since we're using delayed interrupts, sweep up
   3039 	 * before we report an error.
   3040 	 */
   3041 	wm_txeof(txq, UINT_MAX);
   3042 
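         	/* Still pending after the sweep, so mark this queue as hung. */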
   3043 	if (txq->txq_sending)
   3044 		*hang |= __BIT(wmq->wmq_id);
   3045 
   3046 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3047 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3048 		    device_xname(sc->sc_dev));
   3049 	} else {
   3050 #ifdef WM_DEBUG
   3051 		int i, j;
   3052 		struct wm_txsoft *txs;
   3053 #endif
   3054 		log(LOG_ERR,
   3055 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3056 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3057 		    txq->txq_next);
   3058 		ifp->if_oerrors++;
   3059 #ifdef WM_DEBUG
   3060 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3061 		    i = WM_NEXTTXS(txq, i)) {
   3062 		    txs = &txq->txq_soft[i];
   3063 		    printf("txs %d tx %d -> %d\n",
   3064 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3065 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3066 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3067 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3068 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3069 				    printf("\t %#08x%08x\n",
   3070 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3071 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3072 			    } else {
   3073 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3074 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3075 					txq->txq_descs[j].wtx_addr.wa_low);
   3076 				    printf("\t %#04x%02x%02x%08x\n",
   3077 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3078 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3079 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3080 					txq->txq_descs[j].wtx_cmdlen);
   3081 			    }
   3082 			if (j == txs->txs_lastdesc)
   3083 				break;
   3084 			}
   3085 		}
   3086 #endif
   3087 	}
   3088 }
   3089 
   3090 /*
   3091  * wm_tick:
   3092  *
   3093  *	One second timer, used to check link status, sweep up
   3094  *	completed transmit jobs, etc.
   3095  */
   3096 static void
   3097 wm_tick(void *arg)
   3098 {
   3099 	struct wm_softc *sc = arg;
   3100 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3101 #ifndef WM_MPSAFE
   3102 	int s = splnet();
   3103 #endif
   3104 
   3105 	WM_CORE_LOCK(sc);
   3106 
   3107 	if (sc->sc_core_stopping) {
   3108 		WM_CORE_UNLOCK(sc);
   3109 #ifndef WM_MPSAFE
   3110 		splx(s);
   3111 #endif
   3112 		return;
   3113 	}
   3114 
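         	/* The XON/XOFF and FC counters exist only on 82542 2.1 and newer. */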
   3115 	if (sc->sc_type >= WM_T_82542_2_1) {
   3116 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3117 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3118 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3119 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3120 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3121 	}
   3122 
   3123 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3124 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3125 	    + CSR_READ(sc, WMREG_CRCERRS)
   3126 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3127 	    + CSR_READ(sc, WMREG_SYMERRC)
   3128 	    + CSR_READ(sc, WMREG_RXERRC)
   3129 	    + CSR_READ(sc, WMREG_SEC)
   3130 	    + CSR_READ(sc, WMREG_CEXTERR)
   3131 	    + CSR_READ(sc, WMREG_RLEC);
    3132 	/*
    3133 	 * WMREG_RNBC is incremented when there are no available buffers
    3134 	 * in host memory. It does not count dropped packets, because the
    3135 	 * Ethernet controller can still receive packets in that case as
    3136 	 * long as there is space in the PHY's FIFO.
    3137 	 *
    3138 	 * To track the WMREG_RNBC count, use a dedicated EVCNT instead
    3139 	 * of if_iqdrops.
    3140 	 */
   3141 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3142 
   3143 	if (sc->sc_flags & WM_F_HAS_MII)
   3144 		mii_tick(&sc->sc_mii);
   3145 	else if ((sc->sc_type >= WM_T_82575)
   3146 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3147 		wm_serdes_tick(sc);
   3148 	else
   3149 		wm_tbi_tick(sc);
   3150 
   3151 	WM_CORE_UNLOCK(sc);
   3152 
   3153 	wm_watchdog(ifp);
   3154 
   3155 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3156 }
   3157 
   3158 static int
   3159 wm_ifflags_cb(struct ethercom *ec)
   3160 {
   3161 	struct ifnet *ifp = &ec->ec_if;
   3162 	struct wm_softc *sc = ifp->if_softc;
   3163 	int rc = 0;
   3164 
   3165 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3166 		device_xname(sc->sc_dev), __func__));
   3167 
   3168 	WM_CORE_LOCK(sc);
   3169 
   3170 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3171 	sc->sc_if_flags = ifp->if_flags;
   3172 
   3173 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3174 		rc = ENETRESET;
   3175 		goto out;
   3176 	}
   3177 
   3178 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3179 		wm_set_filter(sc);
   3180 
   3181 	wm_set_vlan(sc);
   3182 
   3183 out:
   3184 	WM_CORE_UNLOCK(sc);
   3185 
   3186 	return rc;
   3187 }
   3188 
   3189 /*
   3190  * wm_ioctl:		[ifnet interface function]
   3191  *
   3192  *	Handle control requests from the operator.
   3193  */
   3194 static int
   3195 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3196 {
   3197 	struct wm_softc *sc = ifp->if_softc;
   3198 	struct ifreq *ifr = (struct ifreq *) data;
   3199 	struct ifaddr *ifa = (struct ifaddr *)data;
   3200 	struct sockaddr_dl *sdl;
   3201 	int s, error;
   3202 
   3203 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3204 		device_xname(sc->sc_dev), __func__));
   3205 
   3206 #ifndef WM_MPSAFE
   3207 	s = splnet();
   3208 #endif
   3209 	switch (cmd) {
   3210 	case SIOCSIFMEDIA:
   3211 	case SIOCGIFMEDIA:
   3212 		WM_CORE_LOCK(sc);
   3213 		/* Flow control requires full-duplex mode. */
   3214 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3215 		    (ifr->ifr_media & IFM_FDX) == 0)
   3216 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3217 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3218 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3219 				/* We can do both TXPAUSE and RXPAUSE. */
   3220 				ifr->ifr_media |=
   3221 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3222 			}
   3223 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3224 		}
   3225 		WM_CORE_UNLOCK(sc);
   3226 #ifdef WM_MPSAFE
   3227 		s = splnet();
   3228 #endif
   3229 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3230 #ifdef WM_MPSAFE
   3231 		splx(s);
   3232 #endif
   3233 		break;
   3234 	case SIOCINITIFADDR:
   3235 		WM_CORE_LOCK(sc);
   3236 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3237 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3238 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3239 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
    3240 			/* The unicast address is the first entry of the filter */
   3241 			wm_set_filter(sc);
   3242 			error = 0;
   3243 			WM_CORE_UNLOCK(sc);
   3244 			break;
   3245 		}
   3246 		WM_CORE_UNLOCK(sc);
   3247 		/*FALLTHROUGH*/
   3248 	default:
   3249 #ifdef WM_MPSAFE
   3250 		s = splnet();
   3251 #endif
   3252 		/* It may call wm_start, so unlock here */
   3253 		error = ether_ioctl(ifp, cmd, data);
   3254 #ifdef WM_MPSAFE
   3255 		splx(s);
   3256 #endif
   3257 		if (error != ENETRESET)
   3258 			break;
   3259 
   3260 		error = 0;
   3261 
   3262 		if (cmd == SIOCSIFCAP) {
   3263 			error = (*ifp->if_init)(ifp);
   3264 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3265 			;
   3266 		else if (ifp->if_flags & IFF_RUNNING) {
   3267 			/*
   3268 			 * Multicast list has changed; set the hardware filter
   3269 			 * accordingly.
   3270 			 */
   3271 			WM_CORE_LOCK(sc);
   3272 			wm_set_filter(sc);
   3273 			WM_CORE_UNLOCK(sc);
   3274 		}
   3275 		break;
   3276 	}
   3277 
   3278 #ifndef WM_MPSAFE
   3279 	splx(s);
   3280 #endif
   3281 	return error;
   3282 }
   3283 
   3284 /* MAC address related */
   3285 
   3286 /*
    3287  * Get the offset of the MAC address and return it.
    3288  * If an error occurs, use offset 0.
   3289  */
   3290 static uint16_t
   3291 wm_check_alt_mac_addr(struct wm_softc *sc)
   3292 {
   3293 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3294 	uint16_t offset = NVM_OFF_MACADDR;
   3295 
   3296 	/* Try to read alternative MAC address pointer */
   3297 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3298 		return 0;
   3299 
    3300 	/* Check whether the pointer is valid. */
   3301 	if ((offset == 0x0000) || (offset == 0xffff))
   3302 		return 0;
   3303 
   3304 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3305 	/*
    3306 	 * Check whether the alternative MAC address is valid.
    3307 	 * Some cards have a non-0xffff pointer but don't actually use
    3308 	 * an alternative MAC address.
    3309 	 *
    3310 	 * Check that the multicast (I/G) bit of the first octet is clear.
   3311 	 */
   3312 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3313 		if (((myea[0] & 0xff) & 0x01) == 0)
   3314 			return offset; /* Found */
   3315 
   3316 	/* Not found */
   3317 	return 0;
   3318 }
   3319 
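         /*
          * wm_read_mac_addr:
          *
          *	Read the MAC address from the NVM. Multi-port controllers keep
          *	it at a per-function offset or in an alternative MAC address
          *	block; on some dual-port cards the second port uses the first
          *	port's address with the least significant bit toggled.
          */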
   3320 static int
   3321 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3322 {
   3323 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3324 	uint16_t offset = NVM_OFF_MACADDR;
   3325 	int do_invert = 0;
   3326 
   3327 	switch (sc->sc_type) {
   3328 	case WM_T_82580:
   3329 	case WM_T_I350:
   3330 	case WM_T_I354:
   3331 		/* EEPROM Top Level Partitioning */
   3332 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3333 		break;
   3334 	case WM_T_82571:
   3335 	case WM_T_82575:
   3336 	case WM_T_82576:
   3337 	case WM_T_80003:
   3338 	case WM_T_I210:
   3339 	case WM_T_I211:
   3340 		offset = wm_check_alt_mac_addr(sc);
   3341 		if (offset == 0)
   3342 			if ((sc->sc_funcid & 0x01) == 1)
   3343 				do_invert = 1;
   3344 		break;
   3345 	default:
   3346 		if ((sc->sc_funcid & 0x01) == 1)
   3347 			do_invert = 1;
   3348 		break;
   3349 	}
   3350 
   3351 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3352 		goto bad;
   3353 
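         	/* The NVM stores the address as three little-endian 16-bit words. */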
   3354 	enaddr[0] = myea[0] & 0xff;
   3355 	enaddr[1] = myea[0] >> 8;
   3356 	enaddr[2] = myea[1] & 0xff;
   3357 	enaddr[3] = myea[1] >> 8;
   3358 	enaddr[4] = myea[2] & 0xff;
   3359 	enaddr[5] = myea[2] >> 8;
   3360 
   3361 	/*
   3362 	 * Toggle the LSB of the MAC address on the second port
   3363 	 * of some dual port cards.
   3364 	 */
   3365 	if (do_invert != 0)
   3366 		enaddr[5] ^= 1;
   3367 
   3368 	return 0;
   3369 
   3370  bad:
   3371 	return -1;
   3372 }
   3373 
   3374 /*
   3375  * wm_set_ral:
   3376  *
    3377  *	Set an entry in the receive address list.
   3378  */
   3379 static void
   3380 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3381 {
   3382 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3383 	uint32_t wlock_mac;
   3384 	int rv;
   3385 
   3386 	if (enaddr != NULL) {
   3387 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3388 		    (enaddr[3] << 24);
   3389 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3390 		ral_hi |= RAL_AV;
   3391 	} else {
   3392 		ral_lo = 0;
   3393 		ral_hi = 0;
   3394 	}
   3395 
   3396 	switch (sc->sc_type) {
   3397 	case WM_T_82542_2_0:
   3398 	case WM_T_82542_2_1:
   3399 	case WM_T_82543:
   3400 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3401 		CSR_WRITE_FLUSH(sc);
   3402 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3403 		CSR_WRITE_FLUSH(sc);
   3404 		break;
   3405 	case WM_T_PCH2:
   3406 	case WM_T_PCH_LPT:
   3407 	case WM_T_PCH_SPT:
   3408 	case WM_T_PCH_CNP:
   3409 		if (idx == 0) {
   3410 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3411 			CSR_WRITE_FLUSH(sc);
   3412 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3413 			CSR_WRITE_FLUSH(sc);
   3414 			return;
   3415 		}
   3416 		if (sc->sc_type != WM_T_PCH2) {
   3417 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3418 			    FWSM_WLOCK_MAC);
   3419 			addrl = WMREG_SHRAL(idx - 1);
   3420 			addrh = WMREG_SHRAH(idx - 1);
   3421 		} else {
   3422 			wlock_mac = 0;
   3423 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3424 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3425 		}
   3426 
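         		/*
         		 * FWSM_WLOCK_MAC restricts which shared receive address
         		 * registers software may write: 0 means all entries are
         		 * writable, otherwise only indexes up to wlock_mac are.
         		 */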
   3427 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3428 			rv = wm_get_swflag_ich8lan(sc);
   3429 			if (rv != 0)
   3430 				return;
   3431 			CSR_WRITE(sc, addrl, ral_lo);
   3432 			CSR_WRITE_FLUSH(sc);
   3433 			CSR_WRITE(sc, addrh, ral_hi);
   3434 			CSR_WRITE_FLUSH(sc);
   3435 			wm_put_swflag_ich8lan(sc);
   3436 		}
   3437 
   3438 		break;
   3439 	default:
   3440 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3441 		CSR_WRITE_FLUSH(sc);
   3442 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3443 		CSR_WRITE_FLUSH(sc);
   3444 		break;
   3445 	}
   3446 }
   3447 
   3448 /*
   3449  * wm_mchash:
   3450  *
   3451  *	Compute the hash of the multicast address for the 4096-bit
   3452  *	multicast filter.
   3453  */
   3454 static uint32_t
   3455 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3456 {
   3457 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3458 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3459 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3460 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3461 	uint32_t hash;
   3462 
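         	/*
         	 * The hash is formed from bits of the last two octets of the
         	 * address; sc_mchash_type selects which bit window is used.
         	 * E.g. with type 0 on non-ICH parts:
         	 * hash = (enaddr[4] >> 4) | (enaddr[5] << 4).
         	 */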
   3463 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3464 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3465 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3466 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3467 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3468 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3469 		return (hash & 0x3ff);
   3470 	}
   3471 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3472 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3473 
   3474 	return (hash & 0xfff);
   3475 }
   3476 
   3477 /*
   3478  * wm_set_filter:
   3479  *
   3480  *	Set up the receive filter.
   3481  */
   3482 static void
   3483 wm_set_filter(struct wm_softc *sc)
   3484 {
   3485 	struct ethercom *ec = &sc->sc_ethercom;
   3486 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3487 	struct ether_multi *enm;
   3488 	struct ether_multistep step;
   3489 	bus_addr_t mta_reg;
   3490 	uint32_t hash, reg, bit;
   3491 	int i, size, ralmax;
   3492 
   3493 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3494 		device_xname(sc->sc_dev), __func__));
   3495 
   3496 	if (sc->sc_type >= WM_T_82544)
   3497 		mta_reg = WMREG_CORDOVA_MTA;
   3498 	else
   3499 		mta_reg = WMREG_MTA;
   3500 
   3501 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3502 
   3503 	if (ifp->if_flags & IFF_BROADCAST)
   3504 		sc->sc_rctl |= RCTL_BAM;
   3505 	if (ifp->if_flags & IFF_PROMISC) {
   3506 		sc->sc_rctl |= RCTL_UPE;
   3507 		goto allmulti;
   3508 	}
   3509 
   3510 	/*
   3511 	 * Set the station address in the first RAL slot, and
   3512 	 * clear the remaining slots.
   3513 	 */
   3514 	if (sc->sc_type == WM_T_ICH8)
    3515 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3516 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3517 	    || (sc->sc_type == WM_T_PCH))
   3518 		size = WM_RAL_TABSIZE_ICH8;
   3519 	else if (sc->sc_type == WM_T_PCH2)
   3520 		size = WM_RAL_TABSIZE_PCH2;
   3521 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3522 	    || (sc->sc_type == WM_T_PCH_CNP))
   3523 		size = WM_RAL_TABSIZE_PCH_LPT;
   3524 	else if (sc->sc_type == WM_T_82575)
   3525 		size = WM_RAL_TABSIZE_82575;
   3526 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3527 		size = WM_RAL_TABSIZE_82576;
   3528 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3529 		size = WM_RAL_TABSIZE_I350;
   3530 	else
   3531 		size = WM_RAL_TABSIZE;
   3532 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3533 
   3534 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3535 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3536 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3537 		switch (i) {
   3538 		case 0:
   3539 			/* We can use all entries */
   3540 			ralmax = size;
   3541 			break;
   3542 		case 1:
   3543 			/* Only RAR[0] */
   3544 			ralmax = 1;
   3545 			break;
   3546 		default:
   3547 			/* available SHRA + RAR[0] */
   3548 			ralmax = i + 1;
   3549 		}
   3550 	} else
   3551 		ralmax = size;
   3552 	for (i = 1; i < size; i++) {
   3553 		if (i < ralmax)
   3554 			wm_set_ral(sc, NULL, i);
   3555 	}
   3556 
   3557 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3558 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3559 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3560 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3561 		size = WM_ICH8_MC_TABSIZE;
   3562 	else
   3563 		size = WM_MC_TABSIZE;
   3564 	/* Clear out the multicast table. */
   3565 	for (i = 0; i < size; i++) {
   3566 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3567 		CSR_WRITE_FLUSH(sc);
   3568 	}
   3569 
   3570 	ETHER_LOCK(ec);
   3571 	ETHER_FIRST_MULTI(step, ec, enm);
   3572 	while (enm != NULL) {
   3573 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3574 			ETHER_UNLOCK(ec);
   3575 			/*
   3576 			 * We must listen to a range of multicast addresses.
   3577 			 * For now, just accept all multicasts, rather than
   3578 			 * trying to set only those filter bits needed to match
   3579 			 * the range.  (At this time, the only use of address
   3580 			 * ranges is for IP multicast routing, for which the
   3581 			 * range is big enough to require all bits set.)
   3582 			 */
   3583 			goto allmulti;
   3584 		}
   3585 
   3586 		hash = wm_mchash(sc, enm->enm_addrlo);
   3587 
   3588 		reg = (hash >> 5);
   3589 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3590 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3591 		    || (sc->sc_type == WM_T_PCH2)
   3592 		    || (sc->sc_type == WM_T_PCH_LPT)
   3593 		    || (sc->sc_type == WM_T_PCH_SPT)
   3594 		    || (sc->sc_type == WM_T_PCH_CNP))
   3595 			reg &= 0x1f;
   3596 		else
   3597 			reg &= 0x7f;
   3598 		bit = hash & 0x1f;
   3599 
   3600 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3601 		hash |= 1U << bit;
   3602 
   3603 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3604 			/*
    3605 			 * 82544 Errata 9: Certain registers cannot be written
    3606 			 * with particular alignments in PCI-X bus operation
    3607 			 * (FCAH, MTA and VFTA); re-write the adjacent dword too.
   3608 			 */
   3609 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3610 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3611 			CSR_WRITE_FLUSH(sc);
   3612 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3613 			CSR_WRITE_FLUSH(sc);
   3614 		} else {
   3615 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3616 			CSR_WRITE_FLUSH(sc);
   3617 		}
   3618 
   3619 		ETHER_NEXT_MULTI(step, enm);
   3620 	}
   3621 	ETHER_UNLOCK(ec);
   3622 
   3623 	ifp->if_flags &= ~IFF_ALLMULTI;
   3624 	goto setit;
   3625 
   3626  allmulti:
   3627 	ifp->if_flags |= IFF_ALLMULTI;
   3628 	sc->sc_rctl |= RCTL_MPE;
   3629 
   3630  setit:
   3631 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3632 }
   3633 
   3634 /* Reset and init related */
   3635 
   3636 static void
   3637 wm_set_vlan(struct wm_softc *sc)
   3638 {
   3639 
   3640 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3641 		device_xname(sc->sc_dev), __func__));
   3642 
   3643 	/* Deal with VLAN enables. */
   3644 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3645 		sc->sc_ctrl |= CTRL_VME;
   3646 	else
   3647 		sc->sc_ctrl &= ~CTRL_VME;
   3648 
   3649 	/* Write the control registers. */
   3650 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3651 }
   3652 
   3653 static void
   3654 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3655 {
   3656 	uint32_t gcr;
   3657 	pcireg_t ctrl2;
   3658 
   3659 	gcr = CSR_READ(sc, WMREG_GCR);
   3660 
   3661 	/* Only take action if timeout value is defaulted to 0 */
   3662 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3663 		goto out;
   3664 
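         	/* Pre-version-2 capability: use the 10 ms timeout encoded in GCR. */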
   3665 	if ((gcr & GCR_CAP_VER2) == 0) {
   3666 		gcr |= GCR_CMPL_TMOUT_10MS;
   3667 		goto out;
   3668 	}
   3669 
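         	/*
         	 * Capability version 2: program the completion timeout through
         	 * the PCIe Device Control 2 register (16 ms) instead.
         	 */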
   3670 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3671 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3672 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3673 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3674 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3675 
   3676 out:
   3677 	/* Disable completion timeout resend */
   3678 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3679 
   3680 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3681 }
   3682 
   3683 void
   3684 wm_get_auto_rd_done(struct wm_softc *sc)
   3685 {
   3686 	int i;
   3687 
    3688 	/* Wait for eeprom to reload */
   3689 	switch (sc->sc_type) {
   3690 	case WM_T_82571:
   3691 	case WM_T_82572:
   3692 	case WM_T_82573:
   3693 	case WM_T_82574:
   3694 	case WM_T_82583:
   3695 	case WM_T_82575:
   3696 	case WM_T_82576:
   3697 	case WM_T_82580:
   3698 	case WM_T_I350:
   3699 	case WM_T_I354:
   3700 	case WM_T_I210:
   3701 	case WM_T_I211:
   3702 	case WM_T_80003:
   3703 	case WM_T_ICH8:
   3704 	case WM_T_ICH9:
   3705 		for (i = 0; i < 10; i++) {
   3706 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3707 				break;
   3708 			delay(1000);
   3709 		}
   3710 		if (i == 10) {
   3711 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3712 			    "complete\n", device_xname(sc->sc_dev));
   3713 		}
   3714 		break;
   3715 	default:
   3716 		break;
   3717 	}
   3718 }
   3719 
   3720 void
   3721 wm_lan_init_done(struct wm_softc *sc)
   3722 {
   3723 	uint32_t reg = 0;
   3724 	int i;
   3725 
   3726 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3727 		device_xname(sc->sc_dev), __func__));
   3728 
   3729 	/* Wait for eeprom to reload */
   3730 	switch (sc->sc_type) {
   3731 	case WM_T_ICH10:
   3732 	case WM_T_PCH:
   3733 	case WM_T_PCH2:
   3734 	case WM_T_PCH_LPT:
   3735 	case WM_T_PCH_SPT:
   3736 	case WM_T_PCH_CNP:
   3737 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3738 			reg = CSR_READ(sc, WMREG_STATUS);
   3739 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3740 				break;
   3741 			delay(100);
   3742 		}
   3743 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3744 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3745 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3746 		}
   3747 		break;
   3748 	default:
   3749 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3750 		    __func__);
   3751 		break;
   3752 	}
   3753 
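         	/* Acknowledge (clear) the latched LAN_INIT_DONE bit. */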
   3754 	reg &= ~STATUS_LAN_INIT_DONE;
   3755 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3756 }
   3757 
   3758 void
   3759 wm_get_cfg_done(struct wm_softc *sc)
   3760 {
   3761 	int mask;
   3762 	uint32_t reg;
   3763 	int i;
   3764 
   3765 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3766 		device_xname(sc->sc_dev), __func__));
   3767 
   3768 	/* Wait for eeprom to reload */
   3769 	switch (sc->sc_type) {
   3770 	case WM_T_82542_2_0:
   3771 	case WM_T_82542_2_1:
   3772 		/* null */
   3773 		break;
   3774 	case WM_T_82543:
   3775 	case WM_T_82544:
   3776 	case WM_T_82540:
   3777 	case WM_T_82545:
   3778 	case WM_T_82545_3:
   3779 	case WM_T_82546:
   3780 	case WM_T_82546_3:
   3781 	case WM_T_82541:
   3782 	case WM_T_82541_2:
   3783 	case WM_T_82547:
   3784 	case WM_T_82547_2:
   3785 	case WM_T_82573:
   3786 	case WM_T_82574:
   3787 	case WM_T_82583:
   3788 		/* generic */
   3789 		delay(10*1000);
   3790 		break;
   3791 	case WM_T_80003:
   3792 	case WM_T_82571:
   3793 	case WM_T_82572:
   3794 	case WM_T_82575:
   3795 	case WM_T_82576:
   3796 	case WM_T_82580:
   3797 	case WM_T_I350:
   3798 	case WM_T_I354:
   3799 	case WM_T_I210:
   3800 	case WM_T_I211:
   3801 		if (sc->sc_type == WM_T_82571) {
    3802 			/* On the 82571, all functions share the port 0 bit */
   3803 			mask = EEMNGCTL_CFGDONE_0;
   3804 		} else
   3805 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3806 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3807 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3808 				break;
   3809 			delay(1000);
   3810 		}
   3811 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3812 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3813 				device_xname(sc->sc_dev), __func__));
   3814 		}
   3815 		break;
   3816 	case WM_T_ICH8:
   3817 	case WM_T_ICH9:
   3818 	case WM_T_ICH10:
   3819 	case WM_T_PCH:
   3820 	case WM_T_PCH2:
   3821 	case WM_T_PCH_LPT:
   3822 	case WM_T_PCH_SPT:
   3823 	case WM_T_PCH_CNP:
   3824 		delay(10*1000);
   3825 		if (sc->sc_type >= WM_T_ICH10)
   3826 			wm_lan_init_done(sc);
   3827 		else
   3828 			wm_get_auto_rd_done(sc);
   3829 
   3830 		reg = CSR_READ(sc, WMREG_STATUS);
   3831 		if ((reg & STATUS_PHYRA) != 0)
   3832 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3833 		break;
   3834 	default:
   3835 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3836 		    __func__);
   3837 		break;
   3838 	}
   3839 }
   3840 
   3841 void
   3842 wm_phy_post_reset(struct wm_softc *sc)
   3843 {
   3844 	uint32_t reg;
   3845 
   3846 	/* This function is only for ICH8 and newer. */
   3847 	if (sc->sc_type < WM_T_ICH8)
   3848 		return;
   3849 
   3850 	if (wm_phy_resetisblocked(sc)) {
   3851 		/* XXX */
   3852 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3853 		return;
   3854 	}
   3855 
   3856 	/* Allow time for h/w to get to quiescent state after reset */
   3857 	delay(10*1000);
   3858 
   3859 	/* Perform any necessary post-reset workarounds */
   3860 	if (sc->sc_type == WM_T_PCH)
   3861 		wm_hv_phy_workaround_ich8lan(sc);
   3862 	if (sc->sc_type == WM_T_PCH2)
   3863 		wm_lv_phy_workaround_ich8lan(sc);
   3864 
   3865 	/* Clear the host wakeup bit after lcd reset */
   3866 	if (sc->sc_type >= WM_T_PCH) {
   3867 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3868 		    BM_PORT_GEN_CFG);
   3869 		reg &= ~BM_WUC_HOST_WU_BIT;
   3870 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3871 		    BM_PORT_GEN_CFG, reg);
   3872 	}
   3873 
   3874 	/* Configure the LCD with the extended configuration region in NVM */
   3875 	wm_init_lcd_from_nvm(sc);
   3876 
   3877 	/* Configure the LCD with the OEM bits in NVM */
   3878 }
   3879 
   3880 /* Only for PCH and newer */
   3881 static void
   3882 wm_write_smbus_addr(struct wm_softc *sc)
   3883 {
   3884 	uint32_t strap, freq;
   3885 	uint32_t phy_data;
   3886 
   3887 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3888 		device_xname(sc->sc_dev), __func__));
   3889 
   3890 	strap = CSR_READ(sc, WMREG_STRAP);
   3891 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3892 
   3893 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3894 
   3895 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3896 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3897 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3898 
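         	/*
         	 * A strap frequency field of 0 means the NVM does not specify
         	 * an SMBus frequency; otherwise the two low bits of (freq - 1)
         	 * select it.
         	 */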
   3899 	if (sc->sc_phytype == WMPHY_I217) {
   3900 		/* Restore SMBus frequency */
    3901 		if (freq--) {
   3902 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3903 			    | HV_SMB_ADDR_FREQ_HIGH);
   3904 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3905 			    HV_SMB_ADDR_FREQ_LOW);
   3906 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3907 			    HV_SMB_ADDR_FREQ_HIGH);
   3908 		} else {
   3909 			DPRINTF(WM_DEBUG_INIT,
   3910 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3911 				device_xname(sc->sc_dev), __func__));
   3912 		}
   3913 	}
   3914 
   3915 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3916 }
   3917 
   3918 void
   3919 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3920 {
   3921 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3922 	uint16_t phy_page = 0;
   3923 
   3924 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3925 		device_xname(sc->sc_dev), __func__));
   3926 
   3927 	switch (sc->sc_type) {
   3928 	case WM_T_ICH8:
   3929 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3930 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3931 			return;
   3932 
   3933 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3934 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3935 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3936 			break;
   3937 		}
   3938 		/* FALLTHROUGH */
   3939 	case WM_T_PCH:
   3940 	case WM_T_PCH2:
   3941 	case WM_T_PCH_LPT:
   3942 	case WM_T_PCH_SPT:
   3943 	case WM_T_PCH_CNP:
   3944 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3945 		break;
   3946 	default:
   3947 		return;
   3948 	}
   3949 
   3950 	sc->phy.acquire(sc);
   3951 
   3952 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3953 	if ((reg & sw_cfg_mask) == 0)
   3954 		goto release;
   3955 
   3956 	/*
   3957 	 * Make sure HW does not configure LCD from PHY extended configuration
   3958 	 * before SW configuration
   3959 	 */
   3960 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3961 	if ((sc->sc_type < WM_T_PCH2)
   3962 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3963 		goto release;
   3964 
   3965 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3966 		device_xname(sc->sc_dev), __func__));
   3967 	/* word_addr is in DWORD */
   3968 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3969 
   3970 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3971 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3972 	if (cnf_size == 0)
   3973 		goto release;
   3974 
   3975 	if (((sc->sc_type == WM_T_PCH)
   3976 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3977 	    || (sc->sc_type > WM_T_PCH)) {
   3978 		/*
   3979 		 * HW configures the SMBus address and LEDs when the OEM and
   3980 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3981 		 * are cleared, SW will configure them instead.
   3982 		 */
   3983 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3984 			device_xname(sc->sc_dev), __func__));
   3985 		wm_write_smbus_addr(sc);
   3986 
   3987 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3988 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3989 	}
   3990 
   3991 	/* Configure LCD from extended configuration region. */
   3992 	for (i = 0; i < cnf_size; i++) {
   3993 		uint16_t reg_data, reg_addr;
   3994 
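         		/*
         		 * Each table entry is a pair of NVM words: the data word
         		 * followed by the register address. A page-select write
         		 * updates phy_page for the addresses that follow it.
         		 */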
   3995 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3996 			goto release;
   3997 
   3998 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3999 			goto release;
   4000 
   4001 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4002 			phy_page = reg_data;
   4003 
   4004 		reg_addr &= IGPHY_MAXREGADDR;
   4005 		reg_addr |= phy_page;
   4006 
   4007 		sc->phy.release(sc); /* XXX */
   4008 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   4009 		sc->phy.acquire(sc); /* XXX */
   4010 	}
   4011 
   4012 release:
   4013 	sc->phy.release(sc);
   4014 	return;
   4015 }
   4016 
   4017 
   4018 /* Init hardware bits */
   4019 void
   4020 wm_initialize_hardware_bits(struct wm_softc *sc)
   4021 {
   4022 	uint32_t tarc0, tarc1, reg;
   4023 
   4024 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4025 		device_xname(sc->sc_dev), __func__));
   4026 
    4027 	/* For 82571 variants, 80003 and ICHs */
   4028 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4029 	    || (sc->sc_type >= WM_T_80003)) {
   4030 
   4031 		/* Transmit Descriptor Control 0 */
   4032 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4033 		reg |= TXDCTL_COUNT_DESC;
   4034 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4035 
   4036 		/* Transmit Descriptor Control 1 */
   4037 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4038 		reg |= TXDCTL_COUNT_DESC;
   4039 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4040 
   4041 		/* TARC0 */
   4042 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4043 		switch (sc->sc_type) {
   4044 		case WM_T_82571:
   4045 		case WM_T_82572:
   4046 		case WM_T_82573:
   4047 		case WM_T_82574:
   4048 		case WM_T_82583:
   4049 		case WM_T_80003:
   4050 			/* Clear bits 30..27 */
   4051 			tarc0 &= ~__BITS(30, 27);
   4052 			break;
   4053 		default:
   4054 			break;
   4055 		}
   4056 
   4057 		switch (sc->sc_type) {
   4058 		case WM_T_82571:
   4059 		case WM_T_82572:
   4060 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4061 
   4062 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4063 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4064 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4065 			/* 8257[12] Errata No.7 */
    4066 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4067 
   4068 			/* TARC1 bit 28 */
   4069 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4070 				tarc1 &= ~__BIT(28);
   4071 			else
   4072 				tarc1 |= __BIT(28);
   4073 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4074 
   4075 			/*
   4076 			 * 8257[12] Errata No.13
    4077 			 * Disable Dynamic Clock Gating.
   4078 			 */
   4079 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4080 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4081 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4082 			break;
   4083 		case WM_T_82573:
   4084 		case WM_T_82574:
   4085 		case WM_T_82583:
   4086 			if ((sc->sc_type == WM_T_82574)
   4087 			    || (sc->sc_type == WM_T_82583))
   4088 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4089 
   4090 			/* Extended Device Control */
   4091 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4092 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4093 			reg |= __BIT(22);	/* Set bit 22 */
   4094 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4095 
   4096 			/* Device Control */
   4097 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4098 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4099 
   4100 			/* PCIe Control Register */
   4101 			/*
   4102 			 * 82573 Errata (unknown).
   4103 			 *
   4104 			 * 82574 Errata 25 and 82583 Errata 12
   4105 			 * "Dropped Rx Packets":
    4106 			 *   NVM image version 2.1.4 and newer do not have this bug.
   4107 			 */
   4108 			reg = CSR_READ(sc, WMREG_GCR);
   4109 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4110 			CSR_WRITE(sc, WMREG_GCR, reg);
   4111 
   4112 			if ((sc->sc_type == WM_T_82574)
   4113 			    || (sc->sc_type == WM_T_82583)) {
   4114 				/*
   4115 				 * Document says this bit must be set for
   4116 				 * proper operation.
   4117 				 */
   4118 				reg = CSR_READ(sc, WMREG_GCR);
   4119 				reg |= __BIT(22);
   4120 				CSR_WRITE(sc, WMREG_GCR, reg);
   4121 
   4122 				/*
    4123 				 * Apply a workaround for a hardware erratum
    4124 				 * documented in the errata sheets. It fixes
    4125 				 * an issue where error-prone or unreliable
    4126 				 * PCIe completions occur, particularly with
    4127 				 * ASPM enabled. Without the fix, the issue
    4128 				 * can cause Tx timeouts.
   4129 				 */
   4130 				reg = CSR_READ(sc, WMREG_GCR2);
   4131 				reg |= __BIT(0);
   4132 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4133 			}
   4134 			break;
   4135 		case WM_T_80003:
   4136 			/* TARC0 */
   4137 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4138 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4139 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4140 
   4141 			/* TARC1 bit 28 */
   4142 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4143 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4144 				tarc1 &= ~__BIT(28);
   4145 			else
   4146 				tarc1 |= __BIT(28);
   4147 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4148 			break;
   4149 		case WM_T_ICH8:
   4150 		case WM_T_ICH9:
   4151 		case WM_T_ICH10:
   4152 		case WM_T_PCH:
   4153 		case WM_T_PCH2:
   4154 		case WM_T_PCH_LPT:
   4155 		case WM_T_PCH_SPT:
   4156 		case WM_T_PCH_CNP:
   4157 			/* TARC0 */
   4158 			if (sc->sc_type == WM_T_ICH8) {
   4159 				/* Set TARC0 bits 29 and 28 */
   4160 				tarc0 |= __BITS(29, 28);
   4161 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4162 				tarc0 |= __BIT(29);
   4163 				/*
    4164 				 * Drop bit 28 (from Linux). See the
    4165 				 * I218/I219 spec update, "5. Buffer Overrun
    4166 				 * While the I219 is Processing DMA
    4167 				 * Transactions".
   4168 				 */
   4169 				tarc0 &= ~__BIT(28);
   4170 			}
   4171 			/* Set TARC0 bits 23,24,26,27 */
   4172 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4173 
   4174 			/* CTRL_EXT */
   4175 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4176 			reg |= __BIT(22);	/* Set bit 22 */
   4177 			/*
   4178 			 * Enable PHY low-power state when MAC is at D3
   4179 			 * w/o WoL
   4180 			 */
   4181 			if (sc->sc_type >= WM_T_PCH)
   4182 				reg |= CTRL_EXT_PHYPDEN;
   4183 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4184 
   4185 			/* TARC1 */
   4186 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4187 			/* bit 28 */
   4188 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4189 				tarc1 &= ~__BIT(28);
   4190 			else
   4191 				tarc1 |= __BIT(28);
   4192 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4193 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4194 
   4195 			/* Device Status */
   4196 			if (sc->sc_type == WM_T_ICH8) {
   4197 				reg = CSR_READ(sc, WMREG_STATUS);
   4198 				reg &= ~__BIT(31);
   4199 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4200 
   4201 			}
   4202 
   4203 			/* IOSFPC */
   4204 			if (sc->sc_type == WM_T_PCH_SPT) {
   4205 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4206 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4207 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4208 			}
   4209 			/*
    4210 			 * To work around a descriptor data corruption issue
    4211 			 * with NFS v2 UDP traffic, just disable the NFS
    4212 			 * filtering capability.
   4213 			 */
   4214 			reg = CSR_READ(sc, WMREG_RFCTL);
   4215 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4216 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4217 			break;
   4218 		default:
   4219 			break;
   4220 		}
   4221 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4222 
   4223 		switch (sc->sc_type) {
   4224 		/*
   4225 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4226 		 * Avoid RSS Hash Value bug.
   4227 		 */
   4228 		case WM_T_82571:
   4229 		case WM_T_82572:
   4230 		case WM_T_82573:
   4231 		case WM_T_80003:
   4232 		case WM_T_ICH8:
   4233 			reg = CSR_READ(sc, WMREG_RFCTL);
   4234 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4235 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4236 			break;
   4237 		case WM_T_82574:
    4238 			/* Use extended Rx descriptors. */
   4239 			reg = CSR_READ(sc, WMREG_RFCTL);
   4240 			reg |= WMREG_RFCTL_EXSTEN;
   4241 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4242 			break;
   4243 		default:
   4244 			break;
   4245 		}
   4246 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4247 		/*
   4248 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4249 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4250 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4251 		 * Correctly by the Device"
   4252 		 *
   4253 		 * I354(C2000) Errata AVR53:
   4254 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4255 		 * Hang"
   4256 		 */
   4257 		reg = CSR_READ(sc, WMREG_RFCTL);
   4258 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4259 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4260 	}
   4261 }
   4262 
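         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Translate the encoded RXPBS value into a packet buffer size via
          *	the 82580 lookup table; out-of-range values map to 0.
          */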
   4263 static uint32_t
   4264 wm_rxpbs_adjust_82580(uint32_t val)
   4265 {
   4266 	uint32_t rv = 0;
   4267 
   4268 	if (val < __arraycount(wm_82580_rxpbs_table))
   4269 		rv = wm_82580_rxpbs_table[val];
   4270 
   4271 	return rv;
   4272 }
   4273 
   4274 /*
   4275  * wm_reset_phy:
   4276  *
   4277  *	generic PHY reset function.
   4278  *	Same as e1000_phy_hw_reset_generic()
   4279  */
   4280 static void
   4281 wm_reset_phy(struct wm_softc *sc)
   4282 {
   4283 	uint32_t reg;
   4284 
   4285 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4286 		device_xname(sc->sc_dev), __func__));
   4287 	if (wm_phy_resetisblocked(sc))
   4288 		return;
   4289 
   4290 	sc->phy.acquire(sc);
   4291 
   4292 	reg = CSR_READ(sc, WMREG_CTRL);
   4293 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4294 	CSR_WRITE_FLUSH(sc);
   4295 
   4296 	delay(sc->phy.reset_delay_us);
   4297 
   4298 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4299 	CSR_WRITE_FLUSH(sc);
   4300 
   4301 	delay(150);
   4302 
   4303 	sc->phy.release(sc);
   4304 
   4305 	wm_get_cfg_done(sc);
   4306 	wm_phy_post_reset(sc);
   4307 }
   4308 
   4309 /*
   4310  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4311  * so it is enough to check sc->sc_queue[0] only.
   4312  */
   4313 static void
   4314 wm_flush_desc_rings(struct wm_softc *sc)
   4315 {
   4316 	pcireg_t preg;
   4317 	uint32_t reg;
   4318 	struct wm_txqueue *txq;
   4319 	wiseman_txdesc_t *txd;
   4320 	int nexttx;
   4321 	uint32_t rctl;
   4322 
   4323 	/* First, disable MULR fix in FEXTNVM11 */
   4324 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4325 	reg |= FEXTNVM11_DIS_MULRFIX;
   4326 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4327 
   4328 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4329 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4330 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4331 		return;
   4332 
   4333 	/* TX */
   4334 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4335 	    device_xname(sc->sc_dev), preg, reg);
   4336 	reg = CSR_READ(sc, WMREG_TCTL);
   4337 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4338 
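         	/*
         	 * Queue a single dummy 512-byte descriptor and advance TDT so
         	 * that the hardware can drain the pending flush request.
         	 */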
   4339 	txq = &sc->sc_queue[0].wmq_txq;
   4340 	nexttx = txq->txq_next;
   4341 	txd = &txq->txq_descs[nexttx];
   4342 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4343 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4344 	txd->wtx_fields.wtxu_status = 0;
   4345 	txd->wtx_fields.wtxu_options = 0;
   4346 	txd->wtx_fields.wtxu_vlan = 0;
   4347 
   4348 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4349 	    BUS_SPACE_BARRIER_WRITE);
   4350 
   4351 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4352 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4353 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4354 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4355 	delay(250);
   4356 
   4357 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4358 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4359 		return;
   4360 
   4361 	/* RX */
   4362 	printf("%s: Need RX flush (reg = %08x)\n",
   4363 	    device_xname(sc->sc_dev), preg);
   4364 	rctl = CSR_READ(sc, WMREG_RCTL);
   4365 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4366 	CSR_WRITE_FLUSH(sc);
   4367 	delay(150);
   4368 
   4369 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4370 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4371 	reg &= 0xffffc000;
   4372 	/*
   4373 	 * update thresholds: prefetch threshold to 31, host threshold
   4374 	 * to 1 and make sure the granularity is "descriptors" and not
   4375 	 * "cache lines"
   4376 	 */
   4377 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4378 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4379 
   4380 	/*
   4381 	 * momentarily enable the RX ring for the changes to take
   4382 	 * effect
   4383 	 */
   4384 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4385 	CSR_WRITE_FLUSH(sc);
   4386 	delay(150);
   4387 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4388 }
   4389 
   4390 /*
   4391  * wm_reset:
   4392  *
   4393  *	Reset the i82542 chip.
   4394  */
   4395 static void
   4396 wm_reset(struct wm_softc *sc)
   4397 {
   4398 	int phy_reset = 0;
   4399 	int i, error = 0;
   4400 	uint32_t reg;
   4401 	uint16_t kmreg;
   4402 	int rv;
   4403 
   4404 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4405 		device_xname(sc->sc_dev), __func__));
   4406 	KASSERT(sc->sc_type != 0);
   4407 
   4408 	/*
   4409 	 * Allocate on-chip memory according to the MTU size.
   4410 	 * The Packet Buffer Allocation register must be written
   4411 	 * before the chip is reset.
   4412 	 */
   4413 	switch (sc->sc_type) {
   4414 	case WM_T_82547:
   4415 	case WM_T_82547_2:
   4416 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4417 		    PBA_22K : PBA_30K;
   4418 		for (i = 0; i < sc->sc_nqueues; i++) {
   4419 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4420 			txq->txq_fifo_head = 0;
   4421 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4422 			txq->txq_fifo_size =
   4423 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4424 			txq->txq_fifo_stall = 0;
   4425 		}
   4426 		break;
   4427 	case WM_T_82571:
   4428 	case WM_T_82572:
    4429 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4430 	case WM_T_80003:
   4431 		sc->sc_pba = PBA_32K;
   4432 		break;
   4433 	case WM_T_82573:
   4434 		sc->sc_pba = PBA_12K;
   4435 		break;
   4436 	case WM_T_82574:
   4437 	case WM_T_82583:
   4438 		sc->sc_pba = PBA_20K;
   4439 		break;
   4440 	case WM_T_82576:
   4441 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4442 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4443 		break;
   4444 	case WM_T_82580:
   4445 	case WM_T_I350:
   4446 	case WM_T_I354:
   4447 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4448 		break;
   4449 	case WM_T_I210:
   4450 	case WM_T_I211:
   4451 		sc->sc_pba = PBA_34K;
   4452 		break;
   4453 	case WM_T_ICH8:
   4454 		/* Workaround for a bit corruption issue in FIFO memory */
   4455 		sc->sc_pba = PBA_8K;
   4456 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4457 		break;
   4458 	case WM_T_ICH9:
   4459 	case WM_T_ICH10:
   4460 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4461 		    PBA_14K : PBA_10K;
   4462 		break;
   4463 	case WM_T_PCH:
   4464 	case WM_T_PCH2:	/* XXX 14K? */
   4465 	case WM_T_PCH_LPT:
   4466 	case WM_T_PCH_SPT:
   4467 	case WM_T_PCH_CNP:
   4468 		sc->sc_pba = PBA_26K;
   4469 		break;
   4470 	default:
   4471 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4472 		    PBA_40K : PBA_48K;
   4473 		break;
   4474 	}
   4475 	/*
   4476 	 * Only old or non-multiqueue devices have the PBA register
   4477 	 * XXX Need special handling for 82575.
   4478 	 */
   4479 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4480 	    || (sc->sc_type == WM_T_82575))
   4481 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4482 
   4483 	/* Prevent the PCI-E bus from sticking */
   4484 	if (sc->sc_flags & WM_F_PCIE) {
   4485 		int timeout = 800;
   4486 
   4487 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4488 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4489 
   4490 		while (timeout--) {
   4491 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4492 			    == 0)
   4493 				break;
   4494 			delay(100);
   4495 		}
   4496 		if (timeout == 0)
   4497 			device_printf(sc->sc_dev,
   4498 			    "failed to disable busmastering\n");
   4499 	}
   4500 
   4501 	/* Set the completion timeout for interface */
   4502 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4503 	    || (sc->sc_type == WM_T_82580)
   4504 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4505 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4506 		wm_set_pcie_completion_timeout(sc);
   4507 
   4508 	/* Clear interrupt */
   4509 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4510 	if (wm_is_using_msix(sc)) {
   4511 		if (sc->sc_type != WM_T_82574) {
   4512 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4513 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4514 		} else {
   4515 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4516 		}
   4517 	}
   4518 
   4519 	/* Stop the transmit and receive processes. */
   4520 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4521 	sc->sc_rctl &= ~RCTL_EN;
   4522 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4523 	CSR_WRITE_FLUSH(sc);
   4524 
   4525 	/* XXX set_tbi_sbp_82543() */
   4526 
   4527 	delay(10*1000);
   4528 
   4529 	/* Must acquire the MDIO ownership before MAC reset */
   4530 	switch (sc->sc_type) {
   4531 	case WM_T_82573:
   4532 	case WM_T_82574:
   4533 	case WM_T_82583:
   4534 		error = wm_get_hw_semaphore_82573(sc);
   4535 		break;
   4536 	default:
   4537 		break;
   4538 	}
   4539 
   4540 	/*
   4541 	 * 82541 Errata 29? & 82547 Errata 28?
   4542 	 * See also the description about PHY_RST bit in CTRL register
   4543 	 * in 8254x_GBe_SDM.pdf.
   4544 	 */
   4545 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4546 		CSR_WRITE(sc, WMREG_CTRL,
   4547 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4548 		CSR_WRITE_FLUSH(sc);
   4549 		delay(5000);
   4550 	}
   4551 
   4552 	switch (sc->sc_type) {
   4553 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4554 	case WM_T_82541:
   4555 	case WM_T_82541_2:
   4556 	case WM_T_82547:
   4557 	case WM_T_82547_2:
   4558 		/*
   4559 		 * On some chipsets, a reset through a memory-mapped write
   4560 		 * cycle can cause the chip to reset before completing the
    4561 		 * write cycle. This causes major headaches that can be avoided
   4562 		 * by issuing the reset via indirect register writes through
   4563 		 * I/O space.
   4564 		 *
   4565 		 * So, if we successfully mapped the I/O BAR at attach time,
   4566 		 * use that. Otherwise, try our luck with a memory-mapped
   4567 		 * reset.
   4568 		 */
   4569 		if (sc->sc_flags & WM_F_IOH_VALID)
   4570 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4571 		else
   4572 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4573 		break;
   4574 	case WM_T_82545_3:
   4575 	case WM_T_82546_3:
   4576 		/* Use the shadow control register on these chips. */
   4577 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4578 		break;
   4579 	case WM_T_80003:
   4580 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4581 		sc->phy.acquire(sc);
   4582 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4583 		sc->phy.release(sc);
   4584 		break;
   4585 	case WM_T_ICH8:
   4586 	case WM_T_ICH9:
   4587 	case WM_T_ICH10:
   4588 	case WM_T_PCH:
   4589 	case WM_T_PCH2:
   4590 	case WM_T_PCH_LPT:
   4591 	case WM_T_PCH_SPT:
   4592 	case WM_T_PCH_CNP:
   4593 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4594 		if (wm_phy_resetisblocked(sc) == false) {
   4595 			/*
   4596 			 * Gate automatic PHY configuration by hardware on
   4597 			 * non-managed 82579
   4598 			 */
   4599 			if ((sc->sc_type == WM_T_PCH2)
   4600 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4601 				== 0))
   4602 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4603 
   4604 			reg |= CTRL_PHY_RESET;
   4605 			phy_reset = 1;
   4606 		} else
   4607 			printf("XXX reset is blocked!!!\n");
   4608 		sc->phy.acquire(sc);
   4609 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4610 		/* Don't insert a completion barrier during reset */
   4611 		delay(20*1000);
   4612 		mutex_exit(sc->sc_ich_phymtx);
   4613 		break;
   4614 	case WM_T_82580:
   4615 	case WM_T_I350:
   4616 	case WM_T_I354:
   4617 	case WM_T_I210:
   4618 	case WM_T_I211:
   4619 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4620 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4621 			CSR_WRITE_FLUSH(sc);
   4622 		delay(5000);
   4623 		break;
   4624 	case WM_T_82542_2_0:
   4625 	case WM_T_82542_2_1:
   4626 	case WM_T_82543:
   4627 	case WM_T_82540:
   4628 	case WM_T_82545:
   4629 	case WM_T_82546:
   4630 	case WM_T_82571:
   4631 	case WM_T_82572:
   4632 	case WM_T_82573:
   4633 	case WM_T_82574:
   4634 	case WM_T_82575:
   4635 	case WM_T_82576:
   4636 	case WM_T_82583:
   4637 	default:
   4638 		/* Everything else can safely use the documented method. */
   4639 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4640 		break;
   4641 	}
   4642 
   4643 	/* Must release the MDIO ownership after MAC reset */
   4644 	switch (sc->sc_type) {
   4645 	case WM_T_82573:
   4646 	case WM_T_82574:
   4647 	case WM_T_82583:
   4648 		if (error == 0)
   4649 			wm_put_hw_semaphore_82573(sc);
   4650 		break;
   4651 	default:
   4652 		break;
   4653 	}
   4654 
   4655 	if (phy_reset != 0)
   4656 		wm_get_cfg_done(sc);
   4657 
   4658 	/* reload EEPROM */
   4659 	switch (sc->sc_type) {
   4660 	case WM_T_82542_2_0:
   4661 	case WM_T_82542_2_1:
   4662 	case WM_T_82543:
   4663 	case WM_T_82544:
   4664 		delay(10);
   4665 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4666 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4667 		CSR_WRITE_FLUSH(sc);
   4668 		delay(2000);
   4669 		break;
   4670 	case WM_T_82540:
   4671 	case WM_T_82545:
   4672 	case WM_T_82545_3:
   4673 	case WM_T_82546:
   4674 	case WM_T_82546_3:
   4675 		delay(5*1000);
   4676 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4677 		break;
   4678 	case WM_T_82541:
   4679 	case WM_T_82541_2:
   4680 	case WM_T_82547:
   4681 	case WM_T_82547_2:
   4682 		delay(20000);
   4683 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4684 		break;
   4685 	case WM_T_82571:
   4686 	case WM_T_82572:
   4687 	case WM_T_82573:
   4688 	case WM_T_82574:
   4689 	case WM_T_82583:
   4690 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4691 			delay(10);
   4692 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4693 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4694 			CSR_WRITE_FLUSH(sc);
   4695 		}
   4696 		/* check EECD_EE_AUTORD */
   4697 		wm_get_auto_rd_done(sc);
   4698 		/*
    4699 		 * PHY configuration from the NVM starts just after
    4700 		 * EECD_AUTO_RD is set.
   4701 		 */
   4702 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4703 		    || (sc->sc_type == WM_T_82583))
   4704 			delay(25*1000);
   4705 		break;
   4706 	case WM_T_82575:
   4707 	case WM_T_82576:
   4708 	case WM_T_82580:
   4709 	case WM_T_I350:
   4710 	case WM_T_I354:
   4711 	case WM_T_I210:
   4712 	case WM_T_I211:
   4713 	case WM_T_80003:
   4714 		/* check EECD_EE_AUTORD */
   4715 		wm_get_auto_rd_done(sc);
   4716 		break;
   4717 	case WM_T_ICH8:
   4718 	case WM_T_ICH9:
   4719 	case WM_T_ICH10:
   4720 	case WM_T_PCH:
   4721 	case WM_T_PCH2:
   4722 	case WM_T_PCH_LPT:
   4723 	case WM_T_PCH_SPT:
   4724 	case WM_T_PCH_CNP:
   4725 		break;
   4726 	default:
   4727 		panic("%s: unknown type\n", __func__);
   4728 	}
   4729 
   4730 	/* Check whether EEPROM is present or not */
   4731 	switch (sc->sc_type) {
   4732 	case WM_T_82575:
   4733 	case WM_T_82576:
   4734 	case WM_T_82580:
   4735 	case WM_T_I350:
   4736 	case WM_T_I354:
   4737 	case WM_T_ICH8:
   4738 	case WM_T_ICH9:
   4739 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4740 			/* Not found */
   4741 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4742 			if (sc->sc_type == WM_T_82575)
   4743 				wm_reset_init_script_82575(sc);
   4744 		}
   4745 		break;
   4746 	default:
   4747 		break;
   4748 	}
   4749 
   4750 	if (phy_reset != 0)
   4751 		wm_phy_post_reset(sc);
   4752 
   4753 	if ((sc->sc_type == WM_T_82580)
   4754 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4755 		/* clear global device reset status bit */
   4756 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4757 	}
   4758 
   4759 	/* Clear any pending interrupt events. */
   4760 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4761 	reg = CSR_READ(sc, WMREG_ICR);
   4762 	if (wm_is_using_msix(sc)) {
   4763 		if (sc->sc_type != WM_T_82574) {
   4764 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4765 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4766 		} else
   4767 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4768 	}
   4769 
   4770 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4771 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4772 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4773 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4774 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4775 		reg |= KABGTXD_BGSQLBIAS;
   4776 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4777 	}
   4778 
   4779 	/* reload sc_ctrl */
   4780 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4781 
   4782 	if (sc->sc_type == WM_T_I354) {
   4783 #if 0
   4784 		/* I354 uses an external PHY */
   4785 		wm_set_eee_i354(sc);
   4786 #endif
   4787 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4788 		wm_set_eee_i350(sc);
   4789 
   4790 	/*
   4791 	 * For PCH, this write will make sure that any noise will be detected
   4792 	 * as a CRC error and be dropped rather than show up as a bad packet
   4793 	 * to the DMA engine
   4794 	 */
   4795 	if (sc->sc_type == WM_T_PCH)
   4796 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4797 
   4798 	if (sc->sc_type >= WM_T_82544)
   4799 		CSR_WRITE(sc, WMREG_WUC, 0);
   4800 
   4801 	wm_reset_mdicnfg_82580(sc);
   4802 
   4803 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4804 		wm_pll_workaround_i210(sc);
   4805 
   4806 	if (sc->sc_type == WM_T_80003) {
   4807 		/* default to TRUE to enable the MDIC W/A */
   4808 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4809 
   4810 		rv = wm_kmrn_readreg(sc,
   4811 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4812 		if (rv == 0) {
   4813 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4814 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4815 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4816 			else
   4817 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4818 		}
   4819 	}
   4820 }
   4821 
   4822 /*
   4823  * wm_add_rxbuf:
   4824  *
    4825  *	Add a receive buffer to the indicated descriptor.
   4826  */
   4827 static int
   4828 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4829 {
   4830 	struct wm_softc *sc = rxq->rxq_sc;
   4831 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4832 	struct mbuf *m;
   4833 	int error;
   4834 
   4835 	KASSERT(mutex_owned(rxq->rxq_lock));
   4836 
   4837 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4838 	if (m == NULL)
   4839 		return ENOBUFS;
   4840 
   4841 	MCLGET(m, M_DONTWAIT);
   4842 	if ((m->m_flags & M_EXT) == 0) {
   4843 		m_freem(m);
   4844 		return ENOBUFS;
   4845 	}
   4846 
   4847 	if (rxs->rxs_mbuf != NULL)
   4848 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4849 
   4850 	rxs->rxs_mbuf = m;
   4851 
   4852 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4853 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4854 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4855 	if (error) {
   4856 		/* XXX XXX XXX */
   4857 		aprint_error_dev(sc->sc_dev,
   4858 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4859 		panic("wm_add_rxbuf");
   4860 	}
   4861 
   4862 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4863 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4864 
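         	/*
         	 * On NEWQUEUE devices the Rx descriptor may only be written
         	 * once RCTL_EN is set (see the note above wm_set_filter() in
         	 * wm_init_locked()); older devices initialize it unconditionally.
         	 */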
   4865 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4866 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4867 			wm_init_rxdesc(rxq, idx);
   4868 	} else
   4869 		wm_init_rxdesc(rxq, idx);
   4870 
   4871 	return 0;
   4872 }
   4873 
   4874 /*
   4875  * wm_rxdrain:
   4876  *
   4877  *	Drain the receive queue.
   4878  */
   4879 static void
   4880 wm_rxdrain(struct wm_rxqueue *rxq)
   4881 {
   4882 	struct wm_softc *sc = rxq->rxq_sc;
   4883 	struct wm_rxsoft *rxs;
   4884 	int i;
   4885 
   4886 	KASSERT(mutex_owned(rxq->rxq_lock));
   4887 
   4888 	for (i = 0; i < WM_NRXDESC; i++) {
   4889 		rxs = &rxq->rxq_soft[i];
   4890 		if (rxs->rxs_mbuf != NULL) {
   4891 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4892 			m_freem(rxs->rxs_mbuf);
   4893 			rxs->rxs_mbuf = NULL;
   4894 		}
   4895 	}
   4896 }
   4897 
   4898 /*
    4899  * Set up registers for RSS.
    4900  *
    4901  * XXX VMDq is not yet supported.
   4902  */
   4903 static void
   4904 wm_init_rss(struct wm_softc *sc)
   4905 {
   4906 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4907 	int i;
   4908 
   4909 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   4910 
   4911 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4912 		int qid, reta_ent;
   4913 
   4914 		qid  = i % sc->sc_nqueues;
   4915 		switch (sc->sc_type) {
   4916 		case WM_T_82574:
   4917 			reta_ent = __SHIFTIN(qid,
   4918 			    RETA_ENT_QINDEX_MASK_82574);
   4919 			break;
   4920 		case WM_T_82575:
   4921 			reta_ent = __SHIFTIN(qid,
   4922 			    RETA_ENT_QINDEX1_MASK_82575);
   4923 			break;
   4924 		default:
   4925 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4926 			break;
   4927 		}
   4928 
   4929 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4930 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4931 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4932 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4933 	}
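         	/*
         	 * Illustrative example: with sc_nqueues == 4, the RETA entries
         	 * are filled round-robin as 0, 1, 2, 3, 0, 1, ... so that RSS
         	 * hash values spread received flows evenly across the queues.
         	 */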
   4934 
   4935 	rss_getkey((uint8_t *)rss_key);
   4936 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4937 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4938 
   4939 	if (sc->sc_type == WM_T_82574)
   4940 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4941 	else
   4942 		mrqc = MRQC_ENABLE_RSS_MQ;
   4943 
   4944 	/*
   4945 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4946 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4947 	 */
   4948 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4949 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4950 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4951 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4952 
   4953 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4954 }
   4955 
   4956 /*
    4957  * Adjust the TX and RX queue numbers which the system actually uses.
    4958  *
    4959  * The numbers are affected by the following parameters:
    4960  *     - The number of hardware queues
   4961  *     - The number of MSI-X vectors (= "nvectors" argument)
   4962  *     - ncpu
   4963  */
   4964 static void
   4965 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4966 {
   4967 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4968 
   4969 	if (nvectors < 2) {
   4970 		sc->sc_nqueues = 1;
   4971 		return;
   4972 	}
   4973 
   4974 	switch (sc->sc_type) {
   4975 	case WM_T_82572:
   4976 		hw_ntxqueues = 2;
   4977 		hw_nrxqueues = 2;
   4978 		break;
   4979 	case WM_T_82574:
   4980 		hw_ntxqueues = 2;
   4981 		hw_nrxqueues = 2;
   4982 		break;
   4983 	case WM_T_82575:
   4984 		hw_ntxqueues = 4;
   4985 		hw_nrxqueues = 4;
   4986 		break;
   4987 	case WM_T_82576:
   4988 		hw_ntxqueues = 16;
   4989 		hw_nrxqueues = 16;
   4990 		break;
   4991 	case WM_T_82580:
   4992 	case WM_T_I350:
   4993 	case WM_T_I354:
   4994 		hw_ntxqueues = 8;
   4995 		hw_nrxqueues = 8;
   4996 		break;
   4997 	case WM_T_I210:
   4998 		hw_ntxqueues = 4;
   4999 		hw_nrxqueues = 4;
   5000 		break;
   5001 	case WM_T_I211:
   5002 		hw_ntxqueues = 2;
   5003 		hw_nrxqueues = 2;
   5004 		break;
   5005 		/*
    5006 		 * As the following Ethernet controllers do not support
    5007 		 * MSI-X, this driver does not use multiqueue on them:
   5008 		 *     - WM_T_80003
   5009 		 *     - WM_T_ICH8
   5010 		 *     - WM_T_ICH9
   5011 		 *     - WM_T_ICH10
   5012 		 *     - WM_T_PCH
   5013 		 *     - WM_T_PCH2
   5014 		 *     - WM_T_PCH_LPT
   5015 		 */
   5016 	default:
   5017 		hw_ntxqueues = 1;
   5018 		hw_nrxqueues = 1;
   5019 		break;
   5020 	}
   5021 
   5022 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5023 
   5024 	/*
    5025 	 * As using more queues than MSI-X vectors cannot improve scaling,
    5026 	 * we limit the number of queues actually used.
   5027 	 */
   5028 	if (nvectors < hw_nqueues + 1)
   5029 		sc->sc_nqueues = nvectors - 1;
   5030 	else
   5031 		sc->sc_nqueues = hw_nqueues;
   5032 
   5033 	/*
    5034 	 * As using more queues than CPUs cannot improve scaling, we limit
    5035 	 * the number of queues actually used.
   5036 	 */
   5037 	if (ncpu < sc->sc_nqueues)
   5038 		sc->sc_nqueues = ncpu;
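
         	/*
         	 * Illustrative example (hypothetical configuration): an 82576
         	 * has 16 hardware queues, so with 5 MSI-X vectors (one reserved
         	 * for the link interrupt) and 8 CPUs the result is
         	 * min(16, 5 - 1, 8) = 4 queues.
         	 */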
   5039 }
   5040 
   5041 static inline bool
   5042 wm_is_using_msix(struct wm_softc *sc)
   5043 {
   5044 
   5045 	return (sc->sc_nintrs > 1);
   5046 }
   5047 
   5048 static inline bool
   5049 wm_is_using_multiqueue(struct wm_softc *sc)
   5050 {
   5051 
   5052 	return (sc->sc_nqueues > 1);
   5053 }
   5054 
   5055 static int
   5056 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5057 {
   5058 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5059 	wmq->wmq_id = qidx;
   5060 	wmq->wmq_intr_idx = intr_idx;
   5061 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5062 #ifdef WM_MPSAFE
   5063 	    | SOFTINT_MPSAFE
   5064 #endif
   5065 	    , wm_handle_queue, wmq);
   5066 	if (wmq->wmq_si != NULL)
   5067 		return 0;
   5068 
   5069 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5070 	    wmq->wmq_id);
   5071 
   5072 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5073 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5074 	return ENOMEM;
   5075 }
   5076 
   5077 /*
   5078  * Both single interrupt MSI and INTx can use this function.
   5079  */
   5080 static int
   5081 wm_setup_legacy(struct wm_softc *sc)
   5082 {
   5083 	pci_chipset_tag_t pc = sc->sc_pc;
   5084 	const char *intrstr = NULL;
   5085 	char intrbuf[PCI_INTRSTR_LEN];
   5086 	int error;
   5087 
   5088 	error = wm_alloc_txrx_queues(sc);
   5089 	if (error) {
   5090 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5091 		    error);
   5092 		return ENOMEM;
   5093 	}
   5094 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5095 	    sizeof(intrbuf));
   5096 #ifdef WM_MPSAFE
   5097 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5098 #endif
   5099 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5100 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5101 	if (sc->sc_ihs[0] == NULL) {
   5102 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5103 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5104 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5105 		return ENOMEM;
   5106 	}
   5107 
   5108 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5109 	sc->sc_nintrs = 1;
   5110 
   5111 	return wm_softint_establish(sc, 0, 0);
   5112 }
   5113 
   5114 static int
   5115 wm_setup_msix(struct wm_softc *sc)
   5116 {
   5117 	void *vih;
   5118 	kcpuset_t *affinity;
   5119 	int qidx, error, intr_idx, txrx_established;
   5120 	pci_chipset_tag_t pc = sc->sc_pc;
   5121 	const char *intrstr = NULL;
   5122 	char intrbuf[PCI_INTRSTR_LEN];
   5123 	char intr_xname[INTRDEVNAMEBUF];
   5124 
   5125 	if (sc->sc_nqueues < ncpu) {
   5126 		/*
    5127 		 * To avoid colliding with other devices' interrupts, the
    5128 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   5129 		 */
   5130 		sc->sc_affinity_offset = 1;
   5131 	} else {
   5132 		/*
    5133 		 * In this case, this device uses all CPUs, so we unify the
    5134 		 * affinitized cpu_index with the MSI-X vector number for readability.
   5135 		 */
   5136 		sc->sc_affinity_offset = 0;
   5137 	}
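
         	/*
         	 * Illustrative example: with 4 queues on an 8-CPU system,
         	 * sc_affinity_offset is 1, so the TXRX0..TXRX3 handlers are
         	 * bound to CPU#1..CPU#4 and CPU#0 is left for other devices'
         	 * interrupts.
         	 */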
   5138 
   5139 	error = wm_alloc_txrx_queues(sc);
   5140 	if (error) {
   5141 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5142 		    error);
   5143 		return ENOMEM;
   5144 	}
   5145 
   5146 	kcpuset_create(&affinity, false);
   5147 	intr_idx = 0;
   5148 
   5149 	/*
   5150 	 * TX and RX
   5151 	 */
   5152 	txrx_established = 0;
   5153 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5154 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5155 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5156 
   5157 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5158 		    sizeof(intrbuf));
   5159 #ifdef WM_MPSAFE
   5160 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5161 		    PCI_INTR_MPSAFE, true);
   5162 #endif
   5163 		memset(intr_xname, 0, sizeof(intr_xname));
   5164 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5165 		    device_xname(sc->sc_dev), qidx);
   5166 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5167 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5168 		if (vih == NULL) {
   5169 			aprint_error_dev(sc->sc_dev,
   5170 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5171 			    intrstr ? " at " : "",
   5172 			    intrstr ? intrstr : "");
   5173 
   5174 			goto fail;
   5175 		}
   5176 		kcpuset_zero(affinity);
   5177 		/* Round-robin affinity */
   5178 		kcpuset_set(affinity, affinity_to);
   5179 		error = interrupt_distribute(vih, affinity, NULL);
   5180 		if (error == 0) {
   5181 			aprint_normal_dev(sc->sc_dev,
   5182 			    "for TX and RX interrupting at %s affinity to %u\n",
   5183 			    intrstr, affinity_to);
   5184 		} else {
   5185 			aprint_normal_dev(sc->sc_dev,
   5186 			    "for TX and RX interrupting at %s\n", intrstr);
   5187 		}
   5188 		sc->sc_ihs[intr_idx] = vih;
   5189 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5190 			goto fail;
   5191 		txrx_established++;
   5192 		intr_idx++;
   5193 	}
   5194 
   5195 	/*
   5196 	 * LINK
   5197 	 */
   5198 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5199 	    sizeof(intrbuf));
   5200 #ifdef WM_MPSAFE
   5201 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5202 #endif
   5203 	memset(intr_xname, 0, sizeof(intr_xname));
   5204 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5205 	    device_xname(sc->sc_dev));
   5206 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5207 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5208 	if (vih == NULL) {
   5209 		aprint_error_dev(sc->sc_dev,
   5210 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5211 		    intrstr ? " at " : "",
   5212 		    intrstr ? intrstr : "");
   5213 
   5214 		goto fail;
   5215 	}
    5216 	/* Keep the default affinity for the LINK interrupt */
   5217 	aprint_normal_dev(sc->sc_dev,
   5218 	    "for LINK interrupting at %s\n", intrstr);
   5219 	sc->sc_ihs[intr_idx] = vih;
   5220 	sc->sc_link_intr_idx = intr_idx;
   5221 
   5222 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5223 	kcpuset_destroy(affinity);
   5224 	return 0;
   5225 
   5226  fail:
   5227 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5228 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5229 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5230 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5231 	}
   5232 
   5233 	kcpuset_destroy(affinity);
   5234 	return ENOMEM;
   5235 }
   5236 
   5237 static void
   5238 wm_unset_stopping_flags(struct wm_softc *sc)
   5239 {
   5240 	int i;
   5241 
   5242 	KASSERT(WM_CORE_LOCKED(sc));
   5243 
    5244 	/* Must unset the stopping flags in ascending order. */
   5247 	for (i = 0; i < sc->sc_nqueues; i++) {
   5248 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5249 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5250 
   5251 		mutex_enter(txq->txq_lock);
   5252 		txq->txq_stopping = false;
   5253 		mutex_exit(txq->txq_lock);
   5254 
   5255 		mutex_enter(rxq->rxq_lock);
   5256 		rxq->rxq_stopping = false;
   5257 		mutex_exit(rxq->rxq_lock);
   5258 	}
   5259 
   5260 	sc->sc_core_stopping = false;
   5261 }
   5262 
   5263 static void
   5264 wm_set_stopping_flags(struct wm_softc *sc)
   5265 {
   5266 	int i;
   5267 
   5268 	KASSERT(WM_CORE_LOCKED(sc));
   5269 
   5270 	sc->sc_core_stopping = true;
   5271 
    5272 	/* Must set the stopping flags in ascending order. */
   5275 	for (i = 0; i < sc->sc_nqueues; i++) {
   5276 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5277 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5278 
   5279 		mutex_enter(rxq->rxq_lock);
   5280 		rxq->rxq_stopping = true;
   5281 		mutex_exit(rxq->rxq_lock);
   5282 
   5283 		mutex_enter(txq->txq_lock);
   5284 		txq->txq_stopping = true;
   5285 		mutex_exit(txq->txq_lock);
   5286 	}
   5287 }
   5288 
   5289 /*
    5290  * Write the interrupt interval value to the ITR or EITR register.
   5291  */
   5292 static void
   5293 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5294 {
   5295 
   5296 	if (!wmq->wmq_set_itr)
   5297 		return;
   5298 
   5299 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5300 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5301 
   5302 		/*
    5303 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
    5304 		 * the counter field in software.
   5305 		 */
   5306 		if (sc->sc_type == WM_T_82575)
   5307 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5308 		else
   5309 			eitr |= EITR_CNT_INGR;
   5310 
   5311 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5312 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5313 		/*
    5314 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5315 		 * the multiqueue function with MSI-X.
   5316 		 */
   5317 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5318 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5319 	} else {
   5320 		KASSERT(wmq->wmq_id == 0);
   5321 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5322 	}
   5323 
   5324 	wmq->wmq_set_itr = false;
   5325 }
   5326 
   5327 /*
   5328  * TODO
    5329  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5330  * however, it does not fit wm(4), so we keep AIM disabled until we
    5331  * find an appropriate ITR calculation.
   5332  */
   5333 /*
    5334  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5335  * write to the register. This function does not write the ITR/EITR register.
   5336  */
   5337 static void
   5338 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5339 {
   5340 #ifdef NOTYET
   5341 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5342 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5343 	uint32_t avg_size = 0;
   5344 	uint32_t new_itr;
   5345 
   5346 	if (rxq->rxq_packets)
   5347 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5348 	if (txq->txq_packets)
   5349 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5350 
   5351 	if (avg_size == 0) {
   5352 		new_itr = 450; /* restore default value */
   5353 		goto out;
   5354 	}
   5355 
   5356 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5357 	avg_size += 24;
   5358 
   5359 	/* Don't starve jumbo frames */
   5360 	avg_size = uimin(avg_size, 3000);
   5361 
   5362 	/* Give a little boost to mid-size frames */
   5363 	if ((avg_size > 300) && (avg_size < 1200))
   5364 		new_itr = avg_size / 3;
   5365 	else
   5366 		new_itr = avg_size / 2;
   5367 
   5368 out:
   5369 	/*
    5370 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5371 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5372 	 */
   5373 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5374 		new_itr *= 4;
   5375 
   5376 	if (new_itr != wmq->wmq_itr) {
   5377 		wmq->wmq_itr = new_itr;
   5378 		wmq->wmq_set_itr = true;
   5379 	} else
   5380 		wmq->wmq_set_itr = false;
   5381 
   5382 	rxq->rxq_packets = 0;
   5383 	rxq->rxq_bytes = 0;
   5384 	txq->txq_packets = 0;
   5385 	txq->txq_bytes = 0;
   5386 #endif
   5387 }
   5388 
   5389 /*
   5390  * wm_init:		[ifnet interface function]
   5391  *
   5392  *	Initialize the interface.
   5393  */
   5394 static int
   5395 wm_init(struct ifnet *ifp)
   5396 {
   5397 	struct wm_softc *sc = ifp->if_softc;
   5398 	int ret;
   5399 
   5400 	WM_CORE_LOCK(sc);
   5401 	ret = wm_init_locked(ifp);
   5402 	WM_CORE_UNLOCK(sc);
   5403 
   5404 	return ret;
   5405 }
   5406 
   5407 static int
   5408 wm_init_locked(struct ifnet *ifp)
   5409 {
   5410 	struct wm_softc *sc = ifp->if_softc;
   5411 	int i, j, trynum, error = 0;
   5412 	uint32_t reg;
   5413 
   5414 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5415 		device_xname(sc->sc_dev), __func__));
   5416 	KASSERT(WM_CORE_LOCKED(sc));
   5417 
   5418 	/*
    5419 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5420 	 * There is a small but measurable benefit to avoiding the adjustment
   5421 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5422 	 * on such platforms.  One possibility is that the DMA itself is
   5423 	 * slightly more efficient if the front of the entire packet (instead
   5424 	 * of the front of the headers) is aligned.
   5425 	 *
   5426 	 * Note we must always set align_tweak to 0 if we are using
   5427 	 * jumbo frames.
   5428 	 */
   5429 #ifdef __NO_STRICT_ALIGNMENT
   5430 	sc->sc_align_tweak = 0;
   5431 #else
   5432 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5433 		sc->sc_align_tweak = 0;
   5434 	else
   5435 		sc->sc_align_tweak = 2;
   5436 #endif /* __NO_STRICT_ALIGNMENT */
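
         	/*
         	 * Illustrative example: with a 1500-byte MTU, 1500 + 14 + 4 =
         	 * 1518 <= MCLBYTES - 2 (for 2KB clusters), so align_tweak is 2;
         	 * the 2-byte shift makes the 14-byte Ethernet header end on a
         	 * 4-byte boundary, aligning the IP header that follows.
         	 */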
   5437 
   5438 	/* Cancel any pending I/O. */
   5439 	wm_stop_locked(ifp, 0);
   5440 
   5441 	/* update statistics before reset */
   5442 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5443 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5444 
   5445 	/* PCH_SPT hardware workaround */
   5446 	if (sc->sc_type == WM_T_PCH_SPT)
   5447 		wm_flush_desc_rings(sc);
   5448 
   5449 	/* Reset the chip to a known state. */
   5450 	wm_reset(sc);
   5451 
   5452 	/*
   5453 	 * AMT based hardware can now take control from firmware
   5454 	 * Do this after reset.
   5455 	 */
   5456 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5457 		wm_get_hw_control(sc);
   5458 
   5459 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5460 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5461 		wm_legacy_irq_quirk_spt(sc);
   5462 
   5463 	/* Init hardware bits */
   5464 	wm_initialize_hardware_bits(sc);
   5465 
   5466 	/* Reset the PHY. */
   5467 	if (sc->sc_flags & WM_F_HAS_MII)
   5468 		wm_gmii_reset(sc);
   5469 
   5470 	/* Calculate (E)ITR value */
   5471 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5472 		/*
    5473 		 * For NEWQUEUE's EITR (except for the 82575).
    5474 		 * The 82575's EITR should be set to the same throttling value
    5475 		 * as other old controllers' ITR because the interrupts/sec
    5476 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5477 		 *
    5478 		 * The 82574's EITR should be set to the same throttling value
    5479 		 * as its ITR.
    5480 		 *
    5481 		 * For N interrupts/sec, set this value to:
    5482 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5482 		 */
   5483 		sc->sc_itr_init = 450;
   5484 	} else if (sc->sc_type >= WM_T_82543) {
   5485 		/*
   5486 		 * Set up the interrupt throttling register (units of 256ns)
   5487 		 * Note that a footnote in Intel's documentation says this
   5488 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    5489 		 * or 10Mbit mode.  Empirically, this also appears to be
    5490 		 * true for the 1024ns units of the other
   5491 		 * interrupt-related timer registers -- so, really, we ought
   5492 		 * to divide this value by 4 when the link speed is low.
   5493 		 *
   5494 		 * XXX implement this division at link speed change!
   5495 		 */
   5496 
   5497 		/*
   5498 		 * For N interrupts/sec, set this value to:
   5499 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5500 		 * absolute and packet timer values to this value
   5501 		 * divided by 4 to get "simple timer" behavior.
   5502 		 */
   5503 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
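         		/*
         		 * Worked example: 1,000,000,000 / (1500 * 256) is about
         		 * 2604 interrupts/sec, matching the note above.
         		 */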
   5504 	}
   5505 
   5506 	error = wm_init_txrx_queues(sc);
   5507 	if (error)
   5508 		goto out;
   5509 
   5510 	/*
   5511 	 * Clear out the VLAN table -- we don't use it (yet).
   5512 	 */
   5513 	CSR_WRITE(sc, WMREG_VET, 0);
   5514 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5515 		trynum = 10; /* Due to hw errata */
   5516 	else
   5517 		trynum = 1;
   5518 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5519 		for (j = 0; j < trynum; j++)
   5520 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5521 
   5522 	/*
   5523 	 * Set up flow-control parameters.
   5524 	 *
   5525 	 * XXX Values could probably stand some tuning.
   5526 	 */
   5527 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5528 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5529 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5530 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5531 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5532 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5533 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5534 	}
   5535 
   5536 	sc->sc_fcrtl = FCRTL_DFLT;
   5537 	if (sc->sc_type < WM_T_82543) {
   5538 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5539 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5540 	} else {
   5541 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5542 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5543 	}
   5544 
   5545 	if (sc->sc_type == WM_T_80003)
   5546 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5547 	else
   5548 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5549 
   5550 	/* Writes the control register. */
   5551 	wm_set_vlan(sc);
   5552 
   5553 	if (sc->sc_flags & WM_F_HAS_MII) {
   5554 		uint16_t kmreg;
   5555 
   5556 		switch (sc->sc_type) {
   5557 		case WM_T_80003:
   5558 		case WM_T_ICH8:
   5559 		case WM_T_ICH9:
   5560 		case WM_T_ICH10:
   5561 		case WM_T_PCH:
   5562 		case WM_T_PCH2:
   5563 		case WM_T_PCH_LPT:
   5564 		case WM_T_PCH_SPT:
   5565 		case WM_T_PCH_CNP:
   5566 			/*
   5567 			 * Set the mac to wait the maximum time between each
   5568 			 * iteration and increase the max iterations when
   5569 			 * polling the phy; this fixes erroneous timeouts at
   5570 			 * 10Mbps.
   5571 			 */
   5572 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5573 			    0xFFFF);
   5574 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5575 			    &kmreg);
   5576 			kmreg |= 0x3F;
   5577 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5578 			    kmreg);
   5579 			break;
   5580 		default:
   5581 			break;
   5582 		}
   5583 
   5584 		if (sc->sc_type == WM_T_80003) {
   5585 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5586 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5587 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5588 
    5589 			/* Bypass the RX and TX FIFOs */
   5590 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5591 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5592 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5593 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5594 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5595 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5596 		}
   5597 	}
   5598 #if 0
   5599 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5600 #endif
   5601 
   5602 	/* Set up checksum offload parameters. */
   5603 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5604 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5605 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5606 		reg |= RXCSUM_IPOFL;
   5607 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5608 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5609 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5610 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5611 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5612 
   5613 	/* Set registers about MSI-X */
   5614 	if (wm_is_using_msix(sc)) {
   5615 		uint32_t ivar;
   5616 		struct wm_queue *wmq;
   5617 		int qid, qintr_idx;
   5618 
   5619 		if (sc->sc_type == WM_T_82575) {
   5620 			/* Interrupt control */
   5621 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5622 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5623 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5624 
   5625 			/* TX and RX */
   5626 			for (i = 0; i < sc->sc_nqueues; i++) {
   5627 				wmq = &sc->sc_queue[i];
   5628 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5629 				    EITR_TX_QUEUE(wmq->wmq_id)
   5630 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5631 			}
   5632 			/* Link status */
   5633 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5634 			    EITR_OTHER);
   5635 		} else if (sc->sc_type == WM_T_82574) {
   5636 			/* Interrupt control */
   5637 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5638 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5639 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5640 
   5641 			/*
    5642 			 * Work around an issue with spurious interrupts
    5643 			 * in MSI-X mode.
    5644 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5645 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5646 			 */
   5647 			reg = CSR_READ(sc, WMREG_RFCTL);
   5648 			reg |= WMREG_RFCTL_ACKDIS;
   5649 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5650 
   5651 			ivar = 0;
   5652 			/* TX and RX */
   5653 			for (i = 0; i < sc->sc_nqueues; i++) {
   5654 				wmq = &sc->sc_queue[i];
   5655 				qid = wmq->wmq_id;
   5656 				qintr_idx = wmq->wmq_intr_idx;
   5657 
   5658 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5659 				    IVAR_TX_MASK_Q_82574(qid));
   5660 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5661 				    IVAR_RX_MASK_Q_82574(qid));
   5662 			}
   5663 			/* Link status */
   5664 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5665 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5666 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5667 		} else {
   5668 			/* Interrupt control */
   5669 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5670 			    | GPIE_EIAME | GPIE_PBA);
   5671 
   5672 			switch (sc->sc_type) {
   5673 			case WM_T_82580:
   5674 			case WM_T_I350:
   5675 			case WM_T_I354:
   5676 			case WM_T_I210:
   5677 			case WM_T_I211:
   5678 				/* TX and RX */
   5679 				for (i = 0; i < sc->sc_nqueues; i++) {
   5680 					wmq = &sc->sc_queue[i];
   5681 					qid = wmq->wmq_id;
   5682 					qintr_idx = wmq->wmq_intr_idx;
   5683 
   5684 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5685 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5686 					ivar |= __SHIFTIN((qintr_idx
   5687 						| IVAR_VALID),
   5688 					    IVAR_TX_MASK_Q(qid));
   5689 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5690 					ivar |= __SHIFTIN((qintr_idx
   5691 						| IVAR_VALID),
   5692 					    IVAR_RX_MASK_Q(qid));
   5693 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5694 				}
   5695 				break;
   5696 			case WM_T_82576:
   5697 				/* TX and RX */
   5698 				for (i = 0; i < sc->sc_nqueues; i++) {
   5699 					wmq = &sc->sc_queue[i];
   5700 					qid = wmq->wmq_id;
   5701 					qintr_idx = wmq->wmq_intr_idx;
   5702 
   5703 					ivar = CSR_READ(sc,
   5704 					    WMREG_IVAR_Q_82576(qid));
   5705 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5706 					ivar |= __SHIFTIN((qintr_idx
   5707 						| IVAR_VALID),
   5708 					    IVAR_TX_MASK_Q_82576(qid));
   5709 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5710 					ivar |= __SHIFTIN((qintr_idx
   5711 						| IVAR_VALID),
   5712 					    IVAR_RX_MASK_Q_82576(qid));
   5713 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5714 					    ivar);
   5715 				}
   5716 				break;
   5717 			default:
   5718 				break;
   5719 			}
   5720 
   5721 			/* Link status */
   5722 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5723 			    IVAR_MISC_OTHER);
   5724 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5725 		}
   5726 
   5727 		if (wm_is_using_multiqueue(sc)) {
   5728 			wm_init_rss(sc);
   5729 
   5730 			/*
    5731 			 * NOTE: Receive full-packet checksum offload is
    5732 			 * mutually exclusive with multiqueue. This does not
    5733 			 * affect TCP/IP checksum offload, which still works.
    5734 			 */
   5736 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5737 			reg |= RXCSUM_PCSD;
   5738 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5739 		}
   5740 	}
   5741 
   5742 	/* Set up the interrupt registers. */
   5743 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5744 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5745 	    ICR_RXO | ICR_RXT0;
   5746 	if (wm_is_using_msix(sc)) {
   5747 		uint32_t mask;
   5748 		struct wm_queue *wmq;
   5749 
   5750 		switch (sc->sc_type) {
   5751 		case WM_T_82574:
   5752 			mask = 0;
   5753 			for (i = 0; i < sc->sc_nqueues; i++) {
   5754 				wmq = &sc->sc_queue[i];
   5755 				mask |= ICR_TXQ(wmq->wmq_id);
   5756 				mask |= ICR_RXQ(wmq->wmq_id);
   5757 			}
   5758 			mask |= ICR_OTHER;
   5759 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5760 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5761 			break;
   5762 		default:
   5763 			if (sc->sc_type == WM_T_82575) {
   5764 				mask = 0;
   5765 				for (i = 0; i < sc->sc_nqueues; i++) {
   5766 					wmq = &sc->sc_queue[i];
   5767 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5768 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5769 				}
   5770 				mask |= EITR_OTHER;
   5771 			} else {
   5772 				mask = 0;
   5773 				for (i = 0; i < sc->sc_nqueues; i++) {
   5774 					wmq = &sc->sc_queue[i];
   5775 					mask |= 1 << wmq->wmq_intr_idx;
   5776 				}
   5777 				mask |= 1 << sc->sc_link_intr_idx;
   5778 			}
   5779 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5780 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5781 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5782 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5783 			break;
   5784 		}
   5785 	} else
   5786 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5787 
   5788 	/* Set up the inter-packet gap. */
   5789 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5790 
   5791 	if (sc->sc_type >= WM_T_82543) {
   5792 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5793 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5794 			wm_itrs_writereg(sc, wmq);
   5795 		}
   5796 		/*
    5797 		 * Link interrupts occur much less frequently than TX and
    5798 		 * RX interrupts, so we don't tune the
    5799 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    5800 		 * if_igb does.
   5801 		 */
   5802 	}
   5803 
   5804 	/* Set the VLAN ethernetype. */
   5805 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5806 
   5807 	/*
   5808 	 * Set up the transmit control register; we start out with
    5809 	 * a collision distance suitable for FDX, but update it when
   5810 	 * we resolve the media type.
   5811 	 */
   5812 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5813 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5814 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5815 	if (sc->sc_type >= WM_T_82571)
   5816 		sc->sc_tctl |= TCTL_MULR;
   5817 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5818 
   5819 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5820 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5821 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5822 	}
   5823 
   5824 	if (sc->sc_type == WM_T_80003) {
   5825 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5826 		reg &= ~TCTL_EXT_GCEX_MASK;
   5827 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5828 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5829 	}
   5830 
   5831 	/* Set the media. */
   5832 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5833 		goto out;
   5834 
   5835 	/* Configure for OS presence */
   5836 	wm_init_manageability(sc);
   5837 
   5838 	/*
   5839 	 * Set up the receive control register; we actually program the
   5840 	 * register when we set the receive filter. Use multicast address
   5841 	 * offset type 0.
   5842 	 *
   5843 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   5844 	 * don't enable that feature.
   5845 	 */
   5846 	sc->sc_mchash_type = 0;
   5847 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5848 	    | RCTL_MO(sc->sc_mchash_type);
   5849 
    5850 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   5853 	if (sc->sc_type == WM_T_82574)
   5854 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5855 
   5856 	/*
   5857 	 * The I350 has a bug where it always strips the CRC whether
    5858 	 * asked to or not, so ask for the stripped CRC here and cope in rxeof.
   5859 	 */
   5860 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5861 	    || (sc->sc_type == WM_T_I210))
   5862 		sc->sc_rctl |= RCTL_SECRC;
   5863 
   5864 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5865 	    && (ifp->if_mtu > ETHERMTU)) {
   5866 		sc->sc_rctl |= RCTL_LPE;
   5867 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5868 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5869 	}
   5870 
   5871 	if (MCLBYTES == 2048) {
   5872 		sc->sc_rctl |= RCTL_2k;
   5873 	} else {
   5874 		if (sc->sc_type >= WM_T_82543) {
   5875 			switch (MCLBYTES) {
   5876 			case 4096:
   5877 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5878 				break;
   5879 			case 8192:
   5880 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5881 				break;
   5882 			case 16384:
   5883 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5884 				break;
   5885 			default:
   5886 				panic("wm_init: MCLBYTES %d unsupported",
   5887 				    MCLBYTES);
   5888 				break;
   5889 			}
    5890 		} else
         			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5891 	}
   5892 
   5893 	/* Enable ECC */
   5894 	switch (sc->sc_type) {
   5895 	case WM_T_82571:
   5896 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5897 		reg |= PBA_ECC_CORR_EN;
   5898 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5899 		break;
   5900 	case WM_T_PCH_LPT:
   5901 	case WM_T_PCH_SPT:
   5902 	case WM_T_PCH_CNP:
   5903 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5904 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5905 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5906 
   5907 		sc->sc_ctrl |= CTRL_MEHE;
   5908 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5909 		break;
   5910 	default:
   5911 		break;
   5912 	}
   5913 
   5914 	/*
   5915 	 * Set the receive filter.
   5916 	 *
   5917 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5918 	 * the setting of RCTL.EN in wm_set_filter()
   5919 	 */
   5920 	wm_set_filter(sc);
   5921 
    5922 	/* On 82575 and later, set RDT only if RX is enabled */
   5923 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5924 		int qidx;
   5925 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5926 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5927 			for (i = 0; i < WM_NRXDESC; i++) {
   5928 				mutex_enter(rxq->rxq_lock);
   5929 				wm_init_rxdesc(rxq, i);
   5930 				mutex_exit(rxq->rxq_lock);
   5931 
   5932 			}
   5933 		}
   5934 	}
   5935 
   5936 	wm_unset_stopping_flags(sc);
   5937 
   5938 	/* Start the one second link check clock. */
   5939 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5940 
   5941 	/* ...all done! */
   5942 	ifp->if_flags |= IFF_RUNNING;
   5943 	ifp->if_flags &= ~IFF_OACTIVE;
   5944 
   5945  out:
   5946 	sc->sc_if_flags = ifp->if_flags;
   5947 	if (error)
   5948 		log(LOG_ERR, "%s: interface not running\n",
   5949 		    device_xname(sc->sc_dev));
   5950 	return error;
   5951 }
   5952 
   5953 /*
   5954  * wm_stop:		[ifnet interface function]
   5955  *
   5956  *	Stop transmission on the interface.
   5957  */
   5958 static void
   5959 wm_stop(struct ifnet *ifp, int disable)
   5960 {
   5961 	struct wm_softc *sc = ifp->if_softc;
   5962 
   5963 	WM_CORE_LOCK(sc);
   5964 	wm_stop_locked(ifp, disable);
   5965 	WM_CORE_UNLOCK(sc);
   5966 }
   5967 
   5968 static void
   5969 wm_stop_locked(struct ifnet *ifp, int disable)
   5970 {
   5971 	struct wm_softc *sc = ifp->if_softc;
   5972 	struct wm_txsoft *txs;
   5973 	int i, qidx;
   5974 
   5975 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5976 		device_xname(sc->sc_dev), __func__));
   5977 	KASSERT(WM_CORE_LOCKED(sc));
   5978 
   5979 	wm_set_stopping_flags(sc);
   5980 
   5981 	/* Stop the one second clock. */
   5982 	callout_stop(&sc->sc_tick_ch);
   5983 
   5984 	/* Stop the 82547 Tx FIFO stall check timer. */
   5985 	if (sc->sc_type == WM_T_82547)
   5986 		callout_stop(&sc->sc_txfifo_ch);
   5987 
   5988 	if (sc->sc_flags & WM_F_HAS_MII) {
   5989 		/* Down the MII. */
   5990 		mii_down(&sc->sc_mii);
   5991 	} else {
   5992 #if 0
   5993 		/* Should we clear PHY's status properly? */
   5994 		wm_reset(sc);
   5995 #endif
   5996 	}
   5997 
   5998 	/* Stop the transmit and receive processes. */
   5999 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6000 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6001 	sc->sc_rctl &= ~RCTL_EN;
   6002 
   6003 	/*
   6004 	 * Clear the interrupt mask to ensure the device cannot assert its
   6005 	 * interrupt line.
   6006 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6007 	 * service any currently pending or shared interrupt.
   6008 	 */
   6009 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6010 	sc->sc_icr = 0;
   6011 	if (wm_is_using_msix(sc)) {
   6012 		if (sc->sc_type != WM_T_82574) {
   6013 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6014 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6015 		} else
   6016 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6017 	}
   6018 
   6019 	/* Release any queued transmit buffers. */
   6020 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6021 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6022 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6023 		mutex_enter(txq->txq_lock);
   6024 		txq->txq_sending = false; /* ensure watchdog disabled */
   6025 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6026 			txs = &txq->txq_soft[i];
   6027 			if (txs->txs_mbuf != NULL) {
   6028 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6029 				m_freem(txs->txs_mbuf);
   6030 				txs->txs_mbuf = NULL;
   6031 			}
   6032 		}
   6033 		mutex_exit(txq->txq_lock);
   6034 	}
   6035 
   6036 	/* Mark the interface as down and cancel the watchdog timer. */
   6037 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6038 
   6039 	if (disable) {
   6040 		for (i = 0; i < sc->sc_nqueues; i++) {
   6041 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6042 			mutex_enter(rxq->rxq_lock);
   6043 			wm_rxdrain(rxq);
   6044 			mutex_exit(rxq->rxq_lock);
   6045 		}
   6046 	}
   6047 
   6048 #if 0 /* notyet */
   6049 	if (sc->sc_type >= WM_T_82544)
   6050 		CSR_WRITE(sc, WMREG_WUC, 0);
   6051 #endif
   6052 }
   6053 
   6054 static void
   6055 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6056 {
   6057 	struct mbuf *m;
   6058 	int i;
   6059 
   6060 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6061 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6062 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6063 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6064 		    m->m_data, m->m_len, m->m_flags);
   6065 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6066 	    i, i == 1 ? "" : "s");
   6067 }
   6068 
   6069 /*
   6070  * wm_82547_txfifo_stall:
   6071  *
   6072  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6073  *	reset the FIFO pointers, and restart packet transmission.
   6074  */
   6075 static void
   6076 wm_82547_txfifo_stall(void *arg)
   6077 {
   6078 	struct wm_softc *sc = arg;
   6079 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6080 
   6081 	mutex_enter(txq->txq_lock);
   6082 
   6083 	if (txq->txq_stopping)
   6084 		goto out;
   6085 
   6086 	if (txq->txq_fifo_stall) {
   6087 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6088 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6089 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6090 			/*
   6091 			 * Packets have drained.  Stop transmitter, reset
   6092 			 * FIFO pointers, restart transmitter, and kick
   6093 			 * the packet queue.
   6094 			 */
   6095 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6096 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6097 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6098 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6099 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6100 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6101 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6102 			CSR_WRITE_FLUSH(sc);
   6103 
   6104 			txq->txq_fifo_head = 0;
   6105 			txq->txq_fifo_stall = 0;
   6106 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6107 		} else {
   6108 			/*
   6109 			 * Still waiting for packets to drain; try again in
   6110 			 * another tick.
   6111 			 */
   6112 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6113 		}
   6114 	}
   6115 
   6116 out:
   6117 	mutex_exit(txq->txq_lock);
   6118 }
   6119 
   6120 /*
   6121  * wm_82547_txfifo_bugchk:
   6122  *
   6123  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6124  *	prevent enqueueing a packet that would wrap around the end
    6125  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6126  *
   6127  *	We do this by checking the amount of space before the end
   6128  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6129  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6130  *	the internal FIFO pointers to the beginning, and restart
   6131  *	transmission on the interface.
   6132  */
   6133 #define	WM_FIFO_HDR		0x10
   6134 #define	WM_82547_PAD_LEN	0x3e0
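         /*
          * Illustrative example: a 1514-byte frame occupies
          * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO
          * space, including the 16-byte FIFO header.
          */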
   6135 static int
   6136 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6137 {
   6138 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6139 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6140 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6141 
   6142 	/* Just return if already stalled. */
   6143 	if (txq->txq_fifo_stall)
   6144 		return 1;
   6145 
   6146 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6147 		/* Stall only occurs in half-duplex mode. */
   6148 		goto send_packet;
   6149 	}
   6150 
   6151 	if (len >= WM_82547_PAD_LEN + space) {
   6152 		txq->txq_fifo_stall = 1;
   6153 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6154 		return 1;
   6155 	}
   6156 
   6157  send_packet:
   6158 	txq->txq_fifo_head += len;
   6159 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6160 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6161 
   6162 	return 0;
   6163 }
   6164 
   6165 static int
   6166 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6167 {
   6168 	int error;
   6169 
   6170 	/*
   6171 	 * Allocate the control data structures, and create and load the
   6172 	 * DMA map for it.
   6173 	 *
   6174 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6175 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6176 	 * both sets within the same 4G segment.
   6177 	 */
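         	/*
         	 * The 4G constraint is enforced below by passing 0x100000000
         	 * as the boundary argument to bus_dmamem_alloc(), so no
         	 * allocated segment can cross a 4GB address boundary.
         	 */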
   6178 	if (sc->sc_type < WM_T_82544)
   6179 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6180 	else
   6181 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6182 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6183 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6184 	else
   6185 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6186 
   6187 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6188 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6189 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6190 		aprint_error_dev(sc->sc_dev,
   6191 		    "unable to allocate TX control data, error = %d\n",
   6192 		    error);
   6193 		goto fail_0;
   6194 	}
   6195 
   6196 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6197 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6198 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6199 		aprint_error_dev(sc->sc_dev,
   6200 		    "unable to map TX control data, error = %d\n", error);
   6201 		goto fail_1;
   6202 	}
   6203 
   6204 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6205 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6206 		aprint_error_dev(sc->sc_dev,
   6207 		    "unable to create TX control data DMA map, error = %d\n",
   6208 		    error);
   6209 		goto fail_2;
   6210 	}
   6211 
   6212 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6213 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6214 		aprint_error_dev(sc->sc_dev,
   6215 		    "unable to load TX control data DMA map, error = %d\n",
   6216 		    error);
   6217 		goto fail_3;
   6218 	}
   6219 
   6220 	return 0;
   6221 
   6222  fail_3:
   6223 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6224  fail_2:
   6225 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6226 	    WM_TXDESCS_SIZE(txq));
   6227  fail_1:
   6228 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6229  fail_0:
   6230 	return error;
   6231 }
   6232 
   6233 static void
   6234 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6235 {
   6236 
   6237 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6238 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6239 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6240 	    WM_TXDESCS_SIZE(txq));
   6241 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6242 }
   6243 
   6244 static int
   6245 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6246 {
   6247 	int error;
   6248 	size_t rxq_descs_size;
   6249 
   6250 	/*
   6251 	 * Allocate the control data structures, and create and load the
   6252 	 * DMA map for it.
   6253 	 *
   6254 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6255 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6256 	 * both sets within the same 4G segment.
   6257 	 */
   6258 	rxq->rxq_ndesc = WM_NRXDESC;
   6259 	if (sc->sc_type == WM_T_82574)
   6260 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6261 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6262 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6263 	else
   6264 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6265 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6266 
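	/*
	 * Sizing sketch (assuming WM_NRXDESC == 256 and 16-byte
	 * descriptors): rxq_descs_size == 256 * 16 == 4KB, i.e. one
	 * page, again kept within a single 4G segment below.
	 */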
   6267 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6268 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6269 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6270 		aprint_error_dev(sc->sc_dev,
   6271 		    "unable to allocate RX control data, error = %d\n",
   6272 		    error);
   6273 		goto fail_0;
   6274 	}
   6275 
   6276 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6277 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6278 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6279 		aprint_error_dev(sc->sc_dev,
   6280 		    "unable to map RX control data, error = %d\n", error);
   6281 		goto fail_1;
   6282 	}
   6283 
   6284 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6285 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6286 		aprint_error_dev(sc->sc_dev,
   6287 		    "unable to create RX control data DMA map, error = %d\n",
   6288 		    error);
   6289 		goto fail_2;
   6290 	}
   6291 
   6292 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6293 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6294 		aprint_error_dev(sc->sc_dev,
   6295 		    "unable to load RX control data DMA map, error = %d\n",
   6296 		    error);
   6297 		goto fail_3;
   6298 	}
   6299 
   6300 	return 0;
   6301 
   6302  fail_3:
   6303 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6304  fail_2:
   6305 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6306 	    rxq_descs_size);
   6307  fail_1:
   6308 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6309  fail_0:
   6310 	return error;
   6311 }
   6312 
   6313 static void
   6314 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6315 {
   6316 
   6317 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6318 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6319 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6320 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6321 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6322 }
   6323 
   6324 
   6325 static int
   6326 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6327 {
   6328 	int i, error;
   6329 
   6330 	/* Create the transmit buffer DMA maps. */
   6331 	WM_TXQUEUELEN(txq) =
   6332 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6333 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6334 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6335 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6336 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6337 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6338 			aprint_error_dev(sc->sc_dev,
   6339 			    "unable to create Tx DMA map %d, error = %d\n",
   6340 			    i, error);
   6341 			goto fail;
   6342 		}
   6343 	}
   6344 
   6345 	return 0;
   6346 
   6347  fail:
   6348 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6349 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6350 			bus_dmamap_destroy(sc->sc_dmat,
   6351 			    txq->txq_soft[i].txs_dmamap);
   6352 	}
   6353 	return error;
   6354 }
   6355 
   6356 static void
   6357 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6358 {
   6359 	int i;
   6360 
   6361 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6362 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6363 			bus_dmamap_destroy(sc->sc_dmat,
   6364 			    txq->txq_soft[i].txs_dmamap);
   6365 	}
   6366 }
   6367 
   6368 static int
   6369 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6370 {
   6371 	int i, error;
   6372 
   6373 	/* Create the receive buffer DMA maps. */
   6374 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6375 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6376 			    MCLBYTES, 0, 0,
   6377 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6378 			aprint_error_dev(sc->sc_dev,
   6379 			    "unable to create Rx DMA map %d error = %d\n",
   6380 			    i, error);
   6381 			goto fail;
   6382 		}
   6383 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6384 	}
   6385 
   6386 	return 0;
   6387 
   6388  fail:
   6389 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6390 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6391 			bus_dmamap_destroy(sc->sc_dmat,
   6392 			    rxq->rxq_soft[i].rxs_dmamap);
   6393 	}
   6394 	return error;
   6395 }
   6396 
   6397 static void
   6398 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6399 {
   6400 	int i;
   6401 
   6402 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6403 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6404 			bus_dmamap_destroy(sc->sc_dmat,
   6405 			    rxq->rxq_soft[i].rxs_dmamap);
   6406 	}
   6407 }
   6408 
   6409 /*
    6410  * wm_alloc_txrx_queues:
    6411  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6412  */
   6413 static int
   6414 wm_alloc_txrx_queues(struct wm_softc *sc)
   6415 {
   6416 	int i, error, tx_done, rx_done;
   6417 
   6418 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6419 	    KM_SLEEP);
   6420 	if (sc->sc_queue == NULL) {
    6421 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6422 		error = ENOMEM;
   6423 		goto fail_0;
   6424 	}
   6425 
   6426 	/*
   6427 	 * For transmission
   6428 	 */
   6429 	error = 0;
   6430 	tx_done = 0;
   6431 	for (i = 0; i < sc->sc_nqueues; i++) {
   6432 #ifdef WM_EVENT_COUNTERS
   6433 		int j;
   6434 		const char *xname;
   6435 #endif
   6436 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6437 		txq->txq_sc = sc;
   6438 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6439 
   6440 		error = wm_alloc_tx_descs(sc, txq);
   6441 		if (error)
   6442 			break;
   6443 		error = wm_alloc_tx_buffer(sc, txq);
   6444 		if (error) {
   6445 			wm_free_tx_descs(sc, txq);
   6446 			break;
   6447 		}
   6448 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6449 		if (txq->txq_interq == NULL) {
   6450 			wm_free_tx_descs(sc, txq);
   6451 			wm_free_tx_buffer(sc, txq);
   6452 			error = ENOMEM;
   6453 			break;
   6454 		}
   6455 
   6456 #ifdef WM_EVENT_COUNTERS
   6457 		xname = device_xname(sc->sc_dev);
   6458 
   6459 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6460 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6461 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6462 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6463 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6464 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6465 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6466 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6467 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6468 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6469 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6470 
   6471 		for (j = 0; j < WM_NTXSEGS; j++) {
   6472 			snprintf(txq->txq_txseg_evcnt_names[j],
   6473 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6474 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6475 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6476 		}
   6477 
   6478 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6479 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6480 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6481 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6482 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6483 #endif /* WM_EVENT_COUNTERS */
   6484 
   6485 		tx_done++;
   6486 	}
   6487 	if (error)
   6488 		goto fail_1;
   6489 
   6490 	/*
    6491 	 * For receive
   6492 	 */
   6493 	error = 0;
   6494 	rx_done = 0;
   6495 	for (i = 0; i < sc->sc_nqueues; i++) {
   6496 #ifdef WM_EVENT_COUNTERS
   6497 		const char *xname;
   6498 #endif
   6499 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6500 		rxq->rxq_sc = sc;
   6501 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6502 
   6503 		error = wm_alloc_rx_descs(sc, rxq);
   6504 		if (error)
   6505 			break;
   6506 
   6507 		error = wm_alloc_rx_buffer(sc, rxq);
   6508 		if (error) {
   6509 			wm_free_rx_descs(sc, rxq);
   6510 			break;
   6511 		}
   6512 
   6513 #ifdef WM_EVENT_COUNTERS
   6514 		xname = device_xname(sc->sc_dev);
   6515 
   6516 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6517 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6518 
   6519 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6520 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6521 #endif /* WM_EVENT_COUNTERS */
   6522 
   6523 		rx_done++;
   6524 	}
   6525 	if (error)
   6526 		goto fail_2;
   6527 
   6528 	return 0;
   6529 
   6530  fail_2:
   6531 	for (i = 0; i < rx_done; i++) {
   6532 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6533 		wm_free_rx_buffer(sc, rxq);
   6534 		wm_free_rx_descs(sc, rxq);
   6535 		if (rxq->rxq_lock)
   6536 			mutex_obj_free(rxq->rxq_lock);
   6537 	}
   6538  fail_1:
   6539 	for (i = 0; i < tx_done; i++) {
   6540 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6541 		pcq_destroy(txq->txq_interq);
   6542 		wm_free_tx_buffer(sc, txq);
   6543 		wm_free_tx_descs(sc, txq);
   6544 		if (txq->txq_lock)
   6545 			mutex_obj_free(txq->txq_lock);
   6546 	}
   6547 
   6548 	kmem_free(sc->sc_queue,
   6549 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6550  fail_0:
   6551 	return error;
   6552 }
   6553 
   6554 /*
    6555  * wm_free_txrx_queues:
    6556  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6557  */
   6558 static void
   6559 wm_free_txrx_queues(struct wm_softc *sc)
   6560 {
   6561 	int i;
   6562 
   6563 	for (i = 0; i < sc->sc_nqueues; i++) {
   6564 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6565 
   6566 #ifdef WM_EVENT_COUNTERS
   6567 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6568 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6569 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6570 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6571 #endif /* WM_EVENT_COUNTERS */
   6572 
   6573 		wm_free_rx_buffer(sc, rxq);
   6574 		wm_free_rx_descs(sc, rxq);
   6575 		if (rxq->rxq_lock)
   6576 			mutex_obj_free(rxq->rxq_lock);
   6577 	}
   6578 
   6579 	for (i = 0; i < sc->sc_nqueues; i++) {
   6580 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6581 		struct mbuf *m;
   6582 #ifdef WM_EVENT_COUNTERS
   6583 		int j;
   6584 
   6585 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6586 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6587 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6588 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6589 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6590 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6591 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6592 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6593 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6594 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6595 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6596 
   6597 		for (j = 0; j < WM_NTXSEGS; j++)
   6598 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6599 
   6600 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6601 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6602 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6603 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6604 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6605 #endif /* WM_EVENT_COUNTERS */
   6606 
   6607 		/* drain txq_interq */
   6608 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6609 			m_freem(m);
   6610 		pcq_destroy(txq->txq_interq);
   6611 
   6612 		wm_free_tx_buffer(sc, txq);
   6613 		wm_free_tx_descs(sc, txq);
   6614 		if (txq->txq_lock)
   6615 			mutex_obj_free(txq->txq_lock);
   6616 	}
   6617 
   6618 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6619 }
   6620 
   6621 static void
   6622 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6623 {
   6624 
   6625 	KASSERT(mutex_owned(txq->txq_lock));
   6626 
   6627 	/* Initialize the transmit descriptor ring. */
   6628 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6629 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6630 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6631 	txq->txq_free = WM_NTXDESC(txq);
   6632 	txq->txq_next = 0;
   6633 }
   6634 
   6635 static void
   6636 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6637     struct wm_txqueue *txq)
   6638 {
   6639 
   6640 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6641 		device_xname(sc->sc_dev), __func__));
   6642 	KASSERT(mutex_owned(txq->txq_lock));
   6643 
   6644 	if (sc->sc_type < WM_T_82543) {
   6645 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6646 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6647 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6648 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6649 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6650 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6651 	} else {
   6652 		int qid = wmq->wmq_id;
   6653 
   6654 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6655 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6656 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6657 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6658 
   6659 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6660 			/*
   6661 			 * Don't write TDT before TCTL.EN is set.
    6662 			 * See the documentation.
   6663 			 */
   6664 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6665 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6666 			    | TXDCTL_WTHRESH(0));
   6667 		else {
   6668 			/* XXX should update with AIM? */
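			/*
			 * Unit-conversion sketch (assuming wmq_itr is kept
			 * in ITR-register units of 256ns): TIDV and TADV
			 * count in 1.024us (= 4 * 256ns) ticks, hence the
			 * division by 4 below.
			 */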
   6669 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6670 			if (sc->sc_type >= WM_T_82540) {
   6671 				/* should be same */
   6672 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6673 			}
   6674 
   6675 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6676 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6677 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6678 		}
   6679 	}
   6680 }
   6681 
   6682 static void
   6683 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6684 {
   6685 	int i;
   6686 
   6687 	KASSERT(mutex_owned(txq->txq_lock));
   6688 
   6689 	/* Initialize the transmit job descriptors. */
   6690 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6691 		txq->txq_soft[i].txs_mbuf = NULL;
   6692 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6693 	txq->txq_snext = 0;
   6694 	txq->txq_sdirty = 0;
   6695 }
   6696 
   6697 static void
   6698 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6699     struct wm_txqueue *txq)
   6700 {
   6701 
   6702 	KASSERT(mutex_owned(txq->txq_lock));
   6703 
   6704 	/*
   6705 	 * Set up some register offsets that are different between
   6706 	 * the i82542 and the i82543 and later chips.
   6707 	 */
   6708 	if (sc->sc_type < WM_T_82543)
   6709 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6710 	else
   6711 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6712 
   6713 	wm_init_tx_descs(sc, txq);
   6714 	wm_init_tx_regs(sc, wmq, txq);
   6715 	wm_init_tx_buffer(sc, txq);
   6716 
   6717 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6718 	txq->txq_sending = false;
   6719 }
   6720 
   6721 static void
   6722 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6723     struct wm_rxqueue *rxq)
   6724 {
   6725 
   6726 	KASSERT(mutex_owned(rxq->rxq_lock));
   6727 
   6728 	/*
   6729 	 * Initialize the receive descriptor and receive job
   6730 	 * descriptor rings.
   6731 	 */
   6732 	if (sc->sc_type < WM_T_82543) {
   6733 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6734 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6735 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6736 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6737 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6738 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6739 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6740 
   6741 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6742 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6743 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6744 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6745 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6746 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6747 	} else {
   6748 		int qid = wmq->wmq_id;
   6749 
   6750 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6751 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6752 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6753 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6754 
   6755 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6756 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6757 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6758 
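			/*
			 * SRRCTL.BSIZEPKT encoding sketch (assuming
			 * MCLBYTES == 2048 and SRRCTL_BSIZEPKT_SHIFT == 10):
			 * the buffer size is expressed in 1KB units, so
			 * 2048 >> 10 == 2 selects a 2KB receive buffer to
			 * match the mbuf cluster size; the panic above
			 * rejects cluster sizes that are not a multiple of
			 * 1KB.
			 */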
   6759 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6760 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6761 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6762 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6763 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6764 			    | RXDCTL_WTHRESH(1));
   6765 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6766 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6767 		} else {
   6768 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6769 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6770 			/* XXX should update with AIM? */
   6771 			CSR_WRITE(sc, WMREG_RDTR,
   6772 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   6773 			/* MUST be same */
   6774 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6775 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6776 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6777 		}
   6778 	}
   6779 }
   6780 
   6781 static int
   6782 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6783 {
   6784 	struct wm_rxsoft *rxs;
   6785 	int error, i;
   6786 
   6787 	KASSERT(mutex_owned(rxq->rxq_lock));
   6788 
   6789 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6790 		rxs = &rxq->rxq_soft[i];
   6791 		if (rxs->rxs_mbuf == NULL) {
   6792 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6793 				log(LOG_ERR, "%s: unable to allocate or map "
   6794 				    "rx buffer %d, error = %d\n",
   6795 				    device_xname(sc->sc_dev), i, error);
   6796 				/*
   6797 				 * XXX Should attempt to run with fewer receive
   6798 				 * XXX buffers instead of just failing.
   6799 				 */
   6800 				wm_rxdrain(rxq);
   6801 				return ENOMEM;
   6802 			}
   6803 		} else {
   6804 			/*
   6805 			 * For 82575 and 82576, the RX descriptors must be
   6806 			 * initialized after the setting of RCTL.EN in
   6807 			 * wm_set_filter()
   6808 			 */
   6809 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6810 				wm_init_rxdesc(rxq, i);
   6811 		}
   6812 	}
   6813 	rxq->rxq_ptr = 0;
   6814 	rxq->rxq_discard = 0;
   6815 	WM_RXCHAIN_RESET(rxq);
   6816 
   6817 	return 0;
   6818 }
   6819 
   6820 static int
   6821 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6822     struct wm_rxqueue *rxq)
   6823 {
   6824 
   6825 	KASSERT(mutex_owned(rxq->rxq_lock));
   6826 
   6827 	/*
   6828 	 * Set up some register offsets that are different between
   6829 	 * the i82542 and the i82543 and later chips.
   6830 	 */
   6831 	if (sc->sc_type < WM_T_82543)
   6832 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6833 	else
   6834 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6835 
   6836 	wm_init_rx_regs(sc, wmq, rxq);
   6837 	return wm_init_rx_buffer(sc, rxq);
   6838 }
   6839 
   6840 /*
    6841  * wm_init_txrx_queues:
    6842  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   6843  */
   6844 static int
   6845 wm_init_txrx_queues(struct wm_softc *sc)
   6846 {
   6847 	int i, error = 0;
   6848 
   6849 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6850 		device_xname(sc->sc_dev), __func__));
   6851 
   6852 	for (i = 0; i < sc->sc_nqueues; i++) {
   6853 		struct wm_queue *wmq = &sc->sc_queue[i];
   6854 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6855 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6856 
   6857 		/*
   6858 		 * TODO
    6859 		 * Currently, a constant value is used instead of AIM.
    6860 		 * Furthermore, the interrupt interval used for multiqueue
    6861 		 * (polling mode) is less than the default value.
   6862 		 * More tuning and AIM are required.
   6863 		 */
   6864 		if (wm_is_using_multiqueue(sc))
   6865 			wmq->wmq_itr = 50;
   6866 		else
   6867 			wmq->wmq_itr = sc->sc_itr_init;
   6868 		wmq->wmq_set_itr = true;
   6869 
   6870 		mutex_enter(txq->txq_lock);
   6871 		wm_init_tx_queue(sc, wmq, txq);
   6872 		mutex_exit(txq->txq_lock);
   6873 
   6874 		mutex_enter(rxq->rxq_lock);
   6875 		error = wm_init_rx_queue(sc, wmq, rxq);
   6876 		mutex_exit(rxq->rxq_lock);
   6877 		if (error)
   6878 			break;
   6879 	}
   6880 
   6881 	return error;
   6882 }
   6883 
   6884 /*
   6885  * wm_tx_offload:
   6886  *
   6887  *	Set up TCP/IP checksumming parameters for the
   6888  *	specified packet.
   6889  */
   6890 static int
   6891 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6892     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6893 {
   6894 	struct mbuf *m0 = txs->txs_mbuf;
   6895 	struct livengood_tcpip_ctxdesc *t;
   6896 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6897 	uint32_t ipcse;
   6898 	struct ether_header *eh;
   6899 	int offset, iphl;
   6900 	uint8_t fields;
   6901 
   6902 	/*
   6903 	 * XXX It would be nice if the mbuf pkthdr had offset
   6904 	 * fields for the protocol headers.
   6905 	 */
   6906 
   6907 	eh = mtod(m0, struct ether_header *);
   6908 	switch (htons(eh->ether_type)) {
   6909 	case ETHERTYPE_IP:
   6910 	case ETHERTYPE_IPV6:
   6911 		offset = ETHER_HDR_LEN;
   6912 		break;
   6913 
   6914 	case ETHERTYPE_VLAN:
   6915 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6916 		break;
   6917 
   6918 	default:
   6919 		/*
   6920 		 * Don't support this protocol or encapsulation.
   6921 		 */
   6922 		*fieldsp = 0;
   6923 		*cmdp = 0;
   6924 		return 0;
   6925 	}
   6926 
   6927 	if ((m0->m_pkthdr.csum_flags &
   6928 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6929 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6930 	} else {
   6931 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   6932 	}
   6933 	ipcse = offset + iphl - 1;
   6934 
   6935 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6936 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6937 	seg = 0;
   6938 	fields = 0;
   6939 
   6940 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6941 		int hlen = offset + iphl;
   6942 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6943 
   6944 		if (__predict_false(m0->m_len <
   6945 				    (hlen + sizeof(struct tcphdr)))) {
   6946 			/*
   6947 			 * TCP/IP headers are not in the first mbuf; we need
   6948 			 * to do this the slow and painful way. Let's just
   6949 			 * hope this doesn't happen very often.
   6950 			 */
   6951 			struct tcphdr th;
   6952 
   6953 			WM_Q_EVCNT_INCR(txq, tsopain);
   6954 
   6955 			m_copydata(m0, hlen, sizeof(th), &th);
   6956 			if (v4) {
   6957 				struct ip ip;
   6958 
   6959 				m_copydata(m0, offset, sizeof(ip), &ip);
   6960 				ip.ip_len = 0;
   6961 				m_copyback(m0,
   6962 				    offset + offsetof(struct ip, ip_len),
   6963 				    sizeof(ip.ip_len), &ip.ip_len);
   6964 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6965 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6966 			} else {
   6967 				struct ip6_hdr ip6;
   6968 
   6969 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6970 				ip6.ip6_plen = 0;
   6971 				m_copyback(m0,
   6972 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6973 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6974 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6975 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6976 			}
   6977 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6978 			    sizeof(th.th_sum), &th.th_sum);
   6979 
   6980 			hlen += th.th_off << 2;
   6981 		} else {
   6982 			/*
   6983 			 * TCP/IP headers are in the first mbuf; we can do
   6984 			 * this the easy way.
   6985 			 */
   6986 			struct tcphdr *th;
   6987 
   6988 			if (v4) {
   6989 				struct ip *ip =
   6990 				    (void *)(mtod(m0, char *) + offset);
   6991 				th = (void *)(mtod(m0, char *) + hlen);
   6992 
   6993 				ip->ip_len = 0;
   6994 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6995 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6996 			} else {
   6997 				struct ip6_hdr *ip6 =
   6998 				    (void *)(mtod(m0, char *) + offset);
   6999 				th = (void *)(mtod(m0, char *) + hlen);
   7000 
   7001 				ip6->ip6_plen = 0;
   7002 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7003 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7004 			}
   7005 			hlen += th->th_off << 2;
   7006 		}
   7007 
   7008 		if (v4) {
   7009 			WM_Q_EVCNT_INCR(txq, tso);
   7010 			cmdlen |= WTX_TCPIP_CMD_IP;
   7011 		} else {
   7012 			WM_Q_EVCNT_INCR(txq, tso6);
   7013 			ipcse = 0;
   7014 		}
   7015 		cmd |= WTX_TCPIP_CMD_TSE;
   7016 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7017 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7018 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7019 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7020 	}
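	/*
	 * For a common TSOv4 segment with no IP or TCP options, hlen is
	 * now 14 (Ethernet) + 20 (IP) + 20 (TCP) == 54, so the payload
	 * length folded into cmdlen above is m0->m_pkthdr.len - 54 and
	 * the context describes a 54-byte header with the packet's MSS.
	 */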
   7021 
   7022 	/*
   7023 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7024 	 * offload feature, if we load the context descriptor, we
   7025 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7026 	 */
   7027 
   7028 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7029 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7030 	    WTX_TCPIP_IPCSE(ipcse);
   7031 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7032 		WM_Q_EVCNT_INCR(txq, ipsum);
   7033 		fields |= WTX_IXSM;
   7034 	}
   7035 
   7036 	offset += iphl;
   7037 
   7038 	if (m0->m_pkthdr.csum_flags &
   7039 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7040 		WM_Q_EVCNT_INCR(txq, tusum);
   7041 		fields |= WTX_TXSM;
   7042 		tucs = WTX_TCPIP_TUCSS(offset) |
   7043 		    WTX_TCPIP_TUCSO(offset +
   7044 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7045 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7046 	} else if ((m0->m_pkthdr.csum_flags &
   7047 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7048 		WM_Q_EVCNT_INCR(txq, tusum6);
   7049 		fields |= WTX_TXSM;
   7050 		tucs = WTX_TCPIP_TUCSS(offset) |
   7051 		    WTX_TCPIP_TUCSO(offset +
   7052 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7053 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7054 	} else {
   7055 		/* Just initialize it to a valid TCP context. */
   7056 		tucs = WTX_TCPIP_TUCSS(offset) |
   7057 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7058 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7059 	}
   7060 
   7061 	/*
    7062 	 * We don't have to write a context descriptor for every packet,
    7063 	 * except on the 82574, which requires a context descriptor for
    7064 	 * every packet when two descriptor queues are used.
    7065 	 * Writing a context descriptor for every packet adds overhead,
    7066 	 * but it does not cause problems.
   7067 	 */
   7068 	/* Fill in the context descriptor. */
   7069 	t = (struct livengood_tcpip_ctxdesc *)
   7070 	    &txq->txq_descs[txq->txq_next];
   7071 	t->tcpip_ipcs = htole32(ipcs);
   7072 	t->tcpip_tucs = htole32(tucs);
   7073 	t->tcpip_cmdlen = htole32(cmdlen);
   7074 	t->tcpip_seg = htole32(seg);
   7075 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7076 
   7077 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7078 	txs->txs_ndesc++;
   7079 
   7080 	*cmdp = cmd;
   7081 	*fieldsp = fields;
   7082 
   7083 	return 0;
   7084 }
   7085 
   7086 static inline int
   7087 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7088 {
   7089 	struct wm_softc *sc = ifp->if_softc;
   7090 	u_int cpuid = cpu_index(curcpu());
   7091 
   7092 	/*
    7093 	 * Currently, a simple CPU-index-based distribution strategy.
    7094 	 * TODO:
    7095 	 * distribute by flowid (RSS hash value).
   7096 	 */
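	/*
	 * Worked example: with ncpu == 4, sc_affinity_offset == 1 and
	 * sc_nqueues == 2, CPU 0 maps to queue (0 + 4 - 1) % 2 == 1 and
	 * CPU 1 to queue (1 + 4 - 1) % 2 == 0; adding ncpu keeps the
	 * dividend non-negative.
	 */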
   7097 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7098 }
   7099 
   7100 /*
   7101  * wm_start:		[ifnet interface function]
   7102  *
   7103  *	Start packet transmission on the interface.
   7104  */
   7105 static void
   7106 wm_start(struct ifnet *ifp)
   7107 {
   7108 	struct wm_softc *sc = ifp->if_softc;
   7109 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7110 
   7111 #ifdef WM_MPSAFE
   7112 	KASSERT(if_is_mpsafe(ifp));
   7113 #endif
   7114 	/*
   7115 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7116 	 */
   7117 
   7118 	mutex_enter(txq->txq_lock);
   7119 	if (!txq->txq_stopping)
   7120 		wm_start_locked(ifp);
   7121 	mutex_exit(txq->txq_lock);
   7122 }
   7123 
   7124 static void
   7125 wm_start_locked(struct ifnet *ifp)
   7126 {
   7127 	struct wm_softc *sc = ifp->if_softc;
   7128 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7129 
   7130 	wm_send_common_locked(ifp, txq, false);
   7131 }
   7132 
   7133 static int
   7134 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7135 {
   7136 	int qid;
   7137 	struct wm_softc *sc = ifp->if_softc;
   7138 	struct wm_txqueue *txq;
   7139 
   7140 	qid = wm_select_txqueue(ifp, m);
   7141 	txq = &sc->sc_queue[qid].wmq_txq;
   7142 
   7143 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7144 		m_freem(m);
   7145 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7146 		return ENOBUFS;
   7147 	}
   7148 
   7149 	/*
   7150 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7151 	 */
   7152 	ifp->if_obytes += m->m_pkthdr.len;
   7153 	if (m->m_flags & M_MCAST)
   7154 		ifp->if_omcasts++;
   7155 
   7156 	if (mutex_tryenter(txq->txq_lock)) {
   7157 		if (!txq->txq_stopping)
   7158 			wm_transmit_locked(ifp, txq);
   7159 		mutex_exit(txq->txq_lock);
   7160 	}
   7161 
   7162 	return 0;
   7163 }
   7164 
   7165 static void
   7166 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7167 {
   7168 
   7169 	wm_send_common_locked(ifp, txq, true);
   7170 }
   7171 
   7172 static void
   7173 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7174     bool is_transmit)
   7175 {
   7176 	struct wm_softc *sc = ifp->if_softc;
   7177 	struct mbuf *m0;
   7178 	struct wm_txsoft *txs;
   7179 	bus_dmamap_t dmamap;
   7180 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7181 	bus_addr_t curaddr;
   7182 	bus_size_t seglen, curlen;
   7183 	uint32_t cksumcmd;
   7184 	uint8_t cksumfields;
   7185 	bool remap = true;
   7186 
   7187 	KASSERT(mutex_owned(txq->txq_lock));
   7188 
   7189 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7190 		return;
   7191 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7192 		return;
   7193 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7194 		return;
   7195 
   7196 	/* Remember the previous number of free descriptors. */
   7197 	ofree = txq->txq_free;
   7198 
   7199 	/*
   7200 	 * Loop through the send queue, setting up transmit descriptors
   7201 	 * until we drain the queue, or use up all available transmit
   7202 	 * descriptors.
   7203 	 */
   7204 	for (;;) {
   7205 		m0 = NULL;
   7206 
   7207 		/* Get a work queue entry. */
   7208 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7209 			wm_txeof(txq, UINT_MAX);
   7210 			if (txq->txq_sfree == 0) {
   7211 				DPRINTF(WM_DEBUG_TX,
   7212 				    ("%s: TX: no free job descriptors\n",
   7213 					device_xname(sc->sc_dev)));
   7214 				WM_Q_EVCNT_INCR(txq, txsstall);
   7215 				break;
   7216 			}
   7217 		}
   7218 
   7219 		/* Grab a packet off the queue. */
   7220 		if (is_transmit)
   7221 			m0 = pcq_get(txq->txq_interq);
   7222 		else
   7223 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7224 		if (m0 == NULL)
   7225 			break;
   7226 
   7227 		DPRINTF(WM_DEBUG_TX,
   7228 		    ("%s: TX: have packet to transmit: %p\n",
   7229 			device_xname(sc->sc_dev), m0));
   7230 
   7231 		txs = &txq->txq_soft[txq->txq_snext];
   7232 		dmamap = txs->txs_dmamap;
   7233 
   7234 		use_tso = (m0->m_pkthdr.csum_flags &
   7235 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7236 
   7237 		/*
   7238 		 * So says the Linux driver:
   7239 		 * The controller does a simple calculation to make sure
   7240 		 * there is enough room in the FIFO before initiating the
   7241 		 * DMA for each buffer. The calc is:
   7242 		 *	4 = ceil(buffer len / MSS)
   7243 		 * To make sure we don't overrun the FIFO, adjust the max
   7244 		 * buffer len if the MSS drops.
   7245 		 */
   7246 		dmamap->dm_maxsegsz =
   7247 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7248 		    ? m0->m_pkthdr.segsz << 2
   7249 		    : WTX_MAX_LEN;
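		/*
		 * e.g. a TSO packet with an MSS of 1460 gets
		 * dm_maxsegsz == 1460 << 2 == 5840 bytes (4 * MSS,
		 * assuming that is below WTX_MAX_LEN), so no single
		 * buffer can fail the chip's ceil(len / MSS) <= 4 check.
		 */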
   7250 
   7251 		/*
   7252 		 * Load the DMA map.  If this fails, the packet either
   7253 		 * didn't fit in the allotted number of segments, or we
   7254 		 * were short on resources.  For the too-many-segments
   7255 		 * case, we simply report an error and drop the packet,
   7256 		 * since we can't sanely copy a jumbo packet to a single
   7257 		 * buffer.
   7258 		 */
   7259 retry:
   7260 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7261 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7262 		if (__predict_false(error)) {
   7263 			if (error == EFBIG) {
   7264 				if (remap == true) {
   7265 					struct mbuf *m;
   7266 
   7267 					remap = false;
   7268 					m = m_defrag(m0, M_NOWAIT);
   7269 					if (m != NULL) {
   7270 						WM_Q_EVCNT_INCR(txq, defrag);
   7271 						m0 = m;
   7272 						goto retry;
   7273 					}
   7274 				}
   7275 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7276 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7277 				    "DMA segments, dropping...\n",
   7278 				    device_xname(sc->sc_dev));
   7279 				wm_dump_mbuf_chain(sc, m0);
   7280 				m_freem(m0);
   7281 				continue;
   7282 			}
    7283 			/* Short on resources, just stop for now. */
   7284 			DPRINTF(WM_DEBUG_TX,
   7285 			    ("%s: TX: dmamap load failed: %d\n",
   7286 				device_xname(sc->sc_dev), error));
   7287 			break;
   7288 		}
   7289 
   7290 		segs_needed = dmamap->dm_nsegs;
   7291 		if (use_tso) {
   7292 			/* For sentinel descriptor; see below. */
   7293 			segs_needed++;
   7294 		}
   7295 
   7296 		/*
   7297 		 * Ensure we have enough descriptors free to describe
   7298 		 * the packet. Note, we always reserve one descriptor
   7299 		 * at the end of the ring due to the semantics of the
   7300 		 * TDT register, plus one more in the event we need
   7301 		 * to load offload context.
   7302 		 */
   7303 		if (segs_needed > txq->txq_free - 2) {
   7304 			/*
   7305 			 * Not enough free descriptors to transmit this
   7306 			 * packet.  We haven't committed anything yet,
   7307 			 * so just unload the DMA map, put the packet
    7308 			 * back on the queue, and punt. Notify the upper
   7309 			 * layer that there are no more slots left.
   7310 			 */
   7311 			DPRINTF(WM_DEBUG_TX,
   7312 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7313 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7314 				segs_needed, txq->txq_free - 1));
   7315 			if (!is_transmit)
   7316 				ifp->if_flags |= IFF_OACTIVE;
   7317 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7318 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7319 			WM_Q_EVCNT_INCR(txq, txdstall);
   7320 			break;
   7321 		}
   7322 
   7323 		/*
   7324 		 * Check for 82547 Tx FIFO bug. We need to do this
   7325 		 * once we know we can transmit the packet, since we
   7326 		 * do some internal FIFO space accounting here.
   7327 		 */
   7328 		if (sc->sc_type == WM_T_82547 &&
   7329 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7330 			DPRINTF(WM_DEBUG_TX,
   7331 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7332 				device_xname(sc->sc_dev)));
   7333 			if (!is_transmit)
   7334 				ifp->if_flags |= IFF_OACTIVE;
   7335 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7336 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7337 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7338 			break;
   7339 		}
   7340 
   7341 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7342 
   7343 		DPRINTF(WM_DEBUG_TX,
   7344 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7345 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7346 
   7347 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7348 
   7349 		/*
   7350 		 * Store a pointer to the packet so that we can free it
   7351 		 * later.
   7352 		 *
   7353 		 * Initially, we consider the number of descriptors the
   7354 		 * packet uses the number of DMA segments.  This may be
   7355 		 * incremented by 1 if we do checksum offload (a descriptor
   7356 		 * is used to set the checksum context).
   7357 		 */
   7358 		txs->txs_mbuf = m0;
   7359 		txs->txs_firstdesc = txq->txq_next;
   7360 		txs->txs_ndesc = segs_needed;
   7361 
   7362 		/* Set up offload parameters for this packet. */
   7363 		if (m0->m_pkthdr.csum_flags &
   7364 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7365 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7366 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7367 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7368 					  &cksumfields) != 0) {
   7369 				/* Error message already displayed. */
   7370 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7371 				continue;
   7372 			}
   7373 		} else {
   7374 			cksumcmd = 0;
   7375 			cksumfields = 0;
   7376 		}
   7377 
   7378 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7379 
   7380 		/* Sync the DMA map. */
   7381 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7382 		    BUS_DMASYNC_PREWRITE);
   7383 
   7384 		/* Initialize the transmit descriptor. */
   7385 		for (nexttx = txq->txq_next, seg = 0;
   7386 		     seg < dmamap->dm_nsegs; seg++) {
   7387 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7388 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7389 			     seglen != 0;
   7390 			     curaddr += curlen, seglen -= curlen,
   7391 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7392 				curlen = seglen;
   7393 
   7394 				/*
   7395 				 * So says the Linux driver:
   7396 				 * Work around for premature descriptor
   7397 				 * write-backs in TSO mode.  Append a
   7398 				 * 4-byte sentinel descriptor.
   7399 				 */
   7400 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7401 				    curlen > 8)
   7402 					curlen -= 4;
   7403 
   7404 				wm_set_dma_addr(
   7405 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7406 				txq->txq_descs[nexttx].wtx_cmdlen
   7407 				    = htole32(cksumcmd | curlen);
   7408 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7409 				    = 0;
   7410 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7411 				    = cksumfields;
    7412 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7413 				lasttx = nexttx;
   7414 
   7415 				DPRINTF(WM_DEBUG_TX,
   7416 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7417 					"len %#04zx\n",
   7418 					device_xname(sc->sc_dev), nexttx,
   7419 					(uint64_t)curaddr, curlen));
   7420 			}
   7421 		}
   7422 
   7423 		KASSERT(lasttx != -1);
   7424 
   7425 		/*
   7426 		 * Set up the command byte on the last descriptor of
   7427 		 * the packet. If we're in the interrupt delay window,
   7428 		 * delay the interrupt.
   7429 		 */
   7430 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7431 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7432 
   7433 		/*
   7434 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7435 		 * up the descriptor to encapsulate the packet for us.
   7436 		 *
   7437 		 * This is only valid on the last descriptor of the packet.
   7438 		 */
   7439 		if (vlan_has_tag(m0)) {
   7440 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7441 			    htole32(WTX_CMD_VLE);
   7442 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7443 			    = htole16(vlan_get_tag(m0));
   7444 		}
   7445 
   7446 		txs->txs_lastdesc = lasttx;
   7447 
   7448 		DPRINTF(WM_DEBUG_TX,
   7449 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7450 			device_xname(sc->sc_dev),
   7451 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7452 
   7453 		/* Sync the descriptors we're using. */
   7454 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7455 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7456 
   7457 		/* Give the packet to the chip. */
   7458 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7459 
   7460 		DPRINTF(WM_DEBUG_TX,
   7461 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7462 
   7463 		DPRINTF(WM_DEBUG_TX,
   7464 		    ("%s: TX: finished transmitting packet, job %d\n",
   7465 			device_xname(sc->sc_dev), txq->txq_snext));
   7466 
   7467 		/* Advance the tx pointer. */
   7468 		txq->txq_free -= txs->txs_ndesc;
   7469 		txq->txq_next = nexttx;
   7470 
   7471 		txq->txq_sfree--;
   7472 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7473 
   7474 		/* Pass the packet to any BPF listeners. */
   7475 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7476 	}
   7477 
   7478 	if (m0 != NULL) {
   7479 		if (!is_transmit)
   7480 			ifp->if_flags |= IFF_OACTIVE;
   7481 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7482 		WM_Q_EVCNT_INCR(txq, descdrop);
   7483 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7484 			__func__));
   7485 		m_freem(m0);
   7486 	}
   7487 
   7488 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7489 		/* No more slots; notify upper layer. */
   7490 		if (!is_transmit)
   7491 			ifp->if_flags |= IFF_OACTIVE;
   7492 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7493 	}
   7494 
   7495 	if (txq->txq_free != ofree) {
   7496 		/* Set a watchdog timer in case the chip flakes out. */
   7497 		txq->txq_lastsent = time_uptime;
   7498 		txq->txq_sending = true;
   7499 	}
   7500 }
   7501 
   7502 /*
   7503  * wm_nq_tx_offload:
   7504  *
   7505  *	Set up TCP/IP checksumming parameters for the
   7506  *	specified packet, for NEWQUEUE devices
   7507  */
   7508 static int
   7509 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7510     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7511 {
   7512 	struct mbuf *m0 = txs->txs_mbuf;
   7513 	uint32_t vl_len, mssidx, cmdc;
   7514 	struct ether_header *eh;
   7515 	int offset, iphl;
   7516 
   7517 	/*
   7518 	 * XXX It would be nice if the mbuf pkthdr had offset
   7519 	 * fields for the protocol headers.
   7520 	 */
   7521 	*cmdlenp = 0;
   7522 	*fieldsp = 0;
   7523 
   7524 	eh = mtod(m0, struct ether_header *);
   7525 	switch (htons(eh->ether_type)) {
   7526 	case ETHERTYPE_IP:
   7527 	case ETHERTYPE_IPV6:
   7528 		offset = ETHER_HDR_LEN;
   7529 		break;
   7530 
   7531 	case ETHERTYPE_VLAN:
   7532 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7533 		break;
   7534 
   7535 	default:
   7536 		/* Don't support this protocol or encapsulation. */
   7537 		*do_csum = false;
   7538 		return 0;
   7539 	}
   7540 	*do_csum = true;
   7541 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7542 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7543 
   7544 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7545 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7546 
   7547 	if ((m0->m_pkthdr.csum_flags &
   7548 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7549 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7550 	} else {
   7551 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7552 	}
   7553 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7554 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
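	/*
	 * Packing sketch: for an untagged IPv4 frame with no IP options,
	 * vl_len now holds (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
	 * (20 << NQTXC_VLLEN_IPLEN_SHIFT); the VLAN field is merged in
	 * below when the mbuf carries a tag.
	 */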
   7555 
   7556 	if (vlan_has_tag(m0)) {
   7557 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7558 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7559 		*cmdlenp |= NQTX_CMD_VLE;
   7560 	}
   7561 
   7562 	mssidx = 0;
   7563 
   7564 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7565 		int hlen = offset + iphl;
   7566 		int tcp_hlen;
   7567 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7568 
   7569 		if (__predict_false(m0->m_len <
   7570 				    (hlen + sizeof(struct tcphdr)))) {
   7571 			/*
   7572 			 * TCP/IP headers are not in the first mbuf; we need
   7573 			 * to do this the slow and painful way. Let's just
   7574 			 * hope this doesn't happen very often.
   7575 			 */
   7576 			struct tcphdr th;
   7577 
   7578 			WM_Q_EVCNT_INCR(txq, tsopain);
   7579 
   7580 			m_copydata(m0, hlen, sizeof(th), &th);
   7581 			if (v4) {
   7582 				struct ip ip;
   7583 
   7584 				m_copydata(m0, offset, sizeof(ip), &ip);
   7585 				ip.ip_len = 0;
   7586 				m_copyback(m0,
   7587 				    offset + offsetof(struct ip, ip_len),
   7588 				    sizeof(ip.ip_len), &ip.ip_len);
   7589 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7590 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7591 			} else {
   7592 				struct ip6_hdr ip6;
   7593 
   7594 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7595 				ip6.ip6_plen = 0;
   7596 				m_copyback(m0,
   7597 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7598 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7599 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7600 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7601 			}
   7602 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7603 			    sizeof(th.th_sum), &th.th_sum);
   7604 
   7605 			tcp_hlen = th.th_off << 2;
   7606 		} else {
   7607 			/*
   7608 			 * TCP/IP headers are in the first mbuf; we can do
   7609 			 * this the easy way.
   7610 			 */
   7611 			struct tcphdr *th;
   7612 
   7613 			if (v4) {
   7614 				struct ip *ip =
   7615 				    (void *)(mtod(m0, char *) + offset);
   7616 				th = (void *)(mtod(m0, char *) + hlen);
   7617 
   7618 				ip->ip_len = 0;
   7619 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7620 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7621 			} else {
   7622 				struct ip6_hdr *ip6 =
   7623 				    (void *)(mtod(m0, char *) + offset);
   7624 				th = (void *)(mtod(m0, char *) + hlen);
   7625 
   7626 				ip6->ip6_plen = 0;
   7627 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7628 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7629 			}
   7630 			tcp_hlen = th->th_off << 2;
   7631 		}
   7632 		hlen += tcp_hlen;
   7633 		*cmdlenp |= NQTX_CMD_TSE;
   7634 
   7635 		if (v4) {
   7636 			WM_Q_EVCNT_INCR(txq, tso);
   7637 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7638 		} else {
   7639 			WM_Q_EVCNT_INCR(txq, tso6);
   7640 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7641 		}
   7642 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7643 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7644 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7645 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7646 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7647 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
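		/*
		 * e.g. a standard TSOv4 segment with an MSS of 1460 and a
		 * 20-byte TCP header yields mssidx ==
		 * (1460 << NQTXC_MSSIDX_MSS_SHIFT) |
		 * (20 << NQTXC_MSSIDX_L4LEN_SHIFT).
		 */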
   7648 	} else {
   7649 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7650 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7651 	}
   7652 
   7653 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7654 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7655 		cmdc |= NQTXC_CMD_IP4;
   7656 	}
   7657 
   7658 	if (m0->m_pkthdr.csum_flags &
   7659 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7660 		WM_Q_EVCNT_INCR(txq, tusum);
   7661 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7662 			cmdc |= NQTXC_CMD_TCP;
   7663 		} else {
   7664 			cmdc |= NQTXC_CMD_UDP;
   7665 		}
   7666 		cmdc |= NQTXC_CMD_IP4;
   7667 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7668 	}
   7669 	if (m0->m_pkthdr.csum_flags &
   7670 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7671 		WM_Q_EVCNT_INCR(txq, tusum6);
   7672 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7673 			cmdc |= NQTXC_CMD_TCP;
   7674 		} else {
   7675 			cmdc |= NQTXC_CMD_UDP;
   7676 		}
   7677 		cmdc |= NQTXC_CMD_IP6;
   7678 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7679 	}
   7680 
   7681 	/*
    7682 	 * We don't have to write a context descriptor for every packet on
    7683 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7684 	 * I210 and I211; writing one per Tx queue is enough for these
    7685 	 * controllers.
    7686 	 * Writing a context descriptor for every packet adds overhead,
    7687 	 * but it does not cause problems.
   7688 	 */
   7689 	/* Fill in the context descriptor. */
   7690 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7691 	    htole32(vl_len);
   7692 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7693 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7694 	    htole32(cmdc);
   7695 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7696 	    htole32(mssidx);
   7697 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7698 	DPRINTF(WM_DEBUG_TX,
   7699 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7700 		txq->txq_next, 0, vl_len));
   7701 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7702 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7703 	txs->txs_ndesc++;
   7704 	return 0;
   7705 }
   7706 
   7707 /*
   7708  * wm_nq_start:		[ifnet interface function]
   7709  *
   7710  *	Start packet transmission on the interface for NEWQUEUE devices
   7711  */
   7712 static void
   7713 wm_nq_start(struct ifnet *ifp)
   7714 {
   7715 	struct wm_softc *sc = ifp->if_softc;
   7716 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7717 
   7718 #ifdef WM_MPSAFE
   7719 	KASSERT(if_is_mpsafe(ifp));
   7720 #endif
   7721 	/*
   7722 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7723 	 */
   7724 
   7725 	mutex_enter(txq->txq_lock);
   7726 	if (!txq->txq_stopping)
   7727 		wm_nq_start_locked(ifp);
   7728 	mutex_exit(txq->txq_lock);
   7729 }
   7730 
   7731 static void
   7732 wm_nq_start_locked(struct ifnet *ifp)
   7733 {
   7734 	struct wm_softc *sc = ifp->if_softc;
   7735 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7736 
   7737 	wm_nq_send_common_locked(ifp, txq, false);
   7738 }
   7739 
   7740 static int
   7741 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7742 {
   7743 	int qid;
   7744 	struct wm_softc *sc = ifp->if_softc;
   7745 	struct wm_txqueue *txq;
   7746 
   7747 	qid = wm_select_txqueue(ifp, m);
   7748 	txq = &sc->sc_queue[qid].wmq_txq;
   7749 
   7750 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7751 		m_freem(m);
   7752 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7753 		return ENOBUFS;
   7754 	}
   7755 
   7756 	/*
   7757 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7758 	 */
   7759 	ifp->if_obytes += m->m_pkthdr.len;
   7760 	if (m->m_flags & M_MCAST)
   7761 		ifp->if_omcasts++;
   7762 
   7763 	/*
    7764 	 * There are two situations in which this mutex_tryenter() can
    7765 	 * fail at run time:
   7766 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
   7767 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
   7768 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   7769 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   7770 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
   7771 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
   7772 	 * stuck, either.
   7773 	 */
   7774 	if (mutex_tryenter(txq->txq_lock)) {
   7775 		if (!txq->txq_stopping)
   7776 			wm_nq_transmit_locked(ifp, txq);
   7777 		mutex_exit(txq->txq_lock);
   7778 	}
   7779 
   7780 	return 0;
   7781 }
   7782 
   7783 static void
   7784 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7785 {
   7786 
   7787 	wm_nq_send_common_locked(ifp, txq, true);
   7788 }
   7789 
   7790 static void
   7791 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7792     bool is_transmit)
   7793 {
   7794 	struct wm_softc *sc = ifp->if_softc;
   7795 	struct mbuf *m0;
   7796 	struct wm_txsoft *txs;
   7797 	bus_dmamap_t dmamap;
   7798 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7799 	bool do_csum, sent;
   7800 	bool remap = true;
   7801 
   7802 	KASSERT(mutex_owned(txq->txq_lock));
   7803 
   7804 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7805 		return;
   7806 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7807 		return;
   7808 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7809 		return;
   7810 
   7811 	sent = false;
   7812 
   7813 	/*
   7814 	 * Loop through the send queue, setting up transmit descriptors
   7815 	 * until we drain the queue, or use up all available transmit
   7816 	 * descriptors.
   7817 	 */
   7818 	for (;;) {
   7819 		m0 = NULL;
   7820 
   7821 		/* Get a work queue entry. */
   7822 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7823 			wm_txeof(txq, UINT_MAX);
   7824 			if (txq->txq_sfree == 0) {
   7825 				DPRINTF(WM_DEBUG_TX,
   7826 				    ("%s: TX: no free job descriptors\n",
   7827 					device_xname(sc->sc_dev)));
   7828 				WM_Q_EVCNT_INCR(txq, txsstall);
   7829 				break;
   7830 			}
   7831 		}
   7832 
   7833 		/* Grab a packet off the queue. */
   7834 		if (is_transmit)
   7835 			m0 = pcq_get(txq->txq_interq);
   7836 		else
   7837 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7838 		if (m0 == NULL)
   7839 			break;
   7840 
   7841 		DPRINTF(WM_DEBUG_TX,
   7842 		    ("%s: TX: have packet to transmit: %p\n",
   7843 		    device_xname(sc->sc_dev), m0));
   7844 
   7845 		txs = &txq->txq_soft[txq->txq_snext];
   7846 		dmamap = txs->txs_dmamap;
   7847 
   7848 		/*
   7849 		 * Load the DMA map.  If this fails, the packet either
   7850 		 * didn't fit in the allotted number of segments, or we
   7851 		 * were short on resources.  For the too-many-segments
   7852 		 * case, we simply report an error and drop the packet,
   7853 		 * since we can't sanely copy a jumbo packet to a single
   7854 		 * buffer.
   7855 		 */
   7856 retry:
   7857 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7858 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7859 		if (__predict_false(error)) {
   7860 			if (error == EFBIG) {
   7861 				if (remap == true) {
   7862 					struct mbuf *m;
   7863 
   7864 					remap = false;
   7865 					m = m_defrag(m0, M_NOWAIT);
   7866 					if (m != NULL) {
   7867 						WM_Q_EVCNT_INCR(txq, defrag);
   7868 						m0 = m;
   7869 						goto retry;
   7870 					}
   7871 				}
   7872 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7873 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7874 				    "DMA segments, dropping...\n",
   7875 				    device_xname(sc->sc_dev));
   7876 				wm_dump_mbuf_chain(sc, m0);
   7877 				m_freem(m0);
   7878 				continue;
   7879 			}
   7880 			/* Short on resources, just stop for now. */
   7881 			DPRINTF(WM_DEBUG_TX,
   7882 			    ("%s: TX: dmamap load failed: %d\n",
   7883 				device_xname(sc->sc_dev), error));
   7884 			break;
   7885 		}
   7886 
   7887 		segs_needed = dmamap->dm_nsegs;
   7888 
   7889 		/*
   7890 		 * Ensure we have enough descriptors free to describe
   7891 		 * the packet. Note, we always reserve one descriptor
   7892 		 * at the end of the ring due to the semantics of the
   7893 		 * TDT register, plus one more in the event we need
   7894 		 * to load offload context.
   7895 		 */
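		/*
		 * For example, with 8 free descriptors a packet mapped
		 * into 7 or 8 DMA segments is deferred: one slot is
		 * reserved for the TDT semantics and one for a possible
		 * context descriptor, so at most 6 may be consumed here.
		 */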
   7896 		if (segs_needed > txq->txq_free - 2) {
   7897 			/*
    7898 			 * Not enough free descriptors to transmit this
    7899 			 * packet.  We haven't committed anything yet, so
    7900 			 * just unload the DMA map and punt; the packet is
    7901 			 * dropped after the loop. Notify the upper layer
    7902 			 * that there are no more slots left.
   7903 			 */
   7904 			DPRINTF(WM_DEBUG_TX,
   7905 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7906 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7907 				segs_needed, txq->txq_free - 1));
   7908 			if (!is_transmit)
   7909 				ifp->if_flags |= IFF_OACTIVE;
   7910 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7911 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7912 			WM_Q_EVCNT_INCR(txq, txdstall);
   7913 			break;
   7914 		}
   7915 
   7916 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7917 
   7918 		DPRINTF(WM_DEBUG_TX,
   7919 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7920 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7921 
   7922 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7923 
   7924 		/*
   7925 		 * Store a pointer to the packet so that we can free it
   7926 		 * later.
   7927 		 *
    7928 		 * Initially, we take the number of descriptors used by
    7929 		 * the packet to be the number of DMA segments.  This may be
   7930 		 * incremented by 1 if we do checksum offload (a descriptor
   7931 		 * is used to set the checksum context).
   7932 		 */
   7933 		txs->txs_mbuf = m0;
   7934 		txs->txs_firstdesc = txq->txq_next;
   7935 		txs->txs_ndesc = segs_needed;
   7936 
   7937 		/* Set up offload parameters for this packet. */
   7938 		uint32_t cmdlen, fields, dcmdlen;
   7939 		if (m0->m_pkthdr.csum_flags &
   7940 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7941 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7942 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7943 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7944 			    &do_csum) != 0) {
   7945 				/* Error message already displayed. */
   7946 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7947 				continue;
   7948 			}
   7949 		} else {
   7950 			do_csum = false;
   7951 			cmdlen = 0;
   7952 			fields = 0;
   7953 		}
   7954 
   7955 		/* Sync the DMA map. */
   7956 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7957 		    BUS_DMASYNC_PREWRITE);
   7958 
   7959 		/* Initialize the first transmit descriptor. */
   7960 		nexttx = txq->txq_next;
   7961 		if (!do_csum) {
   7962 			/* setup a legacy descriptor */
   7963 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7964 			    dmamap->dm_segs[0].ds_addr);
   7965 			txq->txq_descs[nexttx].wtx_cmdlen =
   7966 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7967 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7968 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7969 			if (vlan_has_tag(m0)) {
   7970 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7971 				    htole32(WTX_CMD_VLE);
   7972 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7973 				    htole16(vlan_get_tag(m0));
   7974 			} else {
   7975 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7976 			}
   7977 			dcmdlen = 0;
   7978 		} else {
   7979 			/* setup an advanced data descriptor */
   7980 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7981 			    htole64(dmamap->dm_segs[0].ds_addr);
   7982 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7983 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7984 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7985 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7986 			    htole32(fields);
   7987 			DPRINTF(WM_DEBUG_TX,
   7988 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7989 				device_xname(sc->sc_dev), nexttx,
   7990 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   7991 			DPRINTF(WM_DEBUG_TX,
   7992 			    ("\t 0x%08x%08x\n", fields,
   7993 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7994 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7995 		}
   7996 
   7997 		lasttx = nexttx;
   7998 		nexttx = WM_NEXTTX(txq, nexttx);
   7999 		/*
    8000 		 * Fill in the next descriptors. The legacy and advanced
    8001 		 * formats are identical from here on.
   8002 		 */
   8003 		for (seg = 1; seg < dmamap->dm_nsegs;
   8004 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8005 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8006 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8007 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8008 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8009 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8010 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8011 			lasttx = nexttx;
   8012 
   8013 			DPRINTF(WM_DEBUG_TX,
   8014 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8015 				device_xname(sc->sc_dev), nexttx,
   8016 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8017 				dmamap->dm_segs[seg].ds_len));
   8018 		}
   8019 
   8020 		KASSERT(lasttx != -1);
   8021 
   8022 		/*
   8023 		 * Set up the command byte on the last descriptor of
   8024 		 * the packet. If we're in the interrupt delay window,
   8025 		 * delay the interrupt.
   8026 		 */
   8027 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8028 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8029 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8030 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8031 
   8032 		txs->txs_lastdesc = lasttx;
   8033 
   8034 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8035 		    device_xname(sc->sc_dev),
   8036 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8037 
   8038 		/* Sync the descriptors we're using. */
   8039 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8040 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8041 
   8042 		/* Give the packet to the chip. */
   8043 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8044 		sent = true;
   8045 
   8046 		DPRINTF(WM_DEBUG_TX,
   8047 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8048 
   8049 		DPRINTF(WM_DEBUG_TX,
   8050 		    ("%s: TX: finished transmitting packet, job %d\n",
   8051 			device_xname(sc->sc_dev), txq->txq_snext));
   8052 
   8053 		/* Advance the tx pointer. */
   8054 		txq->txq_free -= txs->txs_ndesc;
   8055 		txq->txq_next = nexttx;
   8056 
   8057 		txq->txq_sfree--;
   8058 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8059 
   8060 		/* Pass the packet to any BPF listeners. */
   8061 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8062 	}
   8063 
   8064 	if (m0 != NULL) {
   8065 		if (!is_transmit)
   8066 			ifp->if_flags |= IFF_OACTIVE;
   8067 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8068 		WM_Q_EVCNT_INCR(txq, descdrop);
   8069 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8070 			__func__));
   8071 		m_freem(m0);
   8072 	}
   8073 
   8074 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8075 		/* No more slots; notify upper layer. */
   8076 		if (!is_transmit)
   8077 			ifp->if_flags |= IFF_OACTIVE;
   8078 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8079 	}
   8080 
   8081 	if (sent) {
   8082 		/* Set a watchdog timer in case the chip flakes out. */
   8083 		txq->txq_lastsent = time_uptime;
   8084 		txq->txq_sending = true;
   8085 	}
   8086 }
   8087 
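/*
 * wm_deferred_start_locked:
 *
 *	Restart transmission from softint context once wm_txeof() has
 *	reclaimed descriptors.  Queue 0 also drives the if_snd (start)
 *	path, which is still needed for ALTQ and single-CPU systems.
 *	Called with txq_lock held.
 */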
   8088 static void
   8089 wm_deferred_start_locked(struct wm_txqueue *txq)
   8090 {
   8091 	struct wm_softc *sc = txq->txq_sc;
   8092 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8093 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8094 	int qid = wmq->wmq_id;
   8095 
   8096 	KASSERT(mutex_owned(txq->txq_lock));
   8097 
   8098 	if (txq->txq_stopping) {
   8099 		mutex_exit(txq->txq_lock);
   8100 		return;
   8101 	}
   8102 
   8103 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8104 		/* XXX needed for ALTQ or single-CPU systems */
   8105 		if (qid == 0)
   8106 			wm_nq_start_locked(ifp);
   8107 		wm_nq_transmit_locked(ifp, txq);
   8108 	} else {
    8109 		/* XXX needed for ALTQ or single-CPU systems */
   8110 		if (qid == 0)
   8111 			wm_start_locked(ifp);
   8112 		wm_transmit_locked(ifp, txq);
   8113 	}
   8114 }
   8115 
   8116 /* Interrupt */
   8117 
   8118 /*
   8119  * wm_txeof:
   8120  *
   8121  *	Helper; handle transmit interrupts.
   8122  */
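/*
 * Note: descriptors are reclaimed by walking the software state from
 * txq_sdirty until a job whose last descriptor does not yet have
 * WTX_ST_DD (descriptor done) set is found, or until the given limit
 * is reached; the return value tells the caller whether more work
 * remains.
 */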
   8123 static bool
   8124 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8125 {
   8126 	struct wm_softc *sc = txq->txq_sc;
   8127 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8128 	struct wm_txsoft *txs;
   8129 	int count = 0;
   8130 	int i;
   8131 	uint8_t status;
   8132 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8133 	bool more = false;
   8134 
   8135 	KASSERT(mutex_owned(txq->txq_lock));
   8136 
   8137 	if (txq->txq_stopping)
   8138 		return false;
   8139 
   8140 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8141 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8142 	if (wmq->wmq_id == 0)
   8143 		ifp->if_flags &= ~IFF_OACTIVE;
   8144 
   8145 	/*
   8146 	 * Go through the Tx list and free mbufs for those
   8147 	 * frames which have been transmitted.
   8148 	 */
   8149 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8150 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8151 		if (limit-- == 0) {
   8152 			more = true;
   8153 			DPRINTF(WM_DEBUG_TX,
   8154 			    ("%s: TX: loop limited, job %d is not processed\n",
   8155 				device_xname(sc->sc_dev), i));
   8156 			break;
   8157 		}
   8158 
   8159 		txs = &txq->txq_soft[i];
   8160 
   8161 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8162 			device_xname(sc->sc_dev), i));
   8163 
   8164 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8165 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8166 
   8167 		status =
   8168 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8169 		if ((status & WTX_ST_DD) == 0) {
   8170 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8171 			    BUS_DMASYNC_PREREAD);
   8172 			break;
   8173 		}
   8174 
   8175 		count++;
   8176 		DPRINTF(WM_DEBUG_TX,
   8177 		    ("%s: TX: job %d done: descs %d..%d\n",
   8178 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8179 		    txs->txs_lastdesc));
   8180 
   8181 		/*
   8182 		 * XXX We should probably be using the statistics
   8183 		 * XXX registers, but I don't know if they exist
   8184 		 * XXX on chips before the i82544.
   8185 		 */
   8186 
   8187 #ifdef WM_EVENT_COUNTERS
   8188 		if (status & WTX_ST_TU)
   8189 			WM_Q_EVCNT_INCR(txq, underrun);
   8190 #endif /* WM_EVENT_COUNTERS */
   8191 
   8192 		/*
    8193 		 * Documents for the 82574 and newer say the status field
    8194 		 * has neither the EC (Excessive Collision) bit nor the LC
    8195 		 * (Late Collision) bit (both are reserved). Refer to the
    8196 		 * "PCIe GbE Controller Open Source Software Developer's
    8197 		 * Manual", the 82574 datasheet and newer ones.
    8198 		 *
    8199 		 * XXX I saw the LC bit set on I218 even on full-duplex
    8200 		 * media, so the bit might have another meaning (no docs).
   8201 		 */
   8202 
   8203 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8204 		    && ((sc->sc_type < WM_T_82574)
   8205 			|| (sc->sc_type == WM_T_80003))) {
   8206 			ifp->if_oerrors++;
   8207 			if (status & WTX_ST_LC)
   8208 				log(LOG_WARNING, "%s: late collision\n",
   8209 				    device_xname(sc->sc_dev));
   8210 			else if (status & WTX_ST_EC) {
   8211 				ifp->if_collisions +=
   8212 				    TX_COLLISION_THRESHOLD + 1;
   8213 				log(LOG_WARNING, "%s: excessive collisions\n",
   8214 				    device_xname(sc->sc_dev));
   8215 			}
   8216 		} else
   8217 			ifp->if_opackets++;
   8218 
   8219 		txq->txq_packets++;
   8220 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8221 
   8222 		txq->txq_free += txs->txs_ndesc;
   8223 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8224 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8225 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8226 		m_freem(txs->txs_mbuf);
   8227 		txs->txs_mbuf = NULL;
   8228 	}
   8229 
   8230 	/* Update the dirty transmit buffer pointer. */
   8231 	txq->txq_sdirty = i;
   8232 	DPRINTF(WM_DEBUG_TX,
   8233 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8234 
   8235 	if (count != 0)
   8236 		rnd_add_uint32(&sc->rnd_source, count);
   8237 
   8238 	/*
   8239 	 * If there are no more pending transmissions, cancel the watchdog
   8240 	 * timer.
   8241 	 */
   8242 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8243 		txq->txq_sending = false;
   8244 
   8245 	return more;
   8246 }
   8247 
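/*
 * The inline helpers below hide the three receive descriptor layouts
 * from the rest of the driver: the legacy format (wrx_*), the 82574
 * extended format (erx_*) and the advanced format of the WM_F_NEWQUEUE
 * devices (nqrx_*).
 */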
   8248 static inline uint32_t
   8249 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8250 {
   8251 	struct wm_softc *sc = rxq->rxq_sc;
   8252 
   8253 	if (sc->sc_type == WM_T_82574)
   8254 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8255 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8256 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8257 	else
   8258 		return rxq->rxq_descs[idx].wrx_status;
   8259 }
   8260 
   8261 static inline uint32_t
   8262 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8263 {
   8264 	struct wm_softc *sc = rxq->rxq_sc;
   8265 
   8266 	if (sc->sc_type == WM_T_82574)
   8267 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8268 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8269 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8270 	else
   8271 		return rxq->rxq_descs[idx].wrx_errors;
   8272 }
   8273 
   8274 static inline uint16_t
   8275 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8276 {
   8277 	struct wm_softc *sc = rxq->rxq_sc;
   8278 
   8279 	if (sc->sc_type == WM_T_82574)
   8280 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8281 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8282 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8283 	else
   8284 		return rxq->rxq_descs[idx].wrx_special;
   8285 }
   8286 
   8287 static inline int
   8288 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8289 {
   8290 	struct wm_softc *sc = rxq->rxq_sc;
   8291 
   8292 	if (sc->sc_type == WM_T_82574)
   8293 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8294 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8295 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8296 	else
   8297 		return rxq->rxq_descs[idx].wrx_len;
   8298 }
   8299 
   8300 #ifdef WM_DEBUG
   8301 static inline uint32_t
   8302 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8303 {
   8304 	struct wm_softc *sc = rxq->rxq_sc;
   8305 
   8306 	if (sc->sc_type == WM_T_82574)
   8307 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8308 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8309 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8310 	else
   8311 		return 0;
   8312 }
   8313 
   8314 static inline uint8_t
   8315 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8316 {
   8317 	struct wm_softc *sc = rxq->rxq_sc;
   8318 
   8319 	if (sc->sc_type == WM_T_82574)
   8320 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8321 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8322 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8323 	else
   8324 		return 0;
   8325 }
   8326 #endif /* WM_DEBUG */
   8327 
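/*
 * wm_rxdesc_is_set_status/wm_rxdesc_is_set_error:
 *
 *	Test one status/error bit, picking the legacy, 82574 extended or
 *	advanced (newqueue) encoding of that bit as appropriate for the
 *	controller.  Passing 0 for a format's bit means the format has
 *	no such bit, so the test always fails there.
 */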
   8328 static inline bool
   8329 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8330     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8331 {
   8332 
   8333 	if (sc->sc_type == WM_T_82574)
   8334 		return (status & ext_bit) != 0;
   8335 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8336 		return (status & nq_bit) != 0;
   8337 	else
   8338 		return (status & legacy_bit) != 0;
   8339 }
   8340 
   8341 static inline bool
   8342 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8343     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8344 {
   8345 
   8346 	if (sc->sc_type == WM_T_82574)
   8347 		return (error & ext_bit) != 0;
   8348 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8349 		return (error & nq_bit) != 0;
   8350 	else
   8351 		return (error & legacy_bit) != 0;
   8352 }
   8353 
   8354 static inline bool
   8355 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8356 {
   8357 
   8358 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8359 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8360 		return true;
   8361 	else
   8362 		return false;
   8363 }
   8364 
   8365 static inline bool
   8366 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8367 {
   8368 	struct wm_softc *sc = rxq->rxq_sc;
   8369 
   8370 	/* XXXX missing error bit for newqueue? */
   8371 	if (wm_rxdesc_is_set_error(sc, errors,
   8372 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8373 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8374 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8375 		NQRXC_ERROR_RXE)) {
   8376 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8377 		    EXTRXC_ERROR_SE, 0))
   8378 			log(LOG_WARNING, "%s: symbol error\n",
   8379 			    device_xname(sc->sc_dev));
   8380 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8381 		    EXTRXC_ERROR_SEQ, 0))
   8382 			log(LOG_WARNING, "%s: receive sequence error\n",
   8383 			    device_xname(sc->sc_dev));
   8384 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8385 		    EXTRXC_ERROR_CE, 0))
   8386 			log(LOG_WARNING, "%s: CRC error\n",
   8387 			    device_xname(sc->sc_dev));
   8388 		return true;
   8389 	}
   8390 
   8391 	return false;
   8392 }
   8393 
   8394 static inline bool
   8395 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8396 {
   8397 	struct wm_softc *sc = rxq->rxq_sc;
   8398 
   8399 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8400 		NQRXC_STATUS_DD)) {
   8401 		/* We have processed all of the receive descriptors. */
   8402 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8403 		return false;
   8404 	}
   8405 
   8406 	return true;
   8407 }
   8408 
   8409 static inline bool
   8410 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8411     uint16_t vlantag, struct mbuf *m)
   8412 {
   8413 
   8414 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8415 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8416 		vlan_set_tag(m, le16toh(vlantag));
   8417 	}
   8418 
   8419 	return true;
   8420 }
   8421 
   8422 static inline void
   8423 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8424     uint32_t errors, struct mbuf *m)
   8425 {
   8426 	struct wm_softc *sc = rxq->rxq_sc;
   8427 
   8428 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8429 		if (wm_rxdesc_is_set_status(sc, status,
   8430 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8431 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8432 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8433 			if (wm_rxdesc_is_set_error(sc, errors,
   8434 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8435 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8436 		}
   8437 		if (wm_rxdesc_is_set_status(sc, status,
   8438 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8439 			/*
   8440 			 * Note: we don't know if this was TCP or UDP,
   8441 			 * so we just set both bits, and expect the
   8442 			 * upper layers to deal.
   8443 			 */
   8444 			WM_Q_EVCNT_INCR(rxq, tusum);
   8445 			m->m_pkthdr.csum_flags |=
   8446 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8447 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8448 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8449 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8450 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8451 		}
   8452 	}
   8453 }
   8454 
   8455 /*
   8456  * wm_rxeof:
   8457  *
   8458  *	Helper; handle receive interrupts.
   8459  */
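/*
 * A received frame may span several descriptors.  The fragments are
 * chained on rxq_head/rxq_tail via WM_RXCHAIN_LINK() and are handed to
 * the stack as a single mbuf chain once the EOP descriptor is seen.
 */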
   8460 static bool
   8461 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8462 {
   8463 	struct wm_softc *sc = rxq->rxq_sc;
   8464 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8465 	struct wm_rxsoft *rxs;
   8466 	struct mbuf *m;
   8467 	int i, len;
   8468 	int count = 0;
   8469 	uint32_t status, errors;
   8470 	uint16_t vlantag;
   8471 	bool more = false;
   8472 
   8473 	KASSERT(mutex_owned(rxq->rxq_lock));
   8474 
   8475 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8476 		if (limit-- == 0) {
   8477 			rxq->rxq_ptr = i;
   8478 			more = true;
   8479 			DPRINTF(WM_DEBUG_RX,
   8480 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8481 				device_xname(sc->sc_dev), i));
   8482 			break;
   8483 		}
   8484 
   8485 		rxs = &rxq->rxq_soft[i];
   8486 
   8487 		DPRINTF(WM_DEBUG_RX,
   8488 		    ("%s: RX: checking descriptor %d\n",
   8489 			device_xname(sc->sc_dev), i));
   8490 		wm_cdrxsync(rxq, i,
   8491 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8492 
   8493 		status = wm_rxdesc_get_status(rxq, i);
   8494 		errors = wm_rxdesc_get_errors(rxq, i);
   8495 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8496 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8497 #ifdef WM_DEBUG
   8498 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8499 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8500 #endif
   8501 
   8502 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8503 			/*
    8504 			 * Update the receive pointer while holding rxq_lock
    8505 			 * so it stays consistent with the counters.
   8506 			 */
   8507 			rxq->rxq_ptr = i;
   8508 			break;
   8509 		}
   8510 
   8511 		count++;
   8512 		if (__predict_false(rxq->rxq_discard)) {
   8513 			DPRINTF(WM_DEBUG_RX,
   8514 			    ("%s: RX: discarding contents of descriptor %d\n",
   8515 				device_xname(sc->sc_dev), i));
   8516 			wm_init_rxdesc(rxq, i);
   8517 			if (wm_rxdesc_is_eop(rxq, status)) {
   8518 				/* Reset our state. */
   8519 				DPRINTF(WM_DEBUG_RX,
   8520 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8521 					device_xname(sc->sc_dev)));
   8522 				rxq->rxq_discard = 0;
   8523 			}
   8524 			continue;
   8525 		}
   8526 
   8527 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8528 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8529 
   8530 		m = rxs->rxs_mbuf;
   8531 
   8532 		/*
   8533 		 * Add a new receive buffer to the ring, unless of
   8534 		 * course the length is zero. Treat the latter as a
   8535 		 * failed mapping.
   8536 		 */
   8537 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8538 			/*
   8539 			 * Failed, throw away what we've done so
   8540 			 * far, and discard the rest of the packet.
   8541 			 */
   8542 			ifp->if_ierrors++;
   8543 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8544 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8545 			wm_init_rxdesc(rxq, i);
   8546 			if (!wm_rxdesc_is_eop(rxq, status))
   8547 				rxq->rxq_discard = 1;
   8548 			if (rxq->rxq_head != NULL)
   8549 				m_freem(rxq->rxq_head);
   8550 			WM_RXCHAIN_RESET(rxq);
   8551 			DPRINTF(WM_DEBUG_RX,
   8552 			    ("%s: RX: Rx buffer allocation failed, "
   8553 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8554 				rxq->rxq_discard ? " (discard)" : ""));
   8555 			continue;
   8556 		}
   8557 
   8558 		m->m_len = len;
   8559 		rxq->rxq_len += len;
   8560 		DPRINTF(WM_DEBUG_RX,
   8561 		    ("%s: RX: buffer at %p len %d\n",
   8562 			device_xname(sc->sc_dev), m->m_data, len));
   8563 
   8564 		/* If this is not the end of the packet, keep looking. */
   8565 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8566 			WM_RXCHAIN_LINK(rxq, m);
   8567 			DPRINTF(WM_DEBUG_RX,
   8568 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8569 				device_xname(sc->sc_dev), rxq->rxq_len));
   8570 			continue;
   8571 		}
   8572 
   8573 		/*
    8574 		 * Okay, we have the entire packet now. The chip is
    8575 		 * configured to include the FCS except on I350, I354 and
    8576 		 * I21[01] (not all chips can be configured to strip it),
    8577 		 * so we need to trim it. We may also need to adjust the
    8578 		 * length of the previous mbuf in the chain if the current
    8579 		 * mbuf is too short. Due to an erratum, the RCTL_SECRC bit
    8580 		 * in the RCTL register is always set on I350, so we don't
    8581 		 * trim the FCS there.
   8582 		 */
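		/*
		 * For example, if the frame ends with a 2-byte final
		 * mbuf, those 2 bytes are all FCS (ETHER_CRC_LEN is 4),
		 * so that mbuf is emptied and the remaining 2 FCS bytes
		 * are trimmed from the previous mbuf in the chain.
		 */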
   8583 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8584 		    && (sc->sc_type != WM_T_I210)
   8585 		    && (sc->sc_type != WM_T_I211)) {
   8586 			if (m->m_len < ETHER_CRC_LEN) {
   8587 				rxq->rxq_tail->m_len
   8588 				    -= (ETHER_CRC_LEN - m->m_len);
   8589 				m->m_len = 0;
   8590 			} else
   8591 				m->m_len -= ETHER_CRC_LEN;
   8592 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8593 		} else
   8594 			len = rxq->rxq_len;
   8595 
   8596 		WM_RXCHAIN_LINK(rxq, m);
   8597 
   8598 		*rxq->rxq_tailp = NULL;
   8599 		m = rxq->rxq_head;
   8600 
   8601 		WM_RXCHAIN_RESET(rxq);
   8602 
   8603 		DPRINTF(WM_DEBUG_RX,
   8604 		    ("%s: RX: have entire packet, len -> %d\n",
   8605 			device_xname(sc->sc_dev), len));
   8606 
   8607 		/* If an error occurred, update stats and drop the packet. */
   8608 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8609 			m_freem(m);
   8610 			continue;
   8611 		}
   8612 
   8613 		/* No errors.  Receive the packet. */
   8614 		m_set_rcvif(m, ifp);
   8615 		m->m_pkthdr.len = len;
   8616 		/*
    8617 		 * TODO:
    8618 		 * We should save the RSS hash and RSS type in this mbuf.
   8619 		 */
   8620 		DPRINTF(WM_DEBUG_RX,
   8621 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8622 			device_xname(sc->sc_dev), rsstype, rsshash));
   8623 
   8624 		/*
   8625 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8626 		 * for us.  Associate the tag with the packet.
   8627 		 */
   8628 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8629 			continue;
   8630 
   8631 		/* Set up checksum info for this packet. */
   8632 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8633 		/*
    8634 		 * Update the receive pointer while holding rxq_lock so it
    8635 		 * stays consistent with the updated counters.
   8636 		 */
   8637 		rxq->rxq_ptr = i;
   8638 		rxq->rxq_packets++;
   8639 		rxq->rxq_bytes += len;
   8640 		mutex_exit(rxq->rxq_lock);
   8641 
   8642 		/* Pass it on. */
   8643 		if_percpuq_enqueue(sc->sc_ipq, m);
   8644 
   8645 		mutex_enter(rxq->rxq_lock);
   8646 
   8647 		if (rxq->rxq_stopping)
   8648 			break;
   8649 	}
   8650 
   8651 	if (count != 0)
   8652 		rnd_add_uint32(&sc->rnd_source, count);
   8653 
   8654 	DPRINTF(WM_DEBUG_RX,
   8655 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8656 
   8657 	return more;
   8658 }
   8659 
   8660 /*
   8661  * wm_linkintr_gmii:
   8662  *
   8663  *	Helper; handle link interrupts for GMII.
   8664  */
   8665 static void
   8666 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8667 {
   8668 
   8669 	KASSERT(WM_CORE_LOCKED(sc));
   8670 
   8671 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8672 		__func__));
   8673 
   8674 	if (icr & ICR_LSC) {
   8675 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8676 		uint32_t reg;
   8677 		bool link;
   8678 
   8679 		link = status & STATUS_LU;
   8680 		if (link) {
   8681 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8682 				device_xname(sc->sc_dev),
   8683 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8684 		} else {
   8685 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8686 				device_xname(sc->sc_dev)));
   8687 		}
   8688 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8689 			wm_gig_downshift_workaround_ich8lan(sc);
   8690 
   8691 		if ((sc->sc_type == WM_T_ICH8)
   8692 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8693 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8694 		}
   8695 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8696 			device_xname(sc->sc_dev)));
   8697 		mii_pollstat(&sc->sc_mii);
   8698 		if (sc->sc_type == WM_T_82543) {
   8699 			int miistatus, active;
   8700 
   8701 			/*
   8702 			 * With 82543, we need to force speed and
   8703 			 * duplex on the MAC equal to what the PHY
   8704 			 * speed and duplex configuration is.
   8705 			 */
   8706 			miistatus = sc->sc_mii.mii_media_status;
   8707 
   8708 			if (miistatus & IFM_ACTIVE) {
   8709 				active = sc->sc_mii.mii_media_active;
   8710 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8711 				switch (IFM_SUBTYPE(active)) {
   8712 				case IFM_10_T:
   8713 					sc->sc_ctrl |= CTRL_SPEED_10;
   8714 					break;
   8715 				case IFM_100_TX:
   8716 					sc->sc_ctrl |= CTRL_SPEED_100;
   8717 					break;
   8718 				case IFM_1000_T:
   8719 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8720 					break;
   8721 				default:
   8722 					/*
   8723 					 * fiber?
   8724 					 * Shoud not enter here.
   8725 					 */
   8726 					printf("unknown media (%x)\n", active);
   8727 					break;
   8728 				}
   8729 				if (active & IFM_FDX)
   8730 					sc->sc_ctrl |= CTRL_FD;
   8731 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8732 			}
   8733 		} else if (sc->sc_type == WM_T_PCH) {
   8734 			wm_k1_gig_workaround_hv(sc,
   8735 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8736 		}
   8737 
   8738 		if ((sc->sc_phytype == WMPHY_82578)
   8739 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8740 			== IFM_1000_T)) {
   8741 
   8742 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8743 				delay(200*1000); /* XXX too big */
   8744 
   8745 				/* Link stall fix for link up */
   8746 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8747 				    HV_MUX_DATA_CTRL,
   8748 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8749 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8750 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8751 				    HV_MUX_DATA_CTRL,
   8752 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8753 			}
   8754 		}
   8755 		/*
   8756 		 * I217 Packet Loss issue:
    8757 		 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    8758 		 * on power up.
    8759 		 * Set the Beacon Duration for I217 to 8 usec.
   8760 		 */
   8761 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8762 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8763 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8764 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8765 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8766 		}
   8767 
   8768 		/* Work-around I218 hang issue */
   8769 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   8770 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   8771 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   8772 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   8773 			wm_k1_workaround_lpt_lp(sc, link);
   8774 
   8775 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8776 			/*
   8777 			 * Set platform power management values for Latency
   8778 			 * Tolerance Reporting (LTR)
   8779 			 */
   8780 			wm_platform_pm_pch_lpt(sc,
   8781 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8782 		}
   8783 
   8784 		/* FEXTNVM6 K1-off workaround */
   8785 		if (sc->sc_type == WM_T_PCH_SPT) {
   8786 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8787 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8788 			    & FEXTNVM6_K1_OFF_ENABLE)
   8789 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8790 			else
   8791 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8792 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8793 		}
   8794 	} else if (icr & ICR_RXSEQ) {
   8795 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8796 			device_xname(sc->sc_dev)));
   8797 	}
   8798 }
   8799 
   8800 /*
   8801  * wm_linkintr_tbi:
   8802  *
   8803  *	Helper; handle link interrupts for TBI mode.
   8804  */
   8805 static void
   8806 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8807 {
   8808 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8809 	uint32_t status;
   8810 
   8811 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8812 		__func__));
   8813 
   8814 	status = CSR_READ(sc, WMREG_STATUS);
   8815 	if (icr & ICR_LSC) {
   8816 		wm_check_for_link(sc);
   8817 		if (status & STATUS_LU) {
   8818 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8819 				device_xname(sc->sc_dev),
   8820 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8821 			/*
   8822 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8823 			 * so we should update sc->sc_ctrl
   8824 			 */
   8825 
   8826 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8827 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8828 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8829 			if (status & STATUS_FD)
   8830 				sc->sc_tctl |=
   8831 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8832 			else
   8833 				sc->sc_tctl |=
   8834 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8835 			if (sc->sc_ctrl & CTRL_TFCE)
   8836 				sc->sc_fcrtl |= FCRTL_XONE;
   8837 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8838 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8839 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   8840 			sc->sc_tbi_linkup = 1;
   8841 			if_link_state_change(ifp, LINK_STATE_UP);
   8842 		} else {
   8843 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8844 				device_xname(sc->sc_dev)));
   8845 			sc->sc_tbi_linkup = 0;
   8846 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8847 		}
   8848 		/* Update LED */
   8849 		wm_tbi_serdes_set_linkled(sc);
   8850 	} else if (icr & ICR_RXSEQ) {
   8851 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8852 			device_xname(sc->sc_dev)));
   8853 	}
   8854 }
   8855 
   8856 /*
   8857  * wm_linkintr_serdes:
   8858  *
    8859  *	Helper; handle link interrupts for SERDES mode.
   8860  */
   8861 static void
   8862 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8863 {
   8864 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8865 	struct mii_data *mii = &sc->sc_mii;
   8866 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8867 	uint32_t pcs_adv, pcs_lpab, reg;
   8868 
   8869 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8870 		__func__));
   8871 
   8872 	if (icr & ICR_LSC) {
   8873 		/* Check PCS */
   8874 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8875 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8876 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8877 				device_xname(sc->sc_dev)));
   8878 			mii->mii_media_status |= IFM_ACTIVE;
   8879 			sc->sc_tbi_linkup = 1;
   8880 			if_link_state_change(ifp, LINK_STATE_UP);
   8881 		} else {
   8882 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8883 				device_xname(sc->sc_dev)));
   8884 			mii->mii_media_status |= IFM_NONE;
   8885 			sc->sc_tbi_linkup = 0;
   8886 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8887 			wm_tbi_serdes_set_linkled(sc);
   8888 			return;
   8889 		}
   8890 		mii->mii_media_active |= IFM_1000_SX;
   8891 		if ((reg & PCS_LSTS_FDX) != 0)
   8892 			mii->mii_media_active |= IFM_FDX;
   8893 		else
   8894 			mii->mii_media_active |= IFM_HDX;
   8895 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8896 			/* Check flow */
   8897 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8898 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8899 				DPRINTF(WM_DEBUG_LINK,
   8900 				    ("XXX LINKOK but not ACOMP\n"));
   8901 				return;
   8902 			}
   8903 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8904 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8905 			DPRINTF(WM_DEBUG_LINK,
   8906 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8907 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8908 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8909 				mii->mii_media_active |= IFM_FLOW
   8910 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8911 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8912 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8913 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8914 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8915 				mii->mii_media_active |= IFM_FLOW
   8916 				    | IFM_ETH_TXPAUSE;
   8917 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8918 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8919 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8920 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8921 				mii->mii_media_active |= IFM_FLOW
   8922 				    | IFM_ETH_RXPAUSE;
   8923 		}
   8924 		/* Update LED */
   8925 		wm_tbi_serdes_set_linkled(sc);
   8926 	} else {
   8927 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8928 		    device_xname(sc->sc_dev)));
   8929 	}
   8930 }
   8931 
   8932 /*
   8933  * wm_linkintr:
   8934  *
   8935  *	Helper; handle link interrupts.
   8936  */
   8937 static void
   8938 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8939 {
   8940 
   8941 	KASSERT(WM_CORE_LOCKED(sc));
   8942 
   8943 	if (sc->sc_flags & WM_F_HAS_MII)
   8944 		wm_linkintr_gmii(sc, icr);
   8945 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8946 	    && (sc->sc_type >= WM_T_82575))
   8947 		wm_linkintr_serdes(sc, icr);
   8948 	else
   8949 		wm_linkintr_tbi(sc, icr);
   8950 }
   8951 
   8952 /*
   8953  * wm_intr_legacy:
   8954  *
   8955  *	Interrupt service routine for INTx and MSI.
   8956  */
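/*
 * Note: the handler loops, re-reading ICR (which is read-to-clear in
 * this mode) until no interrupt cause we care about remains, and then
 * defers further transmission to the per-queue softint.
 */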
   8957 static int
   8958 wm_intr_legacy(void *arg)
   8959 {
   8960 	struct wm_softc *sc = arg;
   8961 	struct wm_queue *wmq = &sc->sc_queue[0];
   8962 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8963 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8964 	uint32_t icr, rndval = 0;
   8965 	int handled = 0;
   8966 
   8967 	while (1 /* CONSTCOND */) {
   8968 		icr = CSR_READ(sc, WMREG_ICR);
   8969 		if ((icr & sc->sc_icr) == 0)
   8970 			break;
   8971 		if (handled == 0) {
   8972 			DPRINTF(WM_DEBUG_TX,
   8973 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8974 		}
   8975 		if (rndval == 0)
   8976 			rndval = icr;
   8977 
   8978 		mutex_enter(rxq->rxq_lock);
   8979 
   8980 		if (rxq->rxq_stopping) {
   8981 			mutex_exit(rxq->rxq_lock);
   8982 			break;
   8983 		}
   8984 
   8985 		handled = 1;
   8986 
   8987 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8988 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8989 			DPRINTF(WM_DEBUG_RX,
   8990 			    ("%s: RX: got Rx intr 0x%08x\n",
   8991 				device_xname(sc->sc_dev),
   8992 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   8993 			WM_Q_EVCNT_INCR(rxq, intr);
   8994 		}
   8995 #endif
   8996 		/*
   8997 		 * wm_rxeof() does *not* call upper layer functions directly,
    8998 		 * as if_percpuq_enqueue() just calls softint_schedule().
   8999 		 * So, we can call wm_rxeof() in interrupt context.
   9000 		 */
   9001 		wm_rxeof(rxq, UINT_MAX);
   9002 
   9003 		mutex_exit(rxq->rxq_lock);
   9004 		mutex_enter(txq->txq_lock);
   9005 
   9006 		if (txq->txq_stopping) {
   9007 			mutex_exit(txq->txq_lock);
   9008 			break;
   9009 		}
   9010 
   9011 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9012 		if (icr & ICR_TXDW) {
   9013 			DPRINTF(WM_DEBUG_TX,
   9014 			    ("%s: TX: got TXDW interrupt\n",
   9015 				device_xname(sc->sc_dev)));
   9016 			WM_Q_EVCNT_INCR(txq, txdw);
   9017 		}
   9018 #endif
   9019 		wm_txeof(txq, UINT_MAX);
   9020 
   9021 		mutex_exit(txq->txq_lock);
   9022 		WM_CORE_LOCK(sc);
   9023 
   9024 		if (sc->sc_core_stopping) {
   9025 			WM_CORE_UNLOCK(sc);
   9026 			break;
   9027 		}
   9028 
   9029 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9030 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9031 			wm_linkintr(sc, icr);
   9032 		}
   9033 
   9034 		WM_CORE_UNLOCK(sc);
   9035 
   9036 		if (icr & ICR_RXO) {
   9037 #if defined(WM_DEBUG)
   9038 			log(LOG_WARNING, "%s: Receive overrun\n",
   9039 			    device_xname(sc->sc_dev));
   9040 #endif /* defined(WM_DEBUG) */
   9041 		}
   9042 	}
   9043 
   9044 	rnd_add_uint32(&sc->rnd_source, rndval);
   9045 
   9046 	if (handled) {
   9047 		/* Try to get more packets going. */
   9048 		softint_schedule(wmq->wmq_si);
   9049 	}
   9050 
   9051 	return handled;
   9052 }
   9053 
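/*
 * wm_txrxintr_disable/wm_txrxintr_enable:
 *
 *	Mask/unmask the Tx/Rx interrupts of one queue pair: the 82574
 *	uses IMC/IMS, the 82575 uses EIMC/EIMS with the EITR_*_QUEUE
 *	bits, and the other multiqueue devices use EIMC/EIMS with one
 *	bit per MSI-X vector.
 */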
   9054 static inline void
   9055 wm_txrxintr_disable(struct wm_queue *wmq)
   9056 {
   9057 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9058 
   9059 	if (sc->sc_type == WM_T_82574)
   9060 		CSR_WRITE(sc, WMREG_IMC,
   9061 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9062 	else if (sc->sc_type == WM_T_82575)
   9063 		CSR_WRITE(sc, WMREG_EIMC,
   9064 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9065 	else
   9066 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9067 }
   9068 
   9069 static inline void
   9070 wm_txrxintr_enable(struct wm_queue *wmq)
   9071 {
   9072 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9073 
   9074 	wm_itrs_calculate(sc, wmq);
   9075 
   9076 	/*
    9077 	 * ICR_OTHER, which was disabled in wm_linkintr_msix(), is enabled
    9078 	 * again here. It does not matter which of RXQ(0) and RXQ(1)
    9079 	 * re-enables ICR_OTHER first, because each RXQ/TXQ interrupt is
    9080 	 * disabled while its wm_handle_queue(wmq) is running.
   9081 	 */
   9082 	if (sc->sc_type == WM_T_82574)
   9083 		CSR_WRITE(sc, WMREG_IMS,
   9084 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9085 	else if (sc->sc_type == WM_T_82575)
   9086 		CSR_WRITE(sc, WMREG_EIMS,
   9087 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9088 	else
   9089 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9090 }
   9091 
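/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx MSI-X vector of a queue
 *	pair.  The queue's interrupts stay disabled while the deferred
 *	softint (wm_handle_queue()) still has work left over from the
 *	interrupt process limits.
 */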
   9092 static int
   9093 wm_txrxintr_msix(void *arg)
   9094 {
   9095 	struct wm_queue *wmq = arg;
   9096 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9097 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9098 	struct wm_softc *sc = txq->txq_sc;
   9099 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9100 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9101 	bool txmore;
   9102 	bool rxmore;
   9103 
   9104 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9105 
   9106 	DPRINTF(WM_DEBUG_TX,
   9107 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9108 
   9109 	wm_txrxintr_disable(wmq);
   9110 
   9111 	mutex_enter(txq->txq_lock);
   9112 
   9113 	if (txq->txq_stopping) {
   9114 		mutex_exit(txq->txq_lock);
   9115 		return 0;
   9116 	}
   9117 
   9118 	WM_Q_EVCNT_INCR(txq, txdw);
   9119 	txmore = wm_txeof(txq, txlimit);
    9120 	/* wm_deferred_start_locked() is called in wm_handle_queue(). */
   9121 	mutex_exit(txq->txq_lock);
   9122 
   9123 	DPRINTF(WM_DEBUG_RX,
   9124 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9125 	mutex_enter(rxq->rxq_lock);
   9126 
   9127 	if (rxq->rxq_stopping) {
   9128 		mutex_exit(rxq->rxq_lock);
   9129 		return 0;
   9130 	}
   9131 
   9132 	WM_Q_EVCNT_INCR(rxq, intr);
   9133 	rxmore = wm_rxeof(rxq, rxlimit);
   9134 	mutex_exit(rxq->rxq_lock);
   9135 
   9136 	wm_itrs_writereg(sc, wmq);
   9137 
   9138 	if (txmore || rxmore)
   9139 		softint_schedule(wmq->wmq_si);
   9140 	else
   9141 		wm_txrxintr_enable(wmq);
   9142 
   9143 	return 1;
   9144 }
   9145 
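/*
 * wm_handle_queue:
 *
 *	Softint handler; process the Tx/Rx work left over when
 *	wm_txrxintr_msix() hit its process limits, run the deferred
 *	start, and then either reschedule itself or re-enable the
 *	queue's interrupts.
 */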
   9146 static void
   9147 wm_handle_queue(void *arg)
   9148 {
   9149 	struct wm_queue *wmq = arg;
   9150 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9151 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9152 	struct wm_softc *sc = txq->txq_sc;
   9153 	u_int txlimit = sc->sc_tx_process_limit;
   9154 	u_int rxlimit = sc->sc_rx_process_limit;
   9155 	bool txmore;
   9156 	bool rxmore;
   9157 
   9158 	mutex_enter(txq->txq_lock);
   9159 	if (txq->txq_stopping) {
   9160 		mutex_exit(txq->txq_lock);
   9161 		return;
   9162 	}
   9163 	txmore = wm_txeof(txq, txlimit);
   9164 	wm_deferred_start_locked(txq);
   9165 	mutex_exit(txq->txq_lock);
   9166 
   9167 	mutex_enter(rxq->rxq_lock);
   9168 	if (rxq->rxq_stopping) {
   9169 		mutex_exit(rxq->rxq_lock);
   9170 		return;
   9171 	}
   9172 	WM_Q_EVCNT_INCR(rxq, defer);
   9173 	rxmore = wm_rxeof(rxq, rxlimit);
   9174 	mutex_exit(rxq->rxq_lock);
   9175 
   9176 	if (txmore || rxmore)
   9177 		softint_schedule(wmq->wmq_si);
   9178 	else
   9179 		wm_txrxintr_enable(wmq);
   9180 }
   9181 
   9182 /*
   9183  * wm_linkintr_msix:
   9184  *
   9185  *	Interrupt service routine for link status change for MSI-X.
   9186  */
   9187 static int
   9188 wm_linkintr_msix(void *arg)
   9189 {
   9190 	struct wm_softc *sc = arg;
   9191 	uint32_t reg;
   9192 	bool has_rxo;
   9193 
   9194 	DPRINTF(WM_DEBUG_LINK,
   9195 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9196 
   9197 	reg = CSR_READ(sc, WMREG_ICR);
   9198 	WM_CORE_LOCK(sc);
   9199 	if (sc->sc_core_stopping)
   9200 		goto out;
   9201 
   9202 	if ((reg & ICR_LSC) != 0) {
   9203 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9204 		wm_linkintr(sc, ICR_LSC);
   9205 	}
   9206 
   9207 	/*
   9208 	 * XXX 82574 MSI-X mode workaround
   9209 	 *
    9210 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises an interrupt
    9211 	 * on the ICR_OTHER MSI-X vector, but on neither the ICR_RXQ(0) nor
    9212 	 * the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
    9213 	 * interrupts by writing WMREG_ICS, to get receive packets processed.
   9214 	 */
   9215 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9216 #if defined(WM_DEBUG)
   9217 		log(LOG_WARNING, "%s: Receive overrun\n",
   9218 		    device_xname(sc->sc_dev));
   9219 #endif /* defined(WM_DEBUG) */
   9220 
   9221 		has_rxo = true;
   9222 		/*
    9223 		 * The RXO interrupt fires at a very high rate when the
    9224 		 * receive traffic rate is high, so we use polling mode for
    9225 		 * ICR_OTHER just as for the Tx/Rx interrupts. ICR_OTHER is
    9226 		 * re-enabled at the end of wm_txrxintr_msix(), which is
    9227 		 * kicked by both the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9228 		 */
   9229 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9230 
   9231 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9232 	}
    9233 
   9236 out:
   9237 	WM_CORE_UNLOCK(sc);
   9238 
   9239 	if (sc->sc_type == WM_T_82574) {
   9240 		if (!has_rxo)
   9241 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9242 		else
   9243 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9244 	} else if (sc->sc_type == WM_T_82575)
   9245 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9246 	else
   9247 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9248 
   9249 	return 1;
   9250 }
   9251 
   9252 /*
   9253  * Media related.
   9254  * GMII, SGMII, TBI (and SERDES)
   9255  */
   9256 
   9257 /* Common */
   9258 
   9259 /*
   9260  * wm_tbi_serdes_set_linkled:
   9261  *
   9262  *	Update the link LED on TBI and SERDES devices.
   9263  */
   9264 static void
   9265 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9266 {
   9267 
   9268 	if (sc->sc_tbi_linkup)
   9269 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9270 	else
   9271 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9272 
   9273 	/* 82540 or newer devices are active low */
   9274 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9275 
   9276 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9277 }
   9278 
   9279 /* GMII related */
   9280 
   9281 /*
   9282  * wm_gmii_reset:
   9283  *
   9284  *	Reset the PHY.
   9285  */
   9286 static void
   9287 wm_gmii_reset(struct wm_softc *sc)
   9288 {
   9289 	uint32_t reg;
   9290 	int rv;
   9291 
   9292 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9293 		device_xname(sc->sc_dev), __func__));
   9294 
   9295 	rv = sc->phy.acquire(sc);
   9296 	if (rv != 0) {
   9297 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9298 		    __func__);
   9299 		return;
   9300 	}
   9301 
   9302 	switch (sc->sc_type) {
   9303 	case WM_T_82542_2_0:
   9304 	case WM_T_82542_2_1:
   9305 		/* null */
   9306 		break;
   9307 	case WM_T_82543:
   9308 		/*
   9309 		 * With 82543, we need to force speed and duplex on the MAC
   9310 		 * equal to what the PHY speed and duplex configuration is.
   9311 		 * In addition, we need to perform a hardware reset on the PHY
   9312 		 * to take it out of reset.
   9313 		 */
   9314 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9315 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9316 
   9317 		/* The PHY reset pin is active-low. */
   9318 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9319 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9320 		    CTRL_EXT_SWDPIN(4));
   9321 		reg |= CTRL_EXT_SWDPIO(4);
   9322 
   9323 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9324 		CSR_WRITE_FLUSH(sc);
   9325 		delay(10*1000);
   9326 
   9327 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9328 		CSR_WRITE_FLUSH(sc);
   9329 		delay(150);
   9330 #if 0
   9331 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9332 #endif
   9333 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9334 		break;
   9335 	case WM_T_82544:	/* reset 10000us */
   9336 	case WM_T_82540:
   9337 	case WM_T_82545:
   9338 	case WM_T_82545_3:
   9339 	case WM_T_82546:
   9340 	case WM_T_82546_3:
   9341 	case WM_T_82541:
   9342 	case WM_T_82541_2:
   9343 	case WM_T_82547:
   9344 	case WM_T_82547_2:
   9345 	case WM_T_82571:	/* reset 100us */
   9346 	case WM_T_82572:
   9347 	case WM_T_82573:
   9348 	case WM_T_82574:
   9349 	case WM_T_82575:
   9350 	case WM_T_82576:
   9351 	case WM_T_82580:
   9352 	case WM_T_I350:
   9353 	case WM_T_I354:
   9354 	case WM_T_I210:
   9355 	case WM_T_I211:
   9356 	case WM_T_82583:
   9357 	case WM_T_80003:
   9358 		/* generic reset */
   9359 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9360 		CSR_WRITE_FLUSH(sc);
   9361 		delay(20000);
   9362 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9363 		CSR_WRITE_FLUSH(sc);
   9364 		delay(20000);
   9365 
   9366 		if ((sc->sc_type == WM_T_82541)
   9367 		    || (sc->sc_type == WM_T_82541_2)
   9368 		    || (sc->sc_type == WM_T_82547)
   9369 		    || (sc->sc_type == WM_T_82547_2)) {
    9370 			/* Workarounds for IGP are done in igp_reset() */
   9371 			/* XXX add code to set LED after phy reset */
   9372 		}
   9373 		break;
   9374 	case WM_T_ICH8:
   9375 	case WM_T_ICH9:
   9376 	case WM_T_ICH10:
   9377 	case WM_T_PCH:
   9378 	case WM_T_PCH2:
   9379 	case WM_T_PCH_LPT:
   9380 	case WM_T_PCH_SPT:
   9381 	case WM_T_PCH_CNP:
   9382 		/* generic reset */
   9383 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9384 		CSR_WRITE_FLUSH(sc);
   9385 		delay(100);
   9386 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9387 		CSR_WRITE_FLUSH(sc);
   9388 		delay(150);
   9389 		break;
   9390 	default:
   9391 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9392 		    __func__);
   9393 		break;
   9394 	}
   9395 
   9396 	sc->phy.release(sc);
   9397 
   9398 	/* get_cfg_done */
   9399 	wm_get_cfg_done(sc);
   9400 
   9401 	/* extra setup */
   9402 	switch (sc->sc_type) {
   9403 	case WM_T_82542_2_0:
   9404 	case WM_T_82542_2_1:
   9405 	case WM_T_82543:
   9406 	case WM_T_82544:
   9407 	case WM_T_82540:
   9408 	case WM_T_82545:
   9409 	case WM_T_82545_3:
   9410 	case WM_T_82546:
   9411 	case WM_T_82546_3:
   9412 	case WM_T_82541_2:
   9413 	case WM_T_82547_2:
   9414 	case WM_T_82571:
   9415 	case WM_T_82572:
   9416 	case WM_T_82573:
   9417 	case WM_T_82574:
   9418 	case WM_T_82583:
   9419 	case WM_T_82575:
   9420 	case WM_T_82576:
   9421 	case WM_T_82580:
   9422 	case WM_T_I350:
   9423 	case WM_T_I354:
   9424 	case WM_T_I210:
   9425 	case WM_T_I211:
   9426 	case WM_T_80003:
   9427 		/* null */
   9428 		break;
   9429 	case WM_T_82541:
   9430 	case WM_T_82547:
    9431 		/* XXX Configure activity LED after PHY reset */
   9432 		break;
   9433 	case WM_T_ICH8:
   9434 	case WM_T_ICH9:
   9435 	case WM_T_ICH10:
   9436 	case WM_T_PCH:
   9437 	case WM_T_PCH2:
   9438 	case WM_T_PCH_LPT:
   9439 	case WM_T_PCH_SPT:
   9440 	case WM_T_PCH_CNP:
   9441 		wm_phy_post_reset(sc);
   9442 		break;
   9443 	default:
   9444 		panic("%s: unknown type\n", __func__);
   9445 		break;
   9446 	}
   9447 }
   9448 
   9449 /*
   9450  * Setup sc_phytype and mii_{read|write}reg.
   9451  *
    9452  *  To identify the PHY type, the correct read/write functions must
    9453  * be selected. To select them, the PCI ID or the MAC type is needed
    9454  * without accessing the PHY registers.
    9455  *
    9456  *  On the first call of this function, the PHY ID is not known yet.
    9457  * Check the PCI ID or the MAC type. The list of PCI IDs may not be
    9458  * perfect, so the result might be incorrect.
    9459  *
    9460  *  On the second call, the PHY OUI and model are used to identify
    9461  * the PHY type. This might still not be perfect because of missing
    9462  * comparison entries, but it should be better than the first call.
    9463  *
    9464  *  If the newly detected result differs from the previous
    9465  * assumption, a diagnostic message is printed.
   9466  */
   9467 static void
   9468 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9469     uint16_t phy_model)
   9470 {
   9471 	device_t dev = sc->sc_dev;
   9472 	struct mii_data *mii = &sc->sc_mii;
   9473 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9474 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9475 	mii_readreg_t new_readreg;
   9476 	mii_writereg_t new_writereg;
   9477 
   9478 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9479 		device_xname(sc->sc_dev), __func__));
   9480 
   9481 	if (mii->mii_readreg == NULL) {
   9482 		/*
   9483 		 *  This is the first call of this function. For ICH and PCH
   9484 		 * variants, it's difficult to determine the PHY access method
   9485 		 * by sc_type, so use the PCI product ID for some devices.
   9486 		 */
   9487 
   9488 		switch (sc->sc_pcidevid) {
   9489 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9490 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9491 			/* 82577 */
   9492 			new_phytype = WMPHY_82577;
   9493 			break;
   9494 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9495 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9496 			/* 82578 */
   9497 			new_phytype = WMPHY_82578;
   9498 			break;
   9499 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9500 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9501 			/* 82579 */
   9502 			new_phytype = WMPHY_82579;
   9503 			break;
   9504 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9505 		case PCI_PRODUCT_INTEL_82801I_BM:
   9506 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9507 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9508 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9509 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9510 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9511 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9512 			/* ICH8, 9, 10 with 82567 */
   9513 			new_phytype = WMPHY_BM;
   9514 			break;
   9515 		default:
   9516 			break;
   9517 		}
   9518 	} else {
   9519 		/* It's not the first call. Use PHY OUI and model */
   9520 		switch (phy_oui) {
   9521 		case MII_OUI_ATHEROS: /* XXX ??? */
   9522 			switch (phy_model) {
   9523 			case 0x0004: /* XXX */
   9524 				new_phytype = WMPHY_82578;
   9525 				break;
   9526 			default:
   9527 				break;
   9528 			}
   9529 			break;
   9530 		case MII_OUI_xxMARVELL:
   9531 			switch (phy_model) {
   9532 			case MII_MODEL_xxMARVELL_I210:
   9533 				new_phytype = WMPHY_I210;
   9534 				break;
   9535 			case MII_MODEL_xxMARVELL_E1011:
   9536 			case MII_MODEL_xxMARVELL_E1000_3:
   9537 			case MII_MODEL_xxMARVELL_E1000_5:
   9538 			case MII_MODEL_xxMARVELL_E1112:
   9539 				new_phytype = WMPHY_M88;
   9540 				break;
   9541 			case MII_MODEL_xxMARVELL_E1149:
   9542 				new_phytype = WMPHY_BM;
   9543 				break;
   9544 			case MII_MODEL_xxMARVELL_E1111:
   9545 			case MII_MODEL_xxMARVELL_I347:
   9546 			case MII_MODEL_xxMARVELL_E1512:
   9547 			case MII_MODEL_xxMARVELL_E1340M:
   9548 			case MII_MODEL_xxMARVELL_E1543:
   9549 				new_phytype = WMPHY_M88;
   9550 				break;
   9551 			case MII_MODEL_xxMARVELL_I82563:
   9552 				new_phytype = WMPHY_GG82563;
   9553 				break;
   9554 			default:
   9555 				break;
   9556 			}
   9557 			break;
   9558 		case MII_OUI_INTEL:
   9559 			switch (phy_model) {
   9560 			case MII_MODEL_INTEL_I82577:
   9561 				new_phytype = WMPHY_82577;
   9562 				break;
   9563 			case MII_MODEL_INTEL_I82579:
   9564 				new_phytype = WMPHY_82579;
   9565 				break;
   9566 			case MII_MODEL_INTEL_I217:
   9567 				new_phytype = WMPHY_I217;
   9568 				break;
   9569 			case MII_MODEL_INTEL_I82580:
   9570 			case MII_MODEL_INTEL_I350:
   9571 				new_phytype = WMPHY_82580;
   9572 				break;
   9573 			default:
   9574 				break;
   9575 			}
   9576 			break;
   9577 		case MII_OUI_yyINTEL:
   9578 			switch (phy_model) {
   9579 			case MII_MODEL_yyINTEL_I82562G:
   9580 			case MII_MODEL_yyINTEL_I82562EM:
   9581 			case MII_MODEL_yyINTEL_I82562ET:
   9582 				new_phytype = WMPHY_IFE;
   9583 				break;
   9584 			case MII_MODEL_yyINTEL_IGP01E1000:
   9585 				new_phytype = WMPHY_IGP;
   9586 				break;
   9587 			case MII_MODEL_yyINTEL_I82566:
   9588 				new_phytype = WMPHY_IGP_3;
   9589 				break;
   9590 			default:
   9591 				break;
   9592 			}
   9593 			break;
   9594 		default:
   9595 			break;
   9596 		}
   9597 		if (new_phytype == WMPHY_UNKNOWN)
   9598 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9599 			    __func__);
   9600 
   9601 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9602 		    && (sc->sc_phytype != new_phytype)) {
    9603 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9604 			    "was incorrect. PHY type from PHY ID = %u\n",
   9605 			    sc->sc_phytype, new_phytype);
   9606 		}
   9607 	}
   9608 
   9609 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9610 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9611 		/* SGMII */
   9612 		new_readreg = wm_sgmii_readreg;
   9613 		new_writereg = wm_sgmii_writereg;
   9614 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9615 		/* BM2 (phyaddr == 1) */
   9616 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9617 		    && (new_phytype != WMPHY_BM)
   9618 		    && (new_phytype != WMPHY_UNKNOWN))
   9619 			doubt_phytype = new_phytype;
   9620 		new_phytype = WMPHY_BM;
   9621 		new_readreg = wm_gmii_bm_readreg;
   9622 		new_writereg = wm_gmii_bm_writereg;
   9623 	} else if (sc->sc_type >= WM_T_PCH) {
   9624 		/* All PCH* use _hv_ */
   9625 		new_readreg = wm_gmii_hv_readreg;
   9626 		new_writereg = wm_gmii_hv_writereg;
   9627 	} else if (sc->sc_type >= WM_T_ICH8) {
   9628 		/* non-82567 ICH8, 9 and 10 */
   9629 		new_readreg = wm_gmii_i82544_readreg;
   9630 		new_writereg = wm_gmii_i82544_writereg;
   9631 	} else if (sc->sc_type >= WM_T_80003) {
   9632 		/* 80003 */
   9633 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9634 		    && (new_phytype != WMPHY_GG82563)
   9635 		    && (new_phytype != WMPHY_UNKNOWN))
   9636 			doubt_phytype = new_phytype;
   9637 		new_phytype = WMPHY_GG82563;
   9638 		new_readreg = wm_gmii_i80003_readreg;
   9639 		new_writereg = wm_gmii_i80003_writereg;
   9640 	} else if (sc->sc_type >= WM_T_I210) {
   9641 		/* I210 and I211 */
   9642 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9643 		    && (new_phytype != WMPHY_I210)
   9644 		    && (new_phytype != WMPHY_UNKNOWN))
   9645 			doubt_phytype = new_phytype;
   9646 		new_phytype = WMPHY_I210;
   9647 		new_readreg = wm_gmii_gs40g_readreg;
   9648 		new_writereg = wm_gmii_gs40g_writereg;
   9649 	} else if (sc->sc_type >= WM_T_82580) {
   9650 		/* 82580, I350 and I354 */
   9651 		new_readreg = wm_gmii_82580_readreg;
   9652 		new_writereg = wm_gmii_82580_writereg;
   9653 	} else if (sc->sc_type >= WM_T_82544) {
    9654 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9655 		new_readreg = wm_gmii_i82544_readreg;
   9656 		new_writereg = wm_gmii_i82544_writereg;
   9657 	} else {
   9658 		new_readreg = wm_gmii_i82543_readreg;
   9659 		new_writereg = wm_gmii_i82543_writereg;
   9660 	}
   9661 
   9662 	if (new_phytype == WMPHY_BM) {
   9663 		/* All BM use _bm_ */
   9664 		new_readreg = wm_gmii_bm_readreg;
   9665 		new_writereg = wm_gmii_bm_writereg;
   9666 	}
   9667 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9668 		/* All PCH* use _hv_ */
   9669 		new_readreg = wm_gmii_hv_readreg;
   9670 		new_writereg = wm_gmii_hv_writereg;
   9671 	}
   9672 
   9673 	/* Diag output */
   9674 	if (doubt_phytype != WMPHY_UNKNOWN)
   9675 		aprint_error_dev(dev, "Assumed new PHY type was "
   9676 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9677 		    new_phytype);
   9678 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9679 	    && (sc->sc_phytype != new_phytype))
    9680 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9681 		    "was incorrect. New PHY type = %u\n",
   9682 		    sc->sc_phytype, new_phytype);
   9683 
   9684 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9685 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9686 
   9687 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9688 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9689 		    "function was incorrect.\n");
   9690 
   9691 	/* Update now */
   9692 	sc->sc_phytype = new_phytype;
   9693 	mii->mii_readreg = new_readreg;
   9694 	mii->mii_writereg = new_writereg;
   9695 }
   9696 
   9697 /*
   9698  * wm_get_phy_id_82575:
   9699  *
   9700  * Return PHY ID. Return -1 if it failed.
   9701  */
   9702 static int
   9703 wm_get_phy_id_82575(struct wm_softc *sc)
   9704 {
   9705 	uint32_t reg;
   9706 	int phyid = -1;
   9707 
   9708 	/* XXX */
   9709 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9710 		return -1;
   9711 
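         	/*
         	 * With SGMII over MDIO, the external PHY's address is
         	 * reported in the MDIC register on the 82575/82576 and in
         	 * MDICNFG on the 82580 and later.
         	 */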
   9712 	if (wm_sgmii_uses_mdio(sc)) {
   9713 		switch (sc->sc_type) {
   9714 		case WM_T_82575:
   9715 		case WM_T_82576:
   9716 			reg = CSR_READ(sc, WMREG_MDIC);
   9717 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9718 			break;
   9719 		case WM_T_82580:
   9720 		case WM_T_I350:
   9721 		case WM_T_I354:
   9722 		case WM_T_I210:
   9723 		case WM_T_I211:
   9724 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9725 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9726 			break;
   9727 		default:
   9728 			return -1;
   9729 		}
   9730 	}
   9731 
   9732 	return phyid;
   9733 }
   9734 
   9735 
   9736 /*
   9737  * wm_gmii_mediainit:
   9738  *
   9739  *	Initialize media for use on 1000BASE-T devices.
   9740  */
   9741 static void
   9742 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9743 {
   9744 	device_t dev = sc->sc_dev;
   9745 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9746 	struct mii_data *mii = &sc->sc_mii;
   9747 	uint32_t reg;
   9748 
   9749 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9750 		device_xname(sc->sc_dev), __func__));
   9751 
   9752 	/* We have GMII. */
   9753 	sc->sc_flags |= WM_F_HAS_MII;
   9754 
   9755 	if (sc->sc_type == WM_T_80003)
   9756 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9757 	else
   9758 		sc->sc_tipg = TIPG_1000T_DFLT;
   9759 
   9760 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9761 	if ((sc->sc_type == WM_T_82580)
   9762 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9763 	    || (sc->sc_type == WM_T_I211)) {
   9764 		reg = CSR_READ(sc, WMREG_PHPM);
   9765 		reg &= ~PHPM_GO_LINK_D;
   9766 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9767 	}
   9768 
   9769 	/*
   9770 	 * Let the chip set speed/duplex on its own based on
   9771 	 * signals from the PHY.
   9772 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9773 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9774 	 */
   9775 	sc->sc_ctrl |= CTRL_SLU;
   9776 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9777 
   9778 	/* Initialize our media structures and probe the GMII. */
   9779 	mii->mii_ifp = ifp;
   9780 
   9781 	mii->mii_statchg = wm_gmii_statchg;
   9782 
    9783 	/* Get PHY control from SMBus to PCIe */
   9784 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9785 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9786 	    || (sc->sc_type == WM_T_PCH_CNP))
   9787 		wm_smbustopci(sc);
   9788 
   9789 	wm_gmii_reset(sc);
   9790 
   9791 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9792 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9793 	    wm_gmii_mediastatus);
   9794 
   9795 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9796 	    || (sc->sc_type == WM_T_82580)
   9797 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9798 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9799 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9800 			/* Attach only one port */
   9801 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9802 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9803 		} else {
   9804 			int i, id;
   9805 			uint32_t ctrl_ext;
   9806 
   9807 			id = wm_get_phy_id_82575(sc);
   9808 			if (id != -1) {
   9809 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9810 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9811 			}
   9812 			if ((id == -1)
   9813 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9814 				/* Power on the SGMII PHY if it is disabled */
   9815 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9816 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9817 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9818 				CSR_WRITE_FLUSH(sc);
   9819 				delay(300*1000); /* XXX too long */
   9820 
    9821 				/* Try PHY addresses from 1 to 7 */
   9822 				for (i = 1; i < 8; i++)
   9823 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9824 					    0xffffffff, i, MII_OFFSET_ANY,
   9825 					    MIIF_DOPAUSE);
   9826 
    9827 				/* Restore the previous SFP cage power state */
   9828 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9829 			}
   9830 		}
   9831 	} else {
   9832 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9833 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9834 	}
   9835 
   9836 	/*
    9837 	 * If the MAC is PCH2 or newer and failed to detect a MII PHY, call
    9838 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9839 	 */
   9840 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9841 		|| (sc->sc_type == WM_T_PCH_SPT)
   9842 		|| (sc->sc_type == WM_T_PCH_CNP))
   9843 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9844 		wm_set_mdio_slow_mode_hv(sc);
   9845 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9846 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9847 	}
   9848 
   9849 	/*
   9850 	 * (For ICH8 variants)
   9851 	 * If PHY detection failed, use BM's r/w function and retry.
   9852 	 */
   9853 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9854 		/* if failed, retry with *_bm_* */
   9855 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9856 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9857 		    sc->sc_phytype);
   9858 		sc->sc_phytype = WMPHY_BM;
   9859 		mii->mii_readreg = wm_gmii_bm_readreg;
   9860 		mii->mii_writereg = wm_gmii_bm_writereg;
   9861 
   9862 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9863 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9864 	}
   9865 
   9866 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9867 		/* No PHY was found */
   9868 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9869 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9870 		sc->sc_phytype = WMPHY_NONE;
   9871 	} else {
   9872 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9873 
   9874 		/*
   9875 		 * PHY Found! Check PHY type again by the second call of
   9876 		 * wm_gmii_setup_phytype.
   9877 		 */
   9878 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9879 		    child->mii_mpd_model);
   9880 
   9881 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9882 	}
   9883 }
   9884 
   9885 /*
   9886  * wm_gmii_mediachange:	[ifmedia interface function]
   9887  *
   9888  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9889  */
   9890 static int
   9891 wm_gmii_mediachange(struct ifnet *ifp)
   9892 {
   9893 	struct wm_softc *sc = ifp->if_softc;
   9894 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9895 	int rc;
   9896 
   9897 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9898 		device_xname(sc->sc_dev), __func__));
   9899 	if ((ifp->if_flags & IFF_UP) == 0)
   9900 		return 0;
   9901 
   9902 	/* Disable D0 LPLU. */
   9903 	wm_lplu_d0_disable(sc);
   9904 
   9905 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9906 	sc->sc_ctrl |= CTRL_SLU;
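         	/*
         	 * With IFM_AUTO, or on anything newer than the 82543, speed
         	 * and duplex are taken from the PHY; only the 82543 has them
         	 * forced in CTRL below.
         	 */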
   9907 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9908 	    || (sc->sc_type > WM_T_82543)) {
   9909 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9910 	} else {
   9911 		sc->sc_ctrl &= ~CTRL_ASDE;
   9912 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9913 		if (ife->ifm_media & IFM_FDX)
   9914 			sc->sc_ctrl |= CTRL_FD;
   9915 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9916 		case IFM_10_T:
   9917 			sc->sc_ctrl |= CTRL_SPEED_10;
   9918 			break;
   9919 		case IFM_100_TX:
   9920 			sc->sc_ctrl |= CTRL_SPEED_100;
   9921 			break;
   9922 		case IFM_1000_T:
   9923 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9924 			break;
   9925 		default:
   9926 			panic("wm_gmii_mediachange: bad media 0x%x",
   9927 			    ife->ifm_media);
   9928 		}
   9929 	}
   9930 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9931 	CSR_WRITE_FLUSH(sc);
   9932 	if (sc->sc_type <= WM_T_82543)
   9933 		wm_gmii_reset(sc);
   9934 
   9935 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9936 		return 0;
   9937 	return rc;
   9938 }
   9939 
   9940 /*
   9941  * wm_gmii_mediastatus:	[ifmedia interface function]
   9942  *
   9943  *	Get the current interface media status on a 1000BASE-T device.
   9944  */
   9945 static void
   9946 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9947 {
   9948 	struct wm_softc *sc = ifp->if_softc;
   9949 
   9950 	ether_mediastatus(ifp, ifmr);
   9951 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9952 	    | sc->sc_flowflags;
   9953 }
   9954 
   9955 #define	MDI_IO		CTRL_SWDPIN(2)
   9956 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9957 #define	MDI_CLK		CTRL_SWDPIN(3)
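         /*
          * On the 82543 the MII is bit-banged through software-controllable
          * pins in CTRL: SWDPIN(2) carries the data and SWDPIN(3) the clock,
          * with MDI_DIR selecting the data pin's direction (host -> PHY).
          */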
   9958 
   9959 static void
   9960 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9961 {
   9962 	uint32_t i, v;
   9963 
   9964 	v = CSR_READ(sc, WMREG_CTRL);
   9965 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9966 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9967 
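         	/* Shift the bits out MSB first, pulsing MDI_CLK for each one. */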
   9968 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9969 		if (data & i)
   9970 			v |= MDI_IO;
   9971 		else
   9972 			v &= ~MDI_IO;
   9973 		CSR_WRITE(sc, WMREG_CTRL, v);
   9974 		CSR_WRITE_FLUSH(sc);
   9975 		delay(10);
   9976 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9977 		CSR_WRITE_FLUSH(sc);
   9978 		delay(10);
   9979 		CSR_WRITE(sc, WMREG_CTRL, v);
   9980 		CSR_WRITE_FLUSH(sc);
   9981 		delay(10);
   9982 	}
   9983 }
   9984 
   9985 static uint32_t
   9986 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9987 {
   9988 	uint32_t v, i, data = 0;
   9989 
   9990 	v = CSR_READ(sc, WMREG_CTRL);
   9991 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9992 	v |= CTRL_SWDPIO(3);
   9993 
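         	/*
         	 * Clock through the turnaround, then shift in 16 data bits,
         	 * sampling each one while MDI_CLK is high.
         	 */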
   9994 	CSR_WRITE(sc, WMREG_CTRL, v);
   9995 	CSR_WRITE_FLUSH(sc);
   9996 	delay(10);
   9997 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9998 	CSR_WRITE_FLUSH(sc);
   9999 	delay(10);
   10000 	CSR_WRITE(sc, WMREG_CTRL, v);
   10001 	CSR_WRITE_FLUSH(sc);
   10002 	delay(10);
   10003 
   10004 	for (i = 0; i < 16; i++) {
   10005 		data <<= 1;
   10006 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10007 		CSR_WRITE_FLUSH(sc);
   10008 		delay(10);
   10009 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10010 			data |= 1;
   10011 		CSR_WRITE(sc, WMREG_CTRL, v);
   10012 		CSR_WRITE_FLUSH(sc);
   10013 		delay(10);
   10014 	}
   10015 
   10016 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10017 	CSR_WRITE_FLUSH(sc);
   10018 	delay(10);
   10019 	CSR_WRITE(sc, WMREG_CTRL, v);
   10020 	CSR_WRITE_FLUSH(sc);
   10021 	delay(10);
   10022 
   10023 	return data;
   10024 }
   10025 
   10026 #undef MDI_IO
   10027 #undef MDI_DIR
   10028 #undef MDI_CLK
   10029 
   10030 /*
   10031  * wm_gmii_i82543_readreg:	[mii interface function]
   10032  *
   10033  *	Read a PHY register on the GMII (i82543 version).
   10034  */
   10035 static int
   10036 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10037 {
   10038 	struct wm_softc *sc = device_private(dev);
   10039 	int rv;
   10040 
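          	/*
          	 * Send a 32-bit preamble, then the 14-bit read frame: the
          	 * start and read opcodes plus the PHY and register addresses.
          	 * The PHY answers with the turnaround and 16 data bits.
          	 */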
   10041 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10042 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10043 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10044 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10045 
   10046 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10047 		device_xname(dev), phy, reg, rv));
   10048 
   10049 	return rv;
   10050 }
   10051 
   10052 /*
   10053  * wm_gmii_i82543_writereg:	[mii interface function]
   10054  *
   10055  *	Write a PHY register on the GMII (i82543 version).
   10056  */
   10057 static void
   10058 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10059 {
   10060 	struct wm_softc *sc = device_private(dev);
   10061 
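          	/* Send the preamble, then the full 32-bit write frame with data. */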
   10062 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10063 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10064 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10065 	    (MII_COMMAND_START << 30), 32);
   10066 }
   10067 
   10068 /*
   10069  * wm_gmii_mdic_readreg:	[mii interface function]
   10070  *
   10071  *	Read a PHY register on the GMII.
   10072  */
   10073 static int
   10074 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10075 {
   10076 	struct wm_softc *sc = device_private(dev);
   10077 	uint32_t mdic = 0;
   10078 	int i, rv;
   10079 
   10080 	if (reg > MII_ADDRMASK) {
   10081 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10082 		    __func__, sc->sc_phytype, reg);
   10083 		reg &= MII_ADDRMASK;
   10084 	}
   10085 
   10086 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10087 	    MDIC_REGADD(reg));
   10088 
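          	/* Poll the ready bit, waiting 50us per iteration. */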
   10089 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10090 		delay(50);
   10091 		mdic = CSR_READ(sc, WMREG_MDIC);
   10092 		if (mdic & MDIC_READY)
   10093 			break;
   10094 	}
   10095 
   10096 	if ((mdic & MDIC_READY) == 0) {
   10097 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10098 		    device_xname(dev), phy, reg);
   10099 		return 0;
   10100 	} else if (mdic & MDIC_E) {
   10101 #if 0 /* This is normal if no PHY is present. */
   10102 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10103 		    device_xname(dev), phy, reg);
   10104 #endif
   10105 		return 0;
   10106 	} else {
   10107 		rv = MDIC_DATA(mdic);
   10108 		if (rv == 0xffff)
   10109 			rv = 0;
   10110 	}
   10111 
   10112 	/*
   10113 	 * Allow some time after each MDIC transaction to avoid
   10114 	 * reading duplicate data in the next MDIC transaction.
   10115 	 */
   10116 	if (sc->sc_type == WM_T_PCH2)
   10117 		delay(100);
   10118 
   10119 	return rv;
   10120 }
   10121 
   10122 /*
   10123  * wm_gmii_mdic_writereg:	[mii interface function]
   10124  *
   10125  *	Write a PHY register on the GMII.
   10126  */
   10127 static void
   10128 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10129 {
   10130 	struct wm_softc *sc = device_private(dev);
   10131 	uint32_t mdic = 0;
   10132 	int i;
   10133 
   10134 	if (reg > MII_ADDRMASK) {
   10135 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10136 		    __func__, sc->sc_phytype, reg);
   10137 		reg &= MII_ADDRMASK;
   10138 	}
   10139 
   10140 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10141 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10142 
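          	/* Poll the ready bit, as in wm_gmii_mdic_readreg(). */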
   10143 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10144 		delay(50);
   10145 		mdic = CSR_READ(sc, WMREG_MDIC);
   10146 		if (mdic & MDIC_READY)
   10147 			break;
   10148 	}
   10149 
   10150 	if ((mdic & MDIC_READY) == 0) {
   10151 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10152 		    device_xname(dev), phy, reg);
   10153 		return;
   10154 	} else if (mdic & MDIC_E) {
   10155 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10156 		    device_xname(dev), phy, reg);
   10157 		return;
   10158 	}
   10159 
   10160 	/*
   10161 	 * Allow some time after each MDIC transaction to avoid
   10162 	 * reading duplicate data in the next MDIC transaction.
   10163 	 */
   10164 	if (sc->sc_type == WM_T_PCH2)
   10165 		delay(100);
   10166 }
   10167 
   10168 /*
   10169  * wm_gmii_i82544_readreg:	[mii interface function]
   10170  *
   10171  *	Read a PHY register on the GMII.
   10172  */
   10173 static int
   10174 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10175 {
   10176 	struct wm_softc *sc = device_private(dev);
   10177 	int rv;
   10178 
   10179 	if (sc->phy.acquire(sc)) {
   10180 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10181 		return 0;
   10182 	}
   10183 
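          	/*
          	 * For registers above 0x1f on IGP PHYs, write the full
          	 * register number to the page-select register first; the
          	 * access below then uses only the low five bits.
          	 */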
   10184 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10185 		switch (sc->sc_phytype) {
   10186 		case WMPHY_IGP:
   10187 		case WMPHY_IGP_2:
   10188 		case WMPHY_IGP_3:
   10189 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10190 			    reg);
   10191 			break;
   10192 		default:
   10193 #ifdef WM_DEBUG
   10194 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10195 			    __func__, sc->sc_phytype, reg);
   10196 #endif
   10197 			break;
   10198 		}
   10199 	}
   10200 
   10201 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10202 	sc->phy.release(sc);
   10203 
   10204 	return rv;
   10205 }
   10206 
   10207 /*
   10208  * wm_gmii_i82544_writereg:	[mii interface function]
   10209  *
   10210  *	Write a PHY register on the GMII.
   10211  */
   10212 static void
   10213 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10214 {
   10215 	struct wm_softc *sc = device_private(dev);
   10216 
   10217 	if (sc->phy.acquire(sc)) {
   10218 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10219 		return;
   10220 	}
   10221 
   10222 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10223 		switch (sc->sc_phytype) {
   10224 		case WMPHY_IGP:
   10225 		case WMPHY_IGP_2:
   10226 		case WMPHY_IGP_3:
   10227 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10228 			    reg);
   10229 			break;
   10230 		default:
   10231 #ifdef WM_DEBUG
   10232 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10233 			    __func__, sc->sc_phytype, reg);
   10234 #endif
   10235 			break;
   10236 		}
   10237 	}
   10238 
   10239 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10240 	sc->phy.release(sc);
   10241 }
   10242 
   10243 /*
   10244  * wm_gmii_i80003_readreg:	[mii interface function]
   10245  *
    10246  *	Read a PHY register on the Kumeran bus (80003).
    10247  * This could be handled by the PHY layer if we didn't have to lock the
    10248  * resource ...
   10249  */
   10250 static int
   10251 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10252 {
   10253 	struct wm_softc *sc = device_private(dev);
   10254 	int page_select, temp;
   10255 	int rv;
   10256 
   10257 	if (phy != 1) /* only one PHY on kumeran bus */
   10258 		return 0;
   10259 
   10260 	if (sc->phy.acquire(sc)) {
   10261 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10262 		return 0;
   10263 	}
   10264 
   10265 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10266 		page_select = GG82563_PHY_PAGE_SELECT;
   10267 	else {
   10268 		/*
   10269 		 * Use Alternative Page Select register to access registers
   10270 		 * 30 and 31.
   10271 		 */
   10272 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10273 	}
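          	/*
          	 * The upper bits of the register number hold the GG82563 page;
          	 * write it to the chosen page-select register before accessing
          	 * the offset within that page.
          	 */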
   10274 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10275 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10276 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10277 		/*
    10278 		 * Wait another 200us to work around a bug in the ready
    10279 		 * bit of the MDIC register.
   10280 		 */
   10281 		delay(200);
   10282 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10283 			device_printf(dev, "%s failed\n", __func__);
   10284 			rv = 0; /* XXX */
   10285 			goto out;
   10286 		}
   10287 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10288 		delay(200);
   10289 	} else
   10290 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10291 
   10292 out:
   10293 	sc->phy.release(sc);
   10294 	return rv;
   10295 }
   10296 
   10297 /*
   10298  * wm_gmii_i80003_writereg:	[mii interface function]
   10299  *
    10300  *	Write a PHY register on the Kumeran bus (80003).
    10301  * This could be handled by the PHY layer if we didn't have to lock the
    10302  * resource ...
   10303  */
   10304 static void
   10305 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10306 {
   10307 	struct wm_softc *sc = device_private(dev);
   10308 	int page_select, temp;
   10309 
   10310 	if (phy != 1) /* only one PHY on kumeran bus */
   10311 		return;
   10312 
   10313 	if (sc->phy.acquire(sc)) {
   10314 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10315 		return;
   10316 	}
   10317 
   10318 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10319 		page_select = GG82563_PHY_PAGE_SELECT;
   10320 	else {
   10321 		/*
   10322 		 * Use Alternative Page Select register to access registers
   10323 		 * 30 and 31.
   10324 		 */
   10325 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10326 	}
   10327 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10328 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10329 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10330 		/*
    10331 		 * Wait another 200us to work around a bug in the ready
    10332 		 * bit of the MDIC register.
   10333 		 */
   10334 		delay(200);
   10335 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10336 			device_printf(dev, "%s failed\n", __func__);
   10337 			goto out;
   10338 		}
   10339 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10340 		delay(200);
   10341 	} else
   10342 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10343 
   10344 out:
   10345 	sc->phy.release(sc);
   10346 }
   10347 
   10348 /*
   10349  * wm_gmii_bm_readreg:	[mii interface function]
   10350  *
    10351  *	Read a PHY register on the BM PHY.
    10352  * This could be handled by the PHY layer if we didn't have to lock the
    10353  * resource ...
   10354  */
   10355 static int
   10356 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10357 {
   10358 	struct wm_softc *sc = device_private(dev);
   10359 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10360 	uint16_t val;
   10361 	int rv;
   10362 
   10363 	if (sc->phy.acquire(sc)) {
   10364 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10365 		return 0;
   10366 	}
   10367 
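          	/*
          	 * On BM PHYs, registers on pages 768 and above, register 25 on
          	 * page 0, and register 31 are only reachable at PHY address 1.
          	 */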
   10368 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10369 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10370 		    || (reg == 31)) ? 1 : phy;
   10371 	/* Page 800 works differently than the rest so it has its own func */
   10372 	if (page == BM_WUC_PAGE) {
   10373 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10374 		rv = val;
   10375 		goto release;
   10376 	}
   10377 
   10378 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10379 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10380 		    && (sc->sc_type != WM_T_82583))
   10381 			wm_gmii_mdic_writereg(dev, phy,
   10382 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10383 		else
   10384 			wm_gmii_mdic_writereg(dev, phy,
   10385 			    BME1000_PHY_PAGE_SELECT, page);
   10386 	}
   10387 
   10388 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10389 
   10390 release:
   10391 	sc->phy.release(sc);
   10392 	return rv;
   10393 }
   10394 
   10395 /*
   10396  * wm_gmii_bm_writereg:	[mii interface function]
   10397  *
    10398  *	Write a PHY register on the BM PHY.
    10399  * This could be handled by the PHY layer if we didn't have to lock the
    10400  * resource ...
   10401  */
   10402 static void
   10403 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10404 {
   10405 	struct wm_softc *sc = device_private(dev);
   10406 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10407 
   10408 	if (sc->phy.acquire(sc)) {
   10409 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10410 		return;
   10411 	}
   10412 
   10413 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10414 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10415 		    || (reg == 31)) ? 1 : phy;
   10416 	/* Page 800 works differently than the rest so it has its own func */
   10417 	if (page == BM_WUC_PAGE) {
   10418 		uint16_t tmp;
   10419 
   10420 		tmp = val;
   10421 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10422 		goto release;
   10423 	}
   10424 
   10425 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10426 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10427 		    && (sc->sc_type != WM_T_82583))
   10428 			wm_gmii_mdic_writereg(dev, phy,
   10429 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10430 		else
   10431 			wm_gmii_mdic_writereg(dev, phy,
   10432 			    BME1000_PHY_PAGE_SELECT, page);
   10433 	}
   10434 
   10435 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10436 
   10437 release:
   10438 	sc->phy.release(sc);
   10439 }
   10440 
   10441 static void
    10442 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10443 {
   10444 	struct wm_softc *sc = device_private(dev);
   10445 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10446 	uint16_t wuce, reg;
   10447 
   10448 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10449 		device_xname(dev), __func__));
   10450 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10451 	if (sc->sc_type == WM_T_PCH) {
    10452 		/* XXX The e1000 driver does nothing... why? */
   10453 	}
   10454 
   10455 	/*
   10456 	 * 1) Enable PHY wakeup register first.
   10457 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10458 	 */
   10459 
   10460 	/* Set page 769 */
   10461 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10462 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10463 
   10464 	/* Read WUCE and save it */
   10465 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10466 
   10467 	reg = wuce | BM_WUC_ENABLE_BIT;
   10468 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10469 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10470 
   10471 	/* Select page 800 */
   10472 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10473 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10474 
   10475 	/*
   10476 	 * 2) Access PHY wakeup register.
   10477 	 * See e1000_access_phy_wakeup_reg_bm.
   10478 	 */
   10479 
   10480 	/* Write page 800 */
   10481 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10482 
   10483 	if (rd)
   10484 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10485 	else
   10486 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10487 
   10488 	/*
   10489 	 * 3) Disable PHY wakeup register.
   10490 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10491 	 */
   10492 	/* Set page 769 */
   10493 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10494 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10495 
   10496 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10497 }
   10498 
   10499 /*
   10500  * wm_gmii_hv_readreg:	[mii interface function]
   10501  *
    10502  *	Read a PHY register on the HV PHY (PCH and newer).
    10503  * This could be handled by the PHY layer if we didn't have to lock the
    10504  * resource ...
   10505  */
   10506 static int
   10507 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10508 {
   10509 	struct wm_softc *sc = device_private(dev);
   10510 	int rv;
   10511 
   10512 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10513 		device_xname(dev), __func__));
   10514 	if (sc->phy.acquire(sc)) {
   10515 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10516 		return 0;
   10517 	}
   10518 
   10519 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10520 	sc->phy.release(sc);
   10521 	return rv;
   10522 }
   10523 
   10524 static int
   10525 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10526 {
   10527 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10528 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10529 	uint16_t val;
   10530 	int rv;
   10531 
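          	/*
          	 * Registers on page HV_INTC_FC_PAGE_START and above are only
          	 * reachable at PHY address 1.
          	 */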
   10532 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10533 
   10534 	/* Page 800 works differently than the rest so it has its own func */
   10535 	if (page == BM_WUC_PAGE) {
   10536 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10537 		return val;
   10538 	}
   10539 
   10540 	/*
    10541 	 * Pages lower than 768 work differently than the rest, so they
    10542 	 * would need their own function (not implemented here)
   10543 	 */
   10544 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10545 		printf("gmii_hv_readreg!!!\n");
   10546 		return 0;
   10547 	}
   10548 
   10549 	/*
   10550 	 * XXX I21[789] documents say that the SMBus Address register is at
   10551 	 * PHY address 01, Page 0 (not 768), Register 26.
   10552 	 */
   10553 	if (page == HV_INTC_FC_PAGE_START)
   10554 		page = 0;
   10555 
   10556 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10557 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10558 		    page << BME1000_PAGE_SHIFT);
   10559 	}
   10560 
   10561 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10562 	return rv;
   10563 }
   10564 
   10565 /*
   10566  * wm_gmii_hv_writereg:	[mii interface function]
   10567  *
    10568  *	Write a PHY register on the HV PHY (PCH and newer).
    10569  * This could be handled by the PHY layer if we didn't have to lock the
    10570  * resource ...
   10571  */
   10572 static void
   10573 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10574 {
   10575 	struct wm_softc *sc = device_private(dev);
   10576 
   10577 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10578 		device_xname(dev), __func__));
   10579 
   10580 	if (sc->phy.acquire(sc)) {
   10581 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10582 		return;
   10583 	}
   10584 
   10585 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10586 	sc->phy.release(sc);
   10587 }
   10588 
   10589 static void
   10590 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10591 {
   10592 	struct wm_softc *sc = device_private(dev);
   10593 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10594 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10595 
   10596 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10597 
   10598 	/* Page 800 works differently than the rest so it has its own func */
   10599 	if (page == BM_WUC_PAGE) {
   10600 		uint16_t tmp;
   10601 
   10602 		tmp = val;
   10603 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10604 		return;
   10605 	}
   10606 
   10607 	/*
    10608 	 * Pages lower than 768 work differently than the rest, so they
    10609 	 * would need their own function (not implemented here)
   10610 	 */
   10611 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10612 		printf("gmii_hv_writereg!!!\n");
   10613 		return;
   10614 	}
   10615 
   10616 	{
   10617 		/*
   10618 		 * XXX I21[789] documents say that the SMBus Address register
   10619 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10620 		 */
   10621 		if (page == HV_INTC_FC_PAGE_START)
   10622 			page = 0;
   10623 
   10624 		/*
   10625 		 * XXX Workaround MDIO accesses being disabled after entering
   10626 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10627 		 * register is set)
   10628 		 */
   10629 		if (sc->sc_phytype == WMPHY_82578) {
   10630 			struct mii_softc *child;
   10631 
   10632 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10633 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10634 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10635 			    && ((val & (1 << 11)) != 0)) {
   10636 				printf("XXX need workaround\n");
   10637 			}
   10638 		}
   10639 
   10640 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10641 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10642 			    page << BME1000_PAGE_SHIFT);
   10643 		}
   10644 	}
   10645 
   10646 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10647 }
   10648 
   10649 /*
   10650  * wm_gmii_82580_readreg:	[mii interface function]
   10651  *
   10652  *	Read a PHY register on the 82580 and I350.
   10653  * This could be handled by the PHY layer if we didn't have to lock the
    10654  * resource ...
   10655  */
   10656 static int
   10657 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10658 {
   10659 	struct wm_softc *sc = device_private(dev);
   10660 	int rv;
   10661 
   10662 	if (sc->phy.acquire(sc) != 0) {
   10663 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10664 		return 0;
   10665 	}
   10666 
   10667 #ifdef DIAGNOSTIC
   10668 	if (reg > MII_ADDRMASK) {
   10669 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10670 		    __func__, sc->sc_phytype, reg);
   10671 		reg &= MII_ADDRMASK;
   10672 	}
   10673 #endif
   10674 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10675 
   10676 	sc->phy.release(sc);
   10677 	return rv;
   10678 }
   10679 
   10680 /*
   10681  * wm_gmii_82580_writereg:	[mii interface function]
   10682  *
   10683  *	Write a PHY register on the 82580 and I350.
   10684  * This could be handled by the PHY layer if we didn't have to lock the
    10685  * resource ...
   10686  */
   10687 static void
   10688 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10689 {
   10690 	struct wm_softc *sc = device_private(dev);
   10691 
   10692 	if (sc->phy.acquire(sc) != 0) {
   10693 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10694 		return;
   10695 	}
   10696 
   10697 #ifdef DIAGNOSTIC
   10698 	if (reg > MII_ADDRMASK) {
   10699 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10700 		    __func__, sc->sc_phytype, reg);
   10701 		reg &= MII_ADDRMASK;
   10702 	}
   10703 #endif
   10704 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10705 
   10706 	sc->phy.release(sc);
   10707 }
   10708 
   10709 /*
   10710  * wm_gmii_gs40g_readreg:	[mii interface function]
   10711  *
    10712  *	Read a PHY register on the I210 and I211.
    10713  * This could be handled by the PHY layer if we didn't have to lock the
    10714  * resource ...
   10715  */
   10716 static int
   10717 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10718 {
   10719 	struct wm_softc *sc = device_private(dev);
   10720 	int page, offset;
   10721 	int rv;
   10722 
   10723 	/* Acquire semaphore */
   10724 	if (sc->phy.acquire(sc)) {
   10725 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10726 		return 0;
   10727 	}
   10728 
   10729 	/* Page select */
   10730 	page = reg >> GS40G_PAGE_SHIFT;
   10731 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10732 
   10733 	/* Read reg */
   10734 	offset = reg & GS40G_OFFSET_MASK;
   10735 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10736 
   10737 	sc->phy.release(sc);
   10738 	return rv;
   10739 }
   10740 
   10741 /*
   10742  * wm_gmii_gs40g_writereg:	[mii interface function]
   10743  *
   10744  *	Write a PHY register on the I210 and I211.
   10745  * This could be handled by the PHY layer if we didn't have to lock the
    10746  * resource ...
   10747  */
   10748 static void
   10749 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10750 {
   10751 	struct wm_softc *sc = device_private(dev);
   10752 	int page, offset;
   10753 
   10754 	/* Acquire semaphore */
   10755 	if (sc->phy.acquire(sc)) {
   10756 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10757 		return;
   10758 	}
   10759 
   10760 	/* Page select */
   10761 	page = reg >> GS40G_PAGE_SHIFT;
   10762 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10763 
   10764 	/* Write reg */
   10765 	offset = reg & GS40G_OFFSET_MASK;
   10766 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10767 
   10768 	/* Release semaphore */
   10769 	sc->phy.release(sc);
   10770 }
   10771 
   10772 /*
   10773  * wm_gmii_statchg:	[mii interface function]
   10774  *
   10775  *	Callback from MII layer when media changes.
   10776  */
   10777 static void
   10778 wm_gmii_statchg(struct ifnet *ifp)
   10779 {
   10780 	struct wm_softc *sc = ifp->if_softc;
   10781 	struct mii_data *mii = &sc->sc_mii;
   10782 
   10783 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10784 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10785 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10786 
   10787 	/*
   10788 	 * Get flow control negotiation result.
   10789 	 */
   10790 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10791 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10792 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10793 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10794 	}
   10795 
   10796 	if (sc->sc_flowflags & IFM_FLOW) {
   10797 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10798 			sc->sc_ctrl |= CTRL_TFCE;
   10799 			sc->sc_fcrtl |= FCRTL_XONE;
   10800 		}
   10801 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10802 			sc->sc_ctrl |= CTRL_RFCE;
   10803 	}
   10804 
   10805 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10806 		DPRINTF(WM_DEBUG_LINK,
   10807 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10808 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10809 	} else {
   10810 		DPRINTF(WM_DEBUG_LINK,
   10811 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10812 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10813 	}
   10814 
   10815 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10816 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10817 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10818 						 : WMREG_FCRTL, sc->sc_fcrtl);
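          	/*
          	 * On the 80003, the Kumeran half-duplex control and the
          	 * transmit inter-packet gap must be retuned for the
          	 * negotiated speed.
          	 */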
   10819 	if (sc->sc_type == WM_T_80003) {
   10820 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10821 		case IFM_1000_T:
   10822 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10823 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10824 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10825 			break;
   10826 		default:
   10827 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10828 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10829 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10830 			break;
   10831 		}
   10832 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10833 	}
   10834 }
   10835 
    10836 /* Kumeran related (80003, ICH* and PCH*) */
   10837 
   10838 /*
   10839  * wm_kmrn_readreg:
   10840  *
   10841  *	Read a kumeran register
   10842  */
   10843 static int
   10844 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10845 {
   10846 	int rv;
   10847 
   10848 	if (sc->sc_type == WM_T_80003)
   10849 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10850 	else
   10851 		rv = sc->phy.acquire(sc);
   10852 	if (rv != 0) {
   10853 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10854 		    __func__);
   10855 		return rv;
   10856 	}
   10857 
   10858 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10859 
   10860 	if (sc->sc_type == WM_T_80003)
   10861 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10862 	else
   10863 		sc->phy.release(sc);
   10864 
   10865 	return rv;
   10866 }
   10867 
   10868 static int
   10869 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10870 {
   10871 
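          	/*
          	 * A Kumeran read is started by writing the register offset
          	 * with the REN bit set; the data is valid in the same
          	 * register shortly afterwards.
          	 */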
   10872 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10873 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10874 	    KUMCTRLSTA_REN);
   10875 	CSR_WRITE_FLUSH(sc);
   10876 	delay(2);
   10877 
   10878 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10879 
   10880 	return 0;
   10881 }
   10882 
   10883 /*
   10884  * wm_kmrn_writereg:
   10885  *
   10886  *	Write a kumeran register
   10887  */
   10888 static int
   10889 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10890 {
   10891 	int rv;
   10892 
   10893 	if (sc->sc_type == WM_T_80003)
   10894 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10895 	else
   10896 		rv = sc->phy.acquire(sc);
   10897 	if (rv != 0) {
   10898 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10899 		    __func__);
   10900 		return rv;
   10901 	}
   10902 
   10903 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10904 
   10905 	if (sc->sc_type == WM_T_80003)
   10906 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10907 	else
   10908 		sc->phy.release(sc);
   10909 
   10910 	return rv;
   10911 }
   10912 
   10913 static int
   10914 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10915 {
   10916 
   10917 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10918 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10919 
   10920 	return 0;
   10921 }
   10922 
   10923 /* SGMII related */
   10924 
   10925 /*
   10926  * wm_sgmii_uses_mdio
   10927  *
   10928  * Check whether the transaction is to the internal PHY or the external
   10929  * MDIO interface. Return true if it's MDIO.
   10930  */
   10931 static bool
   10932 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10933 {
   10934 	uint32_t reg;
   10935 	bool ismdio = false;
   10936 
   10937 	switch (sc->sc_type) {
   10938 	case WM_T_82575:
   10939 	case WM_T_82576:
   10940 		reg = CSR_READ(sc, WMREG_MDIC);
   10941 		ismdio = ((reg & MDIC_DEST) != 0);
   10942 		break;
   10943 	case WM_T_82580:
   10944 	case WM_T_I350:
   10945 	case WM_T_I354:
   10946 	case WM_T_I210:
   10947 	case WM_T_I211:
   10948 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10949 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10950 		break;
   10951 	default:
   10952 		break;
   10953 	}
   10954 
   10955 	return ismdio;
   10956 }
   10957 
   10958 /*
   10959  * wm_sgmii_readreg:	[mii interface function]
   10960  *
   10961  *	Read a PHY register on the SGMII
   10962  * This could be handled by the PHY layer if we didn't have to lock the
    10963  * resource ...
   10964  */
   10965 static int
   10966 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10967 {
   10968 	struct wm_softc *sc = device_private(dev);
   10969 	uint32_t i2ccmd;
   10970 	int i, rv;
   10971 
   10972 	if (sc->phy.acquire(sc)) {
   10973 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10974 		return 0;
   10975 	}
   10976 
   10977 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10978 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10979 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10980 
   10981 	/* Poll the ready bit */
   10982 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10983 		delay(50);
   10984 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10985 		if (i2ccmd & I2CCMD_READY)
   10986 			break;
   10987 	}
   10988 	if ((i2ccmd & I2CCMD_READY) == 0)
   10989 		device_printf(dev, "I2CCMD Read did not complete\n");
   10990 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10991 		device_printf(dev, "I2CCMD Error bit set\n");
   10992 
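          	/* The I2C interface returns the data byte-swapped; swap it back. */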
   10993 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10994 
   10995 	sc->phy.release(sc);
   10996 	return rv;
   10997 }
   10998 
   10999 /*
   11000  * wm_sgmii_writereg:	[mii interface function]
   11001  *
   11002  *	Write a PHY register on the SGMII.
   11003  * This could be handled by the PHY layer if we didn't have to lock the
    11004  * resource ...
   11005  */
   11006 static void
   11007 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   11008 {
   11009 	struct wm_softc *sc = device_private(dev);
   11010 	uint32_t i2ccmd;
   11011 	int i;
   11012 	int swapdata;
   11013 
   11014 	if (sc->phy.acquire(sc) != 0) {
   11015 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11016 		return;
   11017 	}
   11018 	/* Swap the data bytes for the I2C interface */
   11019 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11020 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11021 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11022 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11023 
   11024 	/* Poll the ready bit */
   11025 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11026 		delay(50);
   11027 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11028 		if (i2ccmd & I2CCMD_READY)
   11029 			break;
   11030 	}
   11031 	if ((i2ccmd & I2CCMD_READY) == 0)
   11032 		device_printf(dev, "I2CCMD Write did not complete\n");
   11033 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11034 		device_printf(dev, "I2CCMD Error bit set\n");
   11035 
   11036 	sc->phy.release(sc);
   11037 }
   11038 
   11039 /* TBI related */
   11040 
   11041 static bool
   11042 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11043 {
   11044 	bool sig;
   11045 
   11046 	sig = ctrl & CTRL_SWDPIN(1);
   11047 
   11048 	/*
   11049 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11050 	 * detect a signal, 1 if they don't.
   11051 	 */
   11052 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11053 		sig = !sig;
   11054 
   11055 	return sig;
   11056 }
   11057 
   11058 /*
   11059  * wm_tbi_mediainit:
   11060  *
   11061  *	Initialize media for use on 1000BASE-X devices.
   11062  */
   11063 static void
   11064 wm_tbi_mediainit(struct wm_softc *sc)
   11065 {
   11066 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11067 	const char *sep = "";
   11068 
   11069 	if (sc->sc_type < WM_T_82543)
   11070 		sc->sc_tipg = TIPG_WM_DFLT;
   11071 	else
   11072 		sc->sc_tipg = TIPG_LG_DFLT;
   11073 
   11074 	sc->sc_tbi_serdes_anegticks = 5;
   11075 
   11076 	/* Initialize our media structures */
   11077 	sc->sc_mii.mii_ifp = ifp;
   11078 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11079 
   11080 	if ((sc->sc_type >= WM_T_82575)
   11081 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11082 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11083 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11084 	else
   11085 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11086 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11087 
   11088 	/*
   11089 	 * SWD Pins:
   11090 	 *
   11091 	 *	0 = Link LED (output)
   11092 	 *	1 = Loss Of Signal (input)
   11093 	 */
   11094 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11095 
   11096 	/* XXX Perhaps this is only for TBI */
   11097 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11098 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11099 
   11100 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11101 		sc->sc_ctrl &= ~CTRL_LRST;
   11102 
   11103 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11104 
   11105 #define	ADD(ss, mm, dd)							\
   11106 do {									\
   11107 	aprint_normal("%s%s", sep, ss);					\
   11108 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11109 	sep = ", ";							\
   11110 } while (/*CONSTCOND*/0)
   11111 
   11112 	aprint_normal_dev(sc->sc_dev, "");
   11113 
   11114 	if (sc->sc_type == WM_T_I354) {
   11115 		uint32_t status;
   11116 
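          		/*
          		 * 2.5G SKUs of the I354 advertise 2500baseKX; the other
          		 * variants advertise plain 1000baseKX.
          		 */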
   11117 		status = CSR_READ(sc, WMREG_STATUS);
   11118 		if (((status & STATUS_2P5_SKU) != 0)
   11119 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11120 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11121 		} else
   11122 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11123 	} else if (sc->sc_type == WM_T_82545) {
   11124 		/* Only 82545 is LX (XXX except SFP) */
   11125 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11126 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11127 	} else {
   11128 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11129 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11130 	}
   11131 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11132 	aprint_normal("\n");
   11133 
   11134 #undef ADD
   11135 
   11136 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11137 }
   11138 
   11139 /*
   11140  * wm_tbi_mediachange:	[ifmedia interface function]
   11141  *
   11142  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11143  */
   11144 static int
   11145 wm_tbi_mediachange(struct ifnet *ifp)
   11146 {
   11147 	struct wm_softc *sc = ifp->if_softc;
   11148 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11149 	uint32_t status, ctrl;
   11150 	bool signal;
   11151 	int i;
   11152 
   11153 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11154 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11155 		/* XXX need some work for >= 82571 and < 82575 */
   11156 		if (sc->sc_type < WM_T_82575)
   11157 			return 0;
   11158 	}
   11159 
   11160 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11161 	    || (sc->sc_type >= WM_T_82575))
   11162 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11163 
   11164 	sc->sc_ctrl &= ~CTRL_LRST;
   11165 	sc->sc_txcw = TXCW_ANE;
   11166 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11167 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11168 	else if (ife->ifm_media & IFM_FDX)
   11169 		sc->sc_txcw |= TXCW_FD;
   11170 	else
   11171 		sc->sc_txcw |= TXCW_HD;
   11172 
   11173 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11174 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11175 
   11176 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11177 		device_xname(sc->sc_dev), sc->sc_txcw));
   11178 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11179 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11180 	CSR_WRITE_FLUSH(sc);
   11181 	delay(1000);
   11182 
   11183 	ctrl =  CSR_READ(sc, WMREG_CTRL);
   11184 	signal = wm_tbi_havesignal(sc, ctrl);
   11185 
   11186 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11187 		signal));
   11188 
   11189 	if (signal) {
   11190 		/* Have signal; wait for the link to come up. */
   11191 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11192 			delay(10000);
   11193 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11194 				break;
   11195 		}
   11196 
   11197 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11198 			device_xname(sc->sc_dev),i));
   11199 
   11200 		status = CSR_READ(sc, WMREG_STATUS);
   11201 		DPRINTF(WM_DEBUG_LINK,
   11202 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11203 			device_xname(sc->sc_dev),status, STATUS_LU));
   11204 		if (status & STATUS_LU) {
   11205 			/* Link is up. */
   11206 			DPRINTF(WM_DEBUG_LINK,
   11207 			    ("%s: LINK: set media -> link up %s\n",
   11208 				device_xname(sc->sc_dev),
   11209 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11210 
   11211 			/*
   11212 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11213 			 * so we should update sc->sc_ctrl
   11214 			 */
   11215 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11216 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11217 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11218 			if (status & STATUS_FD)
   11219 				sc->sc_tctl |=
   11220 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11221 			else
   11222 				sc->sc_tctl |=
   11223 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11224 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11225 				sc->sc_fcrtl |= FCRTL_XONE;
   11226 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11227 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11228 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11229 			sc->sc_tbi_linkup = 1;
   11230 		} else {
   11231 			if (i == WM_LINKUP_TIMEOUT)
   11232 				wm_check_for_link(sc);
   11233 			/* Link is down. */
   11234 			DPRINTF(WM_DEBUG_LINK,
   11235 			    ("%s: LINK: set media -> link down\n",
   11236 				device_xname(sc->sc_dev)));
   11237 			sc->sc_tbi_linkup = 0;
   11238 		}
   11239 	} else {
   11240 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11241 			device_xname(sc->sc_dev)));
   11242 		sc->sc_tbi_linkup = 0;
   11243 	}
   11244 
   11245 	wm_tbi_serdes_set_linkled(sc);
   11246 
   11247 	return 0;
   11248 }
   11249 
   11250 /*
   11251  * wm_tbi_mediastatus:	[ifmedia interface function]
   11252  *
   11253  *	Get the current interface media status on a 1000BASE-X device.
   11254  */
   11255 static void
   11256 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11257 {
   11258 	struct wm_softc *sc = ifp->if_softc;
   11259 	uint32_t ctrl, status;
   11260 
   11261 	ifmr->ifm_status = IFM_AVALID;
   11262 	ifmr->ifm_active = IFM_ETHER;
   11263 
   11264 	status = CSR_READ(sc, WMREG_STATUS);
   11265 	if ((status & STATUS_LU) == 0) {
   11266 		ifmr->ifm_active |= IFM_NONE;
   11267 		return;
   11268 	}
   11269 
   11270 	ifmr->ifm_status |= IFM_ACTIVE;
   11271 	/* Only 82545 is LX */
   11272 	if (sc->sc_type == WM_T_82545)
   11273 		ifmr->ifm_active |= IFM_1000_LX;
   11274 	else
   11275 		ifmr->ifm_active |= IFM_1000_SX;
   11276 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11277 		ifmr->ifm_active |= IFM_FDX;
   11278 	else
   11279 		ifmr->ifm_active |= IFM_HDX;
   11280 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11281 	if (ctrl & CTRL_RFCE)
   11282 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11283 	if (ctrl & CTRL_TFCE)
   11284 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11285 }
   11286 
   11287 /* XXX TBI only */
   11288 static int
   11289 wm_check_for_link(struct wm_softc *sc)
   11290 {
   11291 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11292 	uint32_t rxcw;
   11293 	uint32_t ctrl;
   11294 	uint32_t status;
   11295 	bool signal;
   11296 
   11297 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11298 		device_xname(sc->sc_dev), __func__));
   11299 
   11300 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11301 		/* XXX need some work for >= 82571 */
   11302 		if (sc->sc_type >= WM_T_82571) {
   11303 			sc->sc_tbi_linkup = 1;
   11304 			return 0;
   11305 		}
   11306 	}
   11307 
   11308 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11309 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11310 	status = CSR_READ(sc, WMREG_STATUS);
   11311 	signal = wm_tbi_havesignal(sc, ctrl);
   11312 
   11313 	DPRINTF(WM_DEBUG_LINK,
   11314 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11315 		device_xname(sc->sc_dev), __func__, signal,
   11316 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11317 
   11318 	/*
   11319 	 * SWDPIN   LU RXCW
   11320 	 *	0    0	  0
   11321 	 *	0    0	  1	(should not happen)
   11322 	 *	0    1	  0	(should not happen)
   11323 	 *	0    1	  1	(should not happen)
   11324 	 *	1    0	  0	Disable autonego and force linkup
   11325 	 *	1    0	  1	got /C/ but not linkup yet
   11326 	 *	1    1	  0	(linkup)
   11327 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11328 	 *
   11329 	 */
   11330 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11331 		DPRINTF(WM_DEBUG_LINK,
   11332 		    ("%s: %s: force linkup and fullduplex\n",
   11333 			device_xname(sc->sc_dev), __func__));
   11334 		sc->sc_tbi_linkup = 0;
   11335 		/* Disable auto-negotiation in the TXCW register */
   11336 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11337 
   11338 		/*
   11339 		 * Force link-up and also force full-duplex.
   11340 		 *
    11341 		 * NOTE: TFCE and RFCE in CTRL were updated by the hardware
    11342 		 * automatically, so we should update sc->sc_ctrl from it
   11343 		 */
   11344 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11345 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11346 	} else if (((status & STATUS_LU) != 0)
   11347 	    && ((rxcw & RXCW_C) != 0)
   11348 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11349 		sc->sc_tbi_linkup = 1;
   11350 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11351 			device_xname(sc->sc_dev),
   11352 			__func__));
   11353 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11354 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11355 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    11356 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
    11357 			device_xname(sc->sc_dev), __func__));
   11358 	} else {
   11359 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11360 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11361 			status));
   11362 	}
   11363 
   11364 	return 0;
   11365 }
   11366 
   11367 /*
   11368  * wm_tbi_tick:
   11369  *
   11370  *	Check the link on TBI devices.
   11371  *	This function acts as mii_tick().
   11372  */
   11373 static void
   11374 wm_tbi_tick(struct wm_softc *sc)
   11375 {
   11376 	struct mii_data *mii = &sc->sc_mii;
   11377 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11378 	uint32_t status;
   11379 
   11380 	KASSERT(WM_CORE_LOCKED(sc));
   11381 
   11382 	status = CSR_READ(sc, WMREG_STATUS);
   11383 
   11384 	/* XXX is this needed? */
   11385 	(void)CSR_READ(sc, WMREG_RXCW);
   11386 	(void)CSR_READ(sc, WMREG_CTRL);
   11387 
   11388 	/* set link status */
   11389 	if ((status & STATUS_LU) == 0) {
   11390 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11391 			device_xname(sc->sc_dev)));
   11392 		sc->sc_tbi_linkup = 0;
   11393 	} else if (sc->sc_tbi_linkup == 0) {
   11394 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11395 			device_xname(sc->sc_dev),
   11396 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11397 		sc->sc_tbi_linkup = 1;
   11398 		sc->sc_tbi_serdes_ticks = 0;
   11399 	}
   11400 
   11401 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11402 		goto setled;
   11403 
   11404 	if ((status & STATUS_LU) == 0) {
   11405 		sc->sc_tbi_linkup = 0;
   11406 		/* If the timer expired, retry autonegotiation */
   11407 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11408 		    && (++sc->sc_tbi_serdes_ticks
   11409 			>= sc->sc_tbi_serdes_anegticks)) {
   11410 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11411 			sc->sc_tbi_serdes_ticks = 0;
   11412 			/*
   11413 			 * Reset the link, and let autonegotiation do
   11414 			 * its thing
   11415 			 */
   11416 			sc->sc_ctrl |= CTRL_LRST;
   11417 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11418 			CSR_WRITE_FLUSH(sc);
   11419 			delay(1000);
   11420 			sc->sc_ctrl &= ~CTRL_LRST;
   11421 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11422 			CSR_WRITE_FLUSH(sc);
   11423 			delay(1000);
   11424 			CSR_WRITE(sc, WMREG_TXCW,
   11425 			    sc->sc_txcw & ~TXCW_ANE);
   11426 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11427 		}
   11428 	}
   11429 
   11430 setled:
   11431 	wm_tbi_serdes_set_linkled(sc);
   11432 }
   11433 
   11434 /* SERDES related */
   11435 static void
   11436 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11437 {
   11438 	uint32_t reg;
   11439 
   11440 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11441 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11442 		return;
   11443 
   11444 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11445 	reg |= PCS_CFG_PCS_EN;
   11446 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11447 
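          	/*
          	 * Clearing SDP3 deasserts the SFP TX_DISABLE line on typical
          	 * 82575/82576 fiber designs, powering up the optics.
          	 */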
   11448 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11449 	reg &= ~CTRL_EXT_SWDPIN(3);
   11450 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11451 	CSR_WRITE_FLUSH(sc);
   11452 }
   11453 
   11454 static int
   11455 wm_serdes_mediachange(struct ifnet *ifp)
   11456 {
   11457 	struct wm_softc *sc = ifp->if_softc;
   11458 	bool pcs_autoneg = true; /* XXX */
   11459 	uint32_t ctrl_ext, pcs_lctl, reg;
   11460 
   11461 	/* XXX Currently, this function is not called on 8257[12] */
   11462 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11463 	    || (sc->sc_type >= WM_T_82575))
   11464 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11465 
   11466 	wm_serdes_power_up_link_82575(sc);
   11467 
   11468 	sc->sc_ctrl |= CTRL_SLU;
   11469 
   11470 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11471 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11472 
   11473 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11474 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11475 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11476 	case CTRL_EXT_LINK_MODE_SGMII:
   11477 		pcs_autoneg = true;
   11478 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11479 		break;
   11480 	case CTRL_EXT_LINK_MODE_1000KX:
   11481 		pcs_autoneg = false;
   11482 		/* FALLTHROUGH */
   11483 	default:
   11484 		if ((sc->sc_type == WM_T_82575)
   11485 		    || (sc->sc_type == WM_T_82576)) {
   11486 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11487 				pcs_autoneg = false;
   11488 		}
   11489 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11490 		    | CTRL_FRCFDX;
   11491 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11492 	}
   11493 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11494 
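          	/*
          	 * With PCS autonegotiation, restart AN and advertise both
          	 * symmetric and asymmetric pause; otherwise force the speed,
          	 * duplex and flow control settings directly.
          	 */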
   11495 	if (pcs_autoneg) {
   11496 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11497 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11498 
   11499 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11500 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11501 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11502 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11503 	} else
   11504 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11505 
   11506 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11507 
   11508 
   11509 	return 0;
   11510 }
   11511 
   11512 static void
   11513 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11514 {
   11515 	struct wm_softc *sc = ifp->if_softc;
   11516 	struct mii_data *mii = &sc->sc_mii;
   11517 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11518 	uint32_t pcs_adv, pcs_lpab, reg;
   11519 
   11520 	ifmr->ifm_status = IFM_AVALID;
   11521 	ifmr->ifm_active = IFM_ETHER;
   11522 
   11523 	/* Check PCS */
   11524 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11525 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11526 		ifmr->ifm_active |= IFM_NONE;
   11527 		sc->sc_tbi_linkup = 0;
   11528 		goto setled;
   11529 	}
   11530 
   11531 	sc->sc_tbi_linkup = 1;
   11532 	ifmr->ifm_status |= IFM_ACTIVE;
   11533 	if (sc->sc_type == WM_T_I354) {
   11534 		uint32_t status;
   11535 
   11536 		status = CSR_READ(sc, WMREG_STATUS);
   11537 		if (((status & STATUS_2P5_SKU) != 0)
   11538 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11539 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11540 		} else
   11541 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11542 	} else {
   11543 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11544 		case PCS_LSTS_SPEED_10:
   11545 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11546 			break;
   11547 		case PCS_LSTS_SPEED_100:
   11548 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11549 			break;
   11550 		case PCS_LSTS_SPEED_1000:
   11551 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11552 			break;
   11553 		default:
   11554 			device_printf(sc->sc_dev, "Unknown speed\n");
   11555 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11556 			break;
   11557 		}
   11558 	}
   11559 	if ((reg & PCS_LSTS_FDX) != 0)
   11560 		ifmr->ifm_active |= IFM_FDX;
   11561 	else
   11562 		ifmr->ifm_active |= IFM_HDX;
   11563 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11564 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11565 		/* Check flow */
   11566 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11567 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11568 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11569 			goto setled;
   11570 		}
   11571 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11572 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11573 		DPRINTF(WM_DEBUG_LINK,
   11574 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
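          		/*
          		 * Resolve flow control per IEEE 802.3 Annex 28B: if both
          		 * sides advertise symmetric pause, enable it in both
          		 * directions; otherwise a matching asymmetric pair
          		 * enables TX-only or RX-only pause.
          		 */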
   11575 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11576 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11577 			mii->mii_media_active |= IFM_FLOW
   11578 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11579 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11580 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11581 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11582 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11583 			mii->mii_media_active |= IFM_FLOW
   11584 			    | IFM_ETH_TXPAUSE;
   11585 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11586 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11587 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11588 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11589 			mii->mii_media_active |= IFM_FLOW
   11590 			    | IFM_ETH_RXPAUSE;
   11591 		}
   11592 	}
   11593 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11594 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11595 setled:
   11596 	wm_tbi_serdes_set_linkled(sc);
   11597 }
   11598 
   11599 /*
   11600  * wm_serdes_tick:
   11601  *
   11602  *	Check the link on serdes devices.
   11603  */
   11604 static void
   11605 wm_serdes_tick(struct wm_softc *sc)
   11606 {
   11607 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11608 	struct mii_data *mii = &sc->sc_mii;
   11609 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11610 	uint32_t reg;
   11611 
   11612 	KASSERT(WM_CORE_LOCKED(sc));
   11613 
   11614 	mii->mii_media_status = IFM_AVALID;
   11615 	mii->mii_media_active = IFM_ETHER;
   11616 
   11617 	/* Check PCS */
   11618 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11619 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11620 		mii->mii_media_status |= IFM_ACTIVE;
   11621 		sc->sc_tbi_linkup = 1;
   11622 		sc->sc_tbi_serdes_ticks = 0;
   11623 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11624 		if ((reg & PCS_LSTS_FDX) != 0)
   11625 			mii->mii_media_active |= IFM_FDX;
   11626 		else
   11627 			mii->mii_media_active |= IFM_HDX;
   11628 	} else {
   11629 		mii->mii_media_status |= IFM_NONE;
   11630 		sc->sc_tbi_linkup = 0;
   11631 		/* If the timer expired, retry autonegotiation */
   11632 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11633 		    && (++sc->sc_tbi_serdes_ticks
   11634 			>= sc->sc_tbi_serdes_anegticks)) {
   11635 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11636 			sc->sc_tbi_serdes_ticks = 0;
   11637 			/* XXX */
   11638 			wm_serdes_mediachange(ifp);
   11639 		}
   11640 	}
   11641 
   11642 	wm_tbi_serdes_set_linkled(sc);
   11643 }
   11644 
   11645 /* SFP related */
   11646 
   11647 static int
   11648 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11649 {
   11650 	uint32_t i2ccmd;
   11651 	int i;
   11652 
   11653 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11654 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11655 
   11656 	/* Poll the ready bit */
   11657 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11658 		delay(50);
   11659 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11660 		if (i2ccmd & I2CCMD_READY)
   11661 			break;
   11662 	}
   11663 	if ((i2ccmd & I2CCMD_READY) == 0)
   11664 		return -1;
   11665 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11666 		return -1;
   11667 
   11668 	*data = i2ccmd & 0x00ff;
   11669 
   11670 	return 0;
   11671 }
   11672 
   11673 static uint32_t
   11674 wm_sfp_get_media_type(struct wm_softc *sc)
   11675 {
   11676 	uint32_t ctrl_ext;
   11677 	uint8_t val = 0;
   11678 	int timeout = 3;
   11679 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11680 	int rv = -1;
   11681 
   11682 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11683 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11684 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11685 	CSR_WRITE_FLUSH(sc);
   11686 
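          	/*
          	 * Per SFF-8472, byte 0 of the module EEPROM is the physical
          	 * device identifier and byte 6 holds the Ethernet compliance
          	 * codes checked below.
          	 */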
   11687 	/* Read SFP module data */
   11688 	while (timeout) {
   11689 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11690 		if (rv == 0)
   11691 			break;
   11692 		delay(100*1000); /* XXX too big */
   11693 		timeout--;
   11694 	}
   11695 	if (rv != 0)
   11696 		goto out;
   11697 	switch (val) {
   11698 	case SFF_SFP_ID_SFF:
   11699 		aprint_normal_dev(sc->sc_dev,
   11700 		    "Module/Connector soldered to board\n");
   11701 		break;
   11702 	case SFF_SFP_ID_SFP:
   11703 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11704 		break;
   11705 	case SFF_SFP_ID_UNKNOWN:
   11706 		goto out;
   11707 	default:
   11708 		break;
   11709 	}
   11710 
   11711 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11712 	if (rv != 0) {
   11713 		goto out;
   11714 	}
   11715 
   11716 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11717 		mediatype = WM_MEDIATYPE_SERDES;
   11718 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11719 		sc->sc_flags |= WM_F_SGMII;
   11720 		mediatype = WM_MEDIATYPE_COPPER;
   11721 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11722 		sc->sc_flags |= WM_F_SGMII;
   11723 		mediatype = WM_MEDIATYPE_SERDES;
   11724 	}
   11725 
   11726 out:
   11727 	/* Restore I2C interface setting */
   11728 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11729 
   11730 	return mediatype;
   11731 }
   11732 
   11733 /*
   11734  * NVM related.
   11735  * Microwire, SPI (w/wo EERD) and Flash.
   11736  */
   11737 
   11738 /* Both spi and uwire */
   11739 
   11740 /*
   11741  * wm_eeprom_sendbits:
   11742  *
   11743  *	Send a series of bits to the EEPROM.
   11744  */
   11745 static void
   11746 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11747 {
   11748 	uint32_t reg;
   11749 	int x;
   11750 
   11751 	reg = CSR_READ(sc, WMREG_EECD);
   11752 
   11753 	for (x = nbits; x > 0; x--) {
   11754 		if (bits & (1U << (x - 1)))
   11755 			reg |= EECD_DI;
   11756 		else
   11757 			reg &= ~EECD_DI;
   11758 		CSR_WRITE(sc, WMREG_EECD, reg);
   11759 		CSR_WRITE_FLUSH(sc);
   11760 		delay(2);
   11761 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11762 		CSR_WRITE_FLUSH(sc);
   11763 		delay(2);
   11764 		CSR_WRITE(sc, WMREG_EECD, reg);
   11765 		CSR_WRITE_FLUSH(sc);
   11766 		delay(2);
   11767 	}
   11768 }
   11769 
   11770 /*
   11771  * wm_eeprom_recvbits:
   11772  *
   11773  *	Receive a series of bits from the EEPROM.
   11774  */
   11775 static void
   11776 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11777 {
   11778 	uint32_t reg, val;
   11779 	int x;
   11780 
   11781 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11782 
   11783 	val = 0;
   11784 	for (x = nbits; x > 0; x--) {
   11785 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11786 		CSR_WRITE_FLUSH(sc);
   11787 		delay(2);
   11788 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11789 			val |= (1U << (x - 1));
   11790 		CSR_WRITE(sc, WMREG_EECD, reg);
   11791 		CSR_WRITE_FLUSH(sc);
   11792 		delay(2);
   11793 	}
   11794 	*valp = val;
   11795 }
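
          /*
           * For illustration, the Microwire callers below frame a READ of
           * word `addr' (a hypothetical variable) as:
           *
           *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
           *	wm_eeprom_sendbits(sc, addr, sc->sc_nvm_addrbits);
           *	wm_eeprom_recvbits(sc, &val, 16);
           *
           * with EECD_CS asserted around the whole sequence.
           */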
   11796 
   11797 /* Microwire */
   11798 
   11799 /*
   11800  * wm_nvm_read_uwire:
   11801  *
    11802  *	Read words from the EEPROM using the MicroWire protocol.
   11803  */
   11804 static int
   11805 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11806 {
   11807 	uint32_t reg, val;
   11808 	int i;
   11809 
   11810 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11811 		device_xname(sc->sc_dev), __func__));
   11812 
   11813 	if (sc->nvm.acquire(sc) != 0)
   11814 		return -1;
   11815 
   11816 	for (i = 0; i < wordcnt; i++) {
   11817 		/* Clear SK and DI. */
   11818 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11819 		CSR_WRITE(sc, WMREG_EECD, reg);
   11820 
   11821 		/*
   11822 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11823 		 * and Xen.
   11824 		 *
   11825 		 * We use this workaround only for 82540 because qemu's
   11826 		 * e1000 act as 82540.
   11827 		 */
   11828 		if (sc->sc_type == WM_T_82540) {
   11829 			reg |= EECD_SK;
   11830 			CSR_WRITE(sc, WMREG_EECD, reg);
   11831 			reg &= ~EECD_SK;
   11832 			CSR_WRITE(sc, WMREG_EECD, reg);
   11833 			CSR_WRITE_FLUSH(sc);
   11834 			delay(2);
   11835 		}
   11836 		/* XXX: end of workaround */
   11837 
   11838 		/* Set CHIP SELECT. */
   11839 		reg |= EECD_CS;
   11840 		CSR_WRITE(sc, WMREG_EECD, reg);
   11841 		CSR_WRITE_FLUSH(sc);
   11842 		delay(2);
   11843 
   11844 		/* Shift in the READ command. */
   11845 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11846 
   11847 		/* Shift in address. */
   11848 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11849 
   11850 		/* Shift out the data. */
   11851 		wm_eeprom_recvbits(sc, &val, 16);
   11852 		data[i] = val & 0xffff;
   11853 
   11854 		/* Clear CHIP SELECT. */
   11855 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11856 		CSR_WRITE(sc, WMREG_EECD, reg);
   11857 		CSR_WRITE_FLUSH(sc);
   11858 		delay(2);
   11859 	}
   11860 
   11861 	sc->nvm.release(sc);
   11862 	return 0;
   11863 }
   11864 
   11865 /* SPI */
   11866 
   11867 /*
   11868  * Set SPI and FLASH related information from the EECD register.
   11869  * For 82541 and 82547, the word size is taken from EEPROM.
   11870  */
   11871 static int
   11872 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11873 {
   11874 	int size;
   11875 	uint32_t reg;
   11876 	uint16_t data;
   11877 
   11878 	reg = CSR_READ(sc, WMREG_EECD);
   11879 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11880 
   11881 	/* Read the size of NVM from EECD by default */
   11882 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11883 	switch (sc->sc_type) {
   11884 	case WM_T_82541:
   11885 	case WM_T_82541_2:
   11886 	case WM_T_82547:
   11887 	case WM_T_82547_2:
    11888 		/* Set a dummy word size so the EEPROM size word can be read */
   11889 		sc->sc_nvm_wordsize = 64;
   11890 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11891 			aprint_error_dev(sc->sc_dev,
   11892 			    "%s: failed to read EEPROM size\n", __func__);
   11893 		}
   11894 		reg = data;
   11895 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11896 		if (size == 0)
    11897 			size = 6; /* 1 << 6 = 64 words */
   11898 		else
   11899 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11900 		break;
   11901 	case WM_T_80003:
   11902 	case WM_T_82571:
   11903 	case WM_T_82572:
   11904 	case WM_T_82573: /* SPI case */
   11905 	case WM_T_82574: /* SPI case */
   11906 	case WM_T_82583: /* SPI case */
   11907 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11908 		if (size > 14)
   11909 			size = 14;
   11910 		break;
   11911 	case WM_T_82575:
   11912 	case WM_T_82576:
   11913 	case WM_T_82580:
   11914 	case WM_T_I350:
   11915 	case WM_T_I354:
   11916 	case WM_T_I210:
   11917 	case WM_T_I211:
   11918 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11919 		if (size > 15)
   11920 			size = 15;
   11921 		break;
   11922 	default:
   11923 		aprint_error_dev(sc->sc_dev,
   11924 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11925 		return -1;
   11926 		break;
   11927 	}
   11928 
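          	/* size is log2 of the word count, e.g. 8 -> 1 << 8 = 256 words */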
   11929 	sc->sc_nvm_wordsize = 1 << size;
   11930 
   11931 	return 0;
   11932 }
   11933 
   11934 /*
   11935  * wm_nvm_ready_spi:
   11936  *
   11937  *	Wait for a SPI EEPROM to be ready for commands.
   11938  */
   11939 static int
   11940 wm_nvm_ready_spi(struct wm_softc *sc)
   11941 {
   11942 	uint32_t val;
   11943 	int usec;
   11944 
   11945 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11946 		device_xname(sc->sc_dev), __func__));
   11947 
   11948 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11949 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11950 		wm_eeprom_recvbits(sc, &val, 8);
   11951 		if ((val & SPI_SR_RDY) == 0)
   11952 			break;
   11953 	}
   11954 	if (usec >= SPI_MAX_RETRIES) {
    11955 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11956 		return -1;
   11957 	}
   11958 	return 0;
   11959 }
   11960 
   11961 /*
   11962  * wm_nvm_read_spi:
   11963  *
    11964  *	Read words from the EEPROM using the SPI protocol.
   11965  */
   11966 static int
   11967 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11968 {
   11969 	uint32_t reg, val;
   11970 	int i;
   11971 	uint8_t opc;
   11972 	int rv = 0;
   11973 
   11974 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11975 		device_xname(sc->sc_dev), __func__));
   11976 
   11977 	if (sc->nvm.acquire(sc) != 0)
   11978 		return -1;
   11979 
   11980 	/* Clear SK and CS. */
   11981 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11982 	CSR_WRITE(sc, WMREG_EECD, reg);
   11983 	CSR_WRITE_FLUSH(sc);
   11984 	delay(2);
   11985 
   11986 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11987 		goto out;
   11988 
   11989 	/* Toggle CS to flush commands. */
   11990 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11991 	CSR_WRITE_FLUSH(sc);
   11992 	delay(2);
   11993 	CSR_WRITE(sc, WMREG_EECD, reg);
   11994 	CSR_WRITE_FLUSH(sc);
   11995 	delay(2);
   11996 
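          	/*
          	 * SPI parts with 8-bit addressing carry address bit 8 in the
          	 * opcode (SPI_OPC_A8), so set it when reading beyond word 127.
          	 * The word address becomes a byte address below (word << 1).
          	 */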
   11997 	opc = SPI_OPC_READ;
   11998 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11999 		opc |= SPI_OPC_A8;
   12000 
   12001 	wm_eeprom_sendbits(sc, opc, 8);
   12002 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12003 
   12004 	for (i = 0; i < wordcnt; i++) {
   12005 		wm_eeprom_recvbits(sc, &val, 16);
   12006 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12007 	}
   12008 
   12009 	/* Raise CS and clear SK. */
   12010 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12011 	CSR_WRITE(sc, WMREG_EECD, reg);
   12012 	CSR_WRITE_FLUSH(sc);
   12013 	delay(2);
   12014 
   12015 out:
   12016 	sc->nvm.release(sc);
   12017 	return rv;
   12018 }
   12019 
   12020 /* Using with EERD */
   12021 
   12022 static int
   12023 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12024 {
   12025 	uint32_t attempts = 100000;
   12026 	uint32_t i, reg = 0;
   12027 	int32_t done = -1;
   12028 
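          	/* Poll DONE for up to ~0.5s (100000 iterations x 5us). */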
   12029 	for (i = 0; i < attempts; i++) {
   12030 		reg = CSR_READ(sc, rw);
   12031 
   12032 		if (reg & EERD_DONE) {
   12033 			done = 0;
   12034 			break;
   12035 		}
   12036 		delay(5);
   12037 	}
   12038 
   12039 	return done;
   12040 }
   12041 
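          /*
           * EERD reads are register-driven: write the word address and the
           * START bit, poll DONE, then take the data from the upper half of
           * the register.  A minimal sketch of one word read (illustrative
           * only, `off' and `word' are hypothetical):
           *
           *	CSR_WRITE(sc, WMREG_EERD, (off << EERD_ADDR_SHIFT) | EERD_START);
           *	if (wm_poll_eerd_eewr_done(sc, WMREG_EERD) == 0)
           *		word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
           */
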
   12042 static int
   12043 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12044 {
   12045 	int i, eerd = 0;
   12046 	int rv = 0;
   12047 
   12048 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12049 		device_xname(sc->sc_dev), __func__));
   12050 
   12051 	if (sc->nvm.acquire(sc) != 0)
   12052 		return -1;
   12053 
   12054 	for (i = 0; i < wordcnt; i++) {
   12055 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12056 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12057 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12058 		if (rv != 0) {
   12059 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12060 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12061 			break;
   12062 		}
   12063 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12064 	}
   12065 
   12066 	sc->nvm.release(sc);
   12067 	return rv;
   12068 }
   12069 
   12070 /* Flash */
   12071 
   12072 static int
   12073 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12074 {
   12075 	uint32_t eecd;
   12076 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12077 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12078 	uint32_t nvm_dword = 0;
   12079 	uint8_t sig_byte = 0;
   12080 	int rv;
   12081 
   12082 	switch (sc->sc_type) {
   12083 	case WM_T_PCH_SPT:
   12084 	case WM_T_PCH_CNP:
   12085 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12086 		act_offset = ICH_NVM_SIG_WORD * 2;
   12087 
   12088 		/* set bank to 0 in case flash read fails. */
   12089 		*bank = 0;
   12090 
   12091 		/* Check bank 0 */
   12092 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12093 		if (rv != 0)
   12094 			return rv;
   12095 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12096 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12097 			*bank = 0;
   12098 			return 0;
   12099 		}
   12100 
   12101 		/* Check bank 1 */
    12102 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12103 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    12104 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12105 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12106 			*bank = 1;
   12107 			return 0;
   12108 		}
   12109 		aprint_error_dev(sc->sc_dev,
   12110 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12111 		return -1;
   12112 	case WM_T_ICH8:
   12113 	case WM_T_ICH9:
   12114 		eecd = CSR_READ(sc, WMREG_EECD);
   12115 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12116 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12117 			return 0;
   12118 		}
   12119 		/* FALLTHROUGH */
   12120 	default:
   12121 		/* Default to 0 */
   12122 		*bank = 0;
   12123 
   12124 		/* Check bank 0 */
   12125 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12126 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12127 			*bank = 0;
   12128 			return 0;
   12129 		}
   12130 
   12131 		/* Check bank 1 */
   12132 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12133 		    &sig_byte);
   12134 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12135 			*bank = 1;
   12136 			return 0;
   12137 		}
   12138 	}
   12139 
   12140 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12141 		device_xname(sc->sc_dev)));
   12142 	return -1;
   12143 }
   12144 
   12145 /******************************************************************************
   12146  * This function does initial flash setup so that a new read/write/erase cycle
   12147  * can be started.
   12148  *
   12149  * sc - The pointer to the hw structure
   12150  ****************************************************************************/
   12151 static int32_t
   12152 wm_ich8_cycle_init(struct wm_softc *sc)
   12153 {
   12154 	uint16_t hsfsts;
   12155 	int32_t error = 1;
   12156 	int32_t i     = 0;
   12157 
   12158 	if (sc->sc_type >= WM_T_PCH_SPT)
   12159 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12160 	else
   12161 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12162 
    12163 	/* Check that the Flash Descriptor is valid (FLDVAL in HW status) */
   12164 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   12165 		return error;
   12166 	}
   12167 
    12168 	/* Clear FCERR and DAEL in Hw status by writing 1s to them */
   12170 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12171 
   12172 	if (sc->sc_type >= WM_T_PCH_SPT)
   12173 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12174 	else
   12175 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12176 
    12177 	/*
    12178 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12179 	 * check against in order to start a new cycle, or the FDONE bit
    12180 	 * should be changed in the hardware so that it reads 1 after a
    12181 	 * hardware reset, which could then indicate whether a cycle is in
    12182 	 * progress or has completed.  We should also have some software
    12183 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    12184 	 * so that accesses to those bits by two threads are serialized,
    12185 	 * or a way to keep two threads from starting a cycle at once.
    12186 	 */
   12187 
   12188 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12189 		/*
   12190 		 * There is no cycle running at present, so we can start a
   12191 		 * cycle
   12192 		 */
   12193 
   12194 		/* Begin by setting Flash Cycle Done. */
   12195 		hsfsts |= HSFSTS_DONE;
   12196 		if (sc->sc_type >= WM_T_PCH_SPT)
   12197 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12198 			    hsfsts & 0xffffUL);
   12199 		else
   12200 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12201 		error = 0;
   12202 	} else {
   12203 		/*
    12204 		 * Otherwise poll for some time so the current cycle has a
   12205 		 * chance to end before giving up.
   12206 		 */
   12207 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12208 			if (sc->sc_type >= WM_T_PCH_SPT)
   12209 				hsfsts = ICH8_FLASH_READ32(sc,
   12210 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12211 			else
   12212 				hsfsts = ICH8_FLASH_READ16(sc,
   12213 				    ICH_FLASH_HSFSTS);
   12214 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12215 				error = 0;
   12216 				break;
   12217 			}
   12218 			delay(1);
   12219 		}
   12220 		if (error == 0) {
   12221 			/*
    12222 			 * The previous cycle ended within the timeout; now
    12223 			 * set the Flash Cycle Done.
   12224 			 */
   12225 			hsfsts |= HSFSTS_DONE;
   12226 			if (sc->sc_type >= WM_T_PCH_SPT)
   12227 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12228 				    hsfsts & 0xffffUL);
   12229 			else
   12230 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12231 				    hsfsts);
   12232 		}
   12233 	}
   12234 	return error;
   12235 }
   12236 
   12237 /******************************************************************************
   12238  * This function starts a flash cycle and waits for its completion
   12239  *
   12240  * sc - The pointer to the hw structure
   12241  ****************************************************************************/
   12242 static int32_t
   12243 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12244 {
   12245 	uint16_t hsflctl;
   12246 	uint16_t hsfsts;
   12247 	int32_t error = 1;
   12248 	uint32_t i = 0;
   12249 
   12250 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12251 	if (sc->sc_type >= WM_T_PCH_SPT)
   12252 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12253 	else
   12254 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12255 	hsflctl |= HSFCTL_GO;
   12256 	if (sc->sc_type >= WM_T_PCH_SPT)
   12257 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12258 		    (uint32_t)hsflctl << 16);
   12259 	else
   12260 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12261 
    12262 	/* Wait until the FDONE bit is set, for at most timeout microseconds */
   12263 	do {
   12264 		if (sc->sc_type >= WM_T_PCH_SPT)
   12265 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12266 			    & 0xffffUL;
   12267 		else
   12268 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12269 		if (hsfsts & HSFSTS_DONE)
   12270 			break;
   12271 		delay(1);
   12272 		i++;
   12273 	} while (i < timeout);
    12274 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12275 		error = 0;
   12276 
   12277 	return error;
   12278 }
   12279 
   12280 /******************************************************************************
   12281  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12282  *
   12283  * sc - The pointer to the hw structure
   12284  * index - The index of the byte or word to read.
   12285  * size - Size of data to read, 1=byte 2=word, 4=dword
   12286  * data - Pointer to the word to store the value read.
   12287  *****************************************************************************/
   12288 static int32_t
   12289 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12290     uint32_t size, uint32_t *data)
   12291 {
   12292 	uint16_t hsfsts;
   12293 	uint16_t hsflctl;
   12294 	uint32_t flash_linear_address;
   12295 	uint32_t flash_data = 0;
   12296 	int32_t error = 1;
   12297 	int32_t count = 0;
   12298 
    12299 	if (size < 1 || size > 4 || data == NULL ||
   12300 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12301 		return error;
   12302 
   12303 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12304 	    sc->sc_ich8_flash_base;
   12305 
   12306 	do {
   12307 		delay(1);
   12308 		/* Steps */
   12309 		error = wm_ich8_cycle_init(sc);
   12310 		if (error)
   12311 			break;
   12312 
   12313 		if (sc->sc_type >= WM_T_PCH_SPT)
   12314 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12315 			    >> 16;
   12316 		else
   12317 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12318 		/* The BCOUNT field holds size - 1 (0 = 1 byte ... 3 = 4 bytes) */
   12319 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12320 		    & HSFCTL_BCOUNT_MASK;
   12321 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12322 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12323 			/*
   12324 			 * In SPT, This register is in Lan memory space, not
   12325 			 * flash. Therefore, only 32 bit access is supported.
   12326 			 */
   12327 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12328 			    (uint32_t)hsflctl << 16);
   12329 		} else
   12330 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12331 
   12332 		/*
   12333 		 * Write the last 24 bits of index into Flash Linear address
   12334 		 * field in Flash Address
   12335 		 */
   12336 		/* TODO: TBD maybe check the index against the size of flash */
   12337 
   12338 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12339 
   12340 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12341 
    12342 		/*
    12343 		 * If FCERR is set, clear it and retry the whole
    12344 		 * sequence a few more times; otherwise read the
    12345 		 * result from Flash Data0, least significant byte
    12346 		 * first.
    12347 		 */
   12348 		if (error == 0) {
   12349 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12350 			if (size == 1)
   12351 				*data = (uint8_t)(flash_data & 0x000000FF);
   12352 			else if (size == 2)
   12353 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12354 			else if (size == 4)
   12355 				*data = (uint32_t)flash_data;
   12356 			break;
   12357 		} else {
   12358 			/*
   12359 			 * If we've gotten here, then things are probably
   12360 			 * completely hosed, but if the error condition is
   12361 			 * detected, it won't hurt to give it another try...
   12362 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12363 			 */
   12364 			if (sc->sc_type >= WM_T_PCH_SPT)
   12365 				hsfsts = ICH8_FLASH_READ32(sc,
   12366 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12367 			else
   12368 				hsfsts = ICH8_FLASH_READ16(sc,
   12369 				    ICH_FLASH_HSFSTS);
   12370 
   12371 			if (hsfsts & HSFSTS_ERR) {
   12372 				/* Repeat for some time before giving up. */
   12373 				continue;
   12374 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12375 				break;
   12376 		}
   12377 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12378 
   12379 	return error;
   12380 }
   12381 
   12382 /******************************************************************************
   12383  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12384  *
   12385  * sc - pointer to wm_hw structure
   12386  * index - The index of the byte to read.
   12387  * data - Pointer to a byte to store the value read.
   12388  *****************************************************************************/
   12389 static int32_t
   12390 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12391 {
   12392 	int32_t status;
   12393 	uint32_t word = 0;
   12394 
   12395 	status = wm_read_ich8_data(sc, index, 1, &word);
   12396 	if (status == 0)
   12397 		*data = (uint8_t)word;
   12398 	else
   12399 		*data = 0;
   12400 
   12401 	return status;
   12402 }
   12403 
   12404 /******************************************************************************
   12405  * Reads a word from the NVM using the ICH8 flash access registers.
   12406  *
   12407  * sc - pointer to wm_hw structure
   12408  * index - The starting byte index of the word to read.
   12409  * data - Pointer to a word to store the value read.
   12410  *****************************************************************************/
   12411 static int32_t
   12412 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12413 {
   12414 	int32_t status;
   12415 	uint32_t word = 0;
   12416 
   12417 	status = wm_read_ich8_data(sc, index, 2, &word);
   12418 	if (status == 0)
   12419 		*data = (uint16_t)word;
   12420 	else
   12421 		*data = 0;
   12422 
   12423 	return status;
   12424 }
   12425 
   12426 /******************************************************************************
   12427  * Reads a dword from the NVM using the ICH8 flash access registers.
   12428  *
   12429  * sc - pointer to wm_hw structure
   12430  * index - The starting byte index of the word to read.
   12431  * data - Pointer to a word to store the value read.
   12432  *****************************************************************************/
   12433 static int32_t
   12434 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12435 {
   12436 	int32_t status;
   12437 
   12438 	status = wm_read_ich8_data(sc, index, 4, data);
   12439 	return status;
   12440 }
   12441 
   12442 /******************************************************************************
   12443  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12444  * register.
   12445  *
   12446  * sc - Struct containing variables accessed by shared code
   12447  * offset - offset of word in the EEPROM to read
   12448  * data - word read from the EEPROM
   12449  * words - number of words to read
   12450  *****************************************************************************/
   12451 static int
   12452 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12453 {
   12454 	int32_t	 rv = 0;
   12455 	uint32_t flash_bank = 0;
   12456 	uint32_t act_offset = 0;
   12457 	uint32_t bank_offset = 0;
   12458 	uint16_t word = 0;
   12459 	uint16_t i = 0;
   12460 
   12461 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12462 		device_xname(sc->sc_dev), __func__));
   12463 
   12464 	if (sc->nvm.acquire(sc) != 0)
   12465 		return -1;
   12466 
   12467 	/*
   12468 	 * We need to know which is the valid flash bank.  In the event
   12469 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12470 	 * managing flash_bank. So it cannot be trusted and needs
   12471 	 * to be updated with each read.
   12472 	 */
   12473 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12474 	if (rv) {
   12475 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12476 			device_xname(sc->sc_dev)));
   12477 		flash_bank = 0;
   12478 	}
   12479 
   12480 	/*
   12481 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12482 	 * size
   12483 	 */
   12484 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12485 
   12486 	for (i = 0; i < words; i++) {
   12487 		/* The NVM part needs a byte offset, hence * 2 */
   12488 		act_offset = bank_offset + ((offset + i) * 2);
   12489 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12490 		if (rv) {
   12491 			aprint_error_dev(sc->sc_dev,
   12492 			    "%s: failed to read NVM\n", __func__);
   12493 			break;
   12494 		}
   12495 		data[i] = word;
   12496 	}
   12497 
   12498 	sc->nvm.release(sc);
   12499 	return rv;
   12500 }
   12501 
   12502 /******************************************************************************
   12503  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12504  * register.
   12505  *
   12506  * sc - Struct containing variables accessed by shared code
   12507  * offset - offset of word in the EEPROM to read
   12508  * data - word read from the EEPROM
   12509  * words - number of words to read
   12510  *****************************************************************************/
   12511 static int
   12512 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12513 {
   12514 	int32_t	 rv = 0;
   12515 	uint32_t flash_bank = 0;
   12516 	uint32_t act_offset = 0;
   12517 	uint32_t bank_offset = 0;
   12518 	uint32_t dword = 0;
   12519 	uint16_t i = 0;
   12520 
   12521 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12522 		device_xname(sc->sc_dev), __func__));
   12523 
   12524 	if (sc->nvm.acquire(sc) != 0)
   12525 		return -1;
   12526 
   12527 	/*
   12528 	 * We need to know which is the valid flash bank.  In the event
   12529 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12530 	 * managing flash_bank. So it cannot be trusted and needs
   12531 	 * to be updated with each read.
   12532 	 */
   12533 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12534 	if (rv) {
   12535 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12536 			device_xname(sc->sc_dev)));
   12537 		flash_bank = 0;
   12538 	}
   12539 
   12540 	/*
   12541 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12542 	 * size
   12543 	 */
   12544 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12545 
   12546 	for (i = 0; i < words; i++) {
   12547 		/* The NVM part needs a byte offset, hence * 2 */
   12548 		act_offset = bank_offset + ((offset + i) * 2);
   12549 		/* but we must read dword aligned, so mask ... */
   12550 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12551 		if (rv) {
   12552 			aprint_error_dev(sc->sc_dev,
   12553 			    "%s: failed to read NVM\n", __func__);
   12554 			break;
   12555 		}
   12556 		/* ... and pick out low or high word */
   12557 		if ((act_offset & 0x2) == 0)
   12558 			data[i] = (uint16_t)(dword & 0xFFFF);
   12559 		else
   12560 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12561 	}
   12562 
   12563 	sc->nvm.release(sc);
   12564 	return rv;
   12565 }
   12566 
   12567 /* iNVM */
   12568 
   12569 static int
   12570 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12571 {
    12572 	int32_t	 rv = -1;	/* Fail unless the word is found */
   12573 	uint32_t invm_dword;
   12574 	uint16_t i;
   12575 	uint8_t record_type, word_address;
   12576 
   12577 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12578 		device_xname(sc->sc_dev), __func__));
   12579 
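          	/*
          	 * iNVM is a small array of typed dwords: walk the records,
          	 * skipping over CSR- and RSA-autoload payloads, until a
          	 * word-autoload record matching the requested address is found.
          	 */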
   12580 	for (i = 0; i < INVM_SIZE; i++) {
   12581 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12582 		/* Get record type */
   12583 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12584 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12585 			break;
   12586 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12587 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12588 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12589 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12590 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12591 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12592 			if (word_address == address) {
   12593 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12594 				rv = 0;
   12595 				break;
   12596 			}
   12597 		}
   12598 	}
   12599 
   12600 	return rv;
   12601 }
   12602 
   12603 static int
   12604 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12605 {
   12606 	int rv = 0;
   12607 	int i;
   12608 
   12609 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12610 		device_xname(sc->sc_dev), __func__));
   12611 
   12612 	if (sc->nvm.acquire(sc) != 0)
   12613 		return -1;
   12614 
   12615 	for (i = 0; i < words; i++) {
   12616 		switch (offset + i) {
   12617 		case NVM_OFF_MACADDR:
   12618 		case NVM_OFF_MACADDR1:
   12619 		case NVM_OFF_MACADDR2:
   12620 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12621 			if (rv != 0) {
   12622 				data[i] = 0xffff;
   12623 				rv = -1;
   12624 			}
   12625 			break;
   12626 		case NVM_OFF_CFG2:
   12627 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12628 			if (rv != 0) {
   12629 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12630 				rv = 0;
   12631 			}
   12632 			break;
   12633 		case NVM_OFF_CFG4:
   12634 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12635 			if (rv != 0) {
   12636 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12637 				rv = 0;
   12638 			}
   12639 			break;
   12640 		case NVM_OFF_LED_1_CFG:
   12641 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12642 			if (rv != 0) {
   12643 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12644 				rv = 0;
   12645 			}
   12646 			break;
   12647 		case NVM_OFF_LED_0_2_CFG:
   12648 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12649 			if (rv != 0) {
   12650 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12651 				rv = 0;
   12652 			}
   12653 			break;
   12654 		case NVM_OFF_ID_LED_SETTINGS:
   12655 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12656 			if (rv != 0) {
   12657 				*data = ID_LED_RESERVED_FFFF;
   12658 				rv = 0;
   12659 			}
   12660 			break;
   12661 		default:
   12662 			DPRINTF(WM_DEBUG_NVM,
   12663 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12664 			*data = NVM_RESERVED_WORD;
   12665 			break;
   12666 		}
   12667 	}
   12668 
   12669 	sc->nvm.release(sc);
   12670 	return rv;
   12671 }
   12672 
   12673 /* Lock, detecting NVM type, validate checksum, version and read */
   12674 
   12675 static int
   12676 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12677 {
   12678 	uint32_t eecd = 0;
   12679 
   12680 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12681 	    || sc->sc_type == WM_T_82583) {
   12682 		eecd = CSR_READ(sc, WMREG_EECD);
   12683 
   12684 		/* Isolate bits 15 & 16 */
   12685 		eecd = ((eecd >> 15) & 0x03);
   12686 
   12687 		/* If both bits are set, device is Flash type */
   12688 		if (eecd == 0x03)
   12689 			return 0;
   12690 	}
   12691 	return 1;
   12692 }
   12693 
   12694 static int
   12695 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12696 {
   12697 	uint32_t eec;
   12698 
   12699 	eec = CSR_READ(sc, WMREG_EEC);
   12700 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12701 		return 1;
   12702 
   12703 	return 0;
   12704 }
   12705 
   12706 /*
   12707  * wm_nvm_validate_checksum
   12708  *
    12709  * The sum of the first 64 16-bit words must equal NVM_CHECKSUM (0xBABA).
   12710  */
   12711 static int
   12712 wm_nvm_validate_checksum(struct wm_softc *sc)
   12713 {
   12714 	uint16_t checksum;
   12715 	uint16_t eeprom_data;
   12716 #ifdef WM_DEBUG
   12717 	uint16_t csum_wordaddr, valid_checksum;
   12718 #endif
   12719 	int i;
   12720 
   12721 	checksum = 0;
   12722 
   12723 	/* Don't check for I211 */
   12724 	if (sc->sc_type == WM_T_I211)
   12725 		return 0;
   12726 
   12727 #ifdef WM_DEBUG
   12728 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12729 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12730 		csum_wordaddr = NVM_OFF_COMPAT;
   12731 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12732 	} else {
   12733 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12734 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12735 	}
   12736 
   12737 	/* Dump EEPROM image for debug */
   12738 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12739 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12740 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12741 		/* XXX PCH_SPT? */
   12742 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12743 		if ((eeprom_data & valid_checksum) == 0) {
   12744 			DPRINTF(WM_DEBUG_NVM,
    12745 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12746 				device_xname(sc->sc_dev), eeprom_data,
   12747 				    valid_checksum));
   12748 		}
   12749 	}
   12750 
   12751 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12752 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12753 		for (i = 0; i < NVM_SIZE; i++) {
   12754 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12755 				printf("XXXX ");
   12756 			else
   12757 				printf("%04hx ", eeprom_data);
   12758 			if (i % 8 == 7)
   12759 				printf("\n");
   12760 		}
   12761 	}
   12762 
   12763 #endif /* WM_DEBUG */
   12764 
   12765 	for (i = 0; i < NVM_SIZE; i++) {
   12766 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12767 			return 1;
   12768 		checksum += eeprom_data;
   12769 	}
   12770 
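          	/* A mismatch is only reported; we still return success. */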
   12771 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12772 #ifdef WM_DEBUG
   12773 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12774 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12775 #endif
   12776 	}
   12777 
   12778 	return 0;
   12779 }
   12780 
   12781 static void
   12782 wm_nvm_version_invm(struct wm_softc *sc)
   12783 {
   12784 	uint32_t dword;
   12785 
   12786 	/*
    12787 	 * Linux's code to decode the version is very strange, so we don't
    12788 	 * follow that algorithm and just use word 61 as the document
    12789 	 * describes.  Perhaps it's not perfect, though...
   12790 	 *
   12791 	 * Example:
   12792 	 *
   12793 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12794 	 */
   12795 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12796 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12797 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12798 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12799 }
   12800 
   12801 static void
   12802 wm_nvm_version(struct wm_softc *sc)
   12803 {
   12804 	uint16_t major, minor, build, patch;
   12805 	uint16_t uid0, uid1;
   12806 	uint16_t nvm_data;
   12807 	uint16_t off;
   12808 	bool check_version = false;
   12809 	bool check_optionrom = false;
   12810 	bool have_build = false;
   12811 	bool have_uid = true;
   12812 
   12813 	/*
   12814 	 * Version format:
   12815 	 *
   12816 	 * XYYZ
   12817 	 * X0YZ
   12818 	 * X0YY
   12819 	 *
   12820 	 * Example:
   12821 	 *
   12822 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12823 	 *	82571	0x50a6	5.10.6?
   12824 	 *	82572	0x506a	5.6.10?
   12825 	 *	82572EI	0x5069	5.6.9?
   12826 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12827 	 *		0x2013	2.1.3?
    12828 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   12829 	 */
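
          	/*
          	 * Worked example: 0x50a2 decodes as major 5 (top nibble),
          	 * minor 0x0a -> 10 (BCD) and build 2, i.e. 5.10.2 as in the
          	 * table above.
          	 */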
   12830 
   12831 	/*
   12832 	 * XXX
   12833 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    12834 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12835 	 */
   12836 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12837 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12838 		have_uid = false;
   12839 
   12840 	switch (sc->sc_type) {
   12841 	case WM_T_82571:
   12842 	case WM_T_82572:
   12843 	case WM_T_82574:
   12844 	case WM_T_82583:
   12845 		check_version = true;
   12846 		check_optionrom = true;
   12847 		have_build = true;
   12848 		break;
   12849 	case WM_T_82575:
   12850 	case WM_T_82576:
   12851 	case WM_T_82580:
   12852 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12853 			check_version = true;
   12854 		break;
   12855 	case WM_T_I211:
   12856 		wm_nvm_version_invm(sc);
   12857 		have_uid = false;
   12858 		goto printver;
   12859 	case WM_T_I210:
   12860 		if (!wm_nvm_flash_presence_i210(sc)) {
   12861 			wm_nvm_version_invm(sc);
   12862 			have_uid = false;
   12863 			goto printver;
   12864 		}
   12865 		/* FALLTHROUGH */
   12866 	case WM_T_I350:
   12867 	case WM_T_I354:
   12868 		check_version = true;
   12869 		check_optionrom = true;
   12870 		break;
   12871 	default:
   12872 		return;
   12873 	}
   12874 	if (check_version
   12875 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12876 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12877 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12878 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12879 			build = nvm_data & NVM_BUILD_MASK;
   12880 			have_build = true;
   12881 		} else
   12882 			minor = nvm_data & 0x00ff;
   12883 
    12884 		/* The minor number is BCD; convert it (e.g. 0x10 -> 10) */
   12885 		minor = (minor / 16) * 10 + (minor % 16);
   12886 		sc->sc_nvm_ver_major = major;
   12887 		sc->sc_nvm_ver_minor = minor;
   12888 
   12889 printver:
   12890 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12891 		    sc->sc_nvm_ver_minor);
   12892 		if (have_build) {
   12893 			sc->sc_nvm_ver_build = build;
   12894 			aprint_verbose(".%d", build);
   12895 		}
   12896 	}
   12897 
    12898 	/* Assume the Option ROM area is above NVM_SIZE */
   12899 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12900 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12901 		/* Option ROM Version */
   12902 		if ((off != 0x0000) && (off != 0xffff)) {
   12903 			int rv;
   12904 
   12905 			off += NVM_COMBO_VER_OFF;
   12906 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12907 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12908 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12909 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12910 				/* 16bits */
   12911 				major = uid0 >> 8;
   12912 				build = (uid0 << 8) | (uid1 >> 8);
   12913 				patch = uid1 & 0x00ff;
   12914 				aprint_verbose(", option ROM Version %d.%d.%d",
   12915 				    major, build, patch);
   12916 			}
   12917 		}
   12918 	}
   12919 
   12920 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12921 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12922 }
   12923 
   12924 /*
   12925  * wm_nvm_read:
   12926  *
   12927  *	Read data from the serial EEPROM.
   12928  */
   12929 static int
   12930 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12931 {
   12932 	int rv;
   12933 
   12934 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12935 		device_xname(sc->sc_dev), __func__));
   12936 
   12937 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12938 		return -1;
   12939 
   12940 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12941 
   12942 	return rv;
   12943 }
   12944 
    12945 /*
    12946  * Hardware semaphores.
    12947  * Very complex...
    12948  */
   12949 
   12950 static int
   12951 wm_get_null(struct wm_softc *sc)
   12952 {
   12953 
   12954 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12955 		device_xname(sc->sc_dev), __func__));
   12956 	return 0;
   12957 }
   12958 
   12959 static void
   12960 wm_put_null(struct wm_softc *sc)
   12961 {
   12962 
   12963 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12964 		device_xname(sc->sc_dev), __func__));
   12965 	return;
   12966 }
   12967 
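/*
 * Request direct (bit-bang) access to the EEPROM by setting EECD_EE_REQ
 * and polling for the grant bit.  Returns 0 on success, -1 if the grant
 * never arrives.
 */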
   12968 static int
   12969 wm_get_eecd(struct wm_softc *sc)
   12970 {
   12971 	uint32_t reg;
   12972 	int x;
   12973 
   12974 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12975 		device_xname(sc->sc_dev), __func__));
   12976 
   12977 	reg = CSR_READ(sc, WMREG_EECD);
   12978 
   12979 	/* Request EEPROM access. */
   12980 	reg |= EECD_EE_REQ;
   12981 	CSR_WRITE(sc, WMREG_EECD, reg);
   12982 
   12983 	/* ..and wait for it to be granted. */
   12984 	for (x = 0; x < 1000; x++) {
   12985 		reg = CSR_READ(sc, WMREG_EECD);
   12986 		if (reg & EECD_EE_GNT)
   12987 			break;
   12988 		delay(5);
   12989 	}
   12990 	if ((reg & EECD_EE_GNT) == 0) {
   12991 		aprint_error_dev(sc->sc_dev,
   12992 		    "could not acquire EEPROM GNT\n");
   12993 		reg &= ~EECD_EE_REQ;
   12994 		CSR_WRITE(sc, WMREG_EECD, reg);
   12995 		return -1;
   12996 	}
   12997 
   12998 	return 0;
   12999 }
   13000 
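/*
 * Raise and lower the bit-bang serial clock (EECD_SK).  SPI parts use a
 * 1us half-period; Microwire parts use a slower 50us half-period.
 */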
   13001 static void
   13002 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13003 {
   13004 
   13005 	*eecd |= EECD_SK;
   13006 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13007 	CSR_WRITE_FLUSH(sc);
   13008 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13009 		delay(1);
   13010 	else
   13011 		delay(50);
   13012 }
   13013 
   13014 static void
   13015 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13016 {
   13017 
   13018 	*eecd &= ~EECD_SK;
   13019 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13020 	CSR_WRITE_FLUSH(sc);
   13021 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13022 		delay(1);
   13023 	else
   13024 		delay(50);
   13025 }
   13026 
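/*
 * Finish the bit-bang EEPROM cycle (deselecting the chip in the way the
 * bus type requires) and drop the EECD_EE_REQ access request.
 */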
   13027 static void
   13028 wm_put_eecd(struct wm_softc *sc)
   13029 {
   13030 	uint32_t reg;
   13031 
   13032 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13033 		device_xname(sc->sc_dev), __func__));
   13034 
   13035 	/* Stop nvm */
   13036 	reg = CSR_READ(sc, WMREG_EECD);
   13037 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13038 		/* Pull CS high */
   13039 		reg |= EECD_CS;
   13040 		wm_nvm_eec_clock_lower(sc, &reg);
   13041 	} else {
   13042 		/* CS on Microwire is active-high */
   13043 		reg &= ~(EECD_CS | EECD_DI);
   13044 		CSR_WRITE(sc, WMREG_EECD, reg);
   13045 		wm_nvm_eec_clock_raise(sc, &reg);
   13046 		wm_nvm_eec_clock_lower(sc, &reg);
   13047 	}
   13048 
   13049 	reg = CSR_READ(sc, WMREG_EECD);
   13050 	reg &= ~EECD_EE_REQ;
   13051 	CSR_WRITE(sc, WMREG_EECD, reg);
   13052 
   13053 	return;
   13054 }
   13055 
   13056 /*
   13057  * Get hardware semaphore.
   13058  * Same as e1000_get_hw_semaphore_generic()
   13059  */
   13060 static int
   13061 wm_get_swsm_semaphore(struct wm_softc *sc)
   13062 {
   13063 	int32_t timeout;
   13064 	uint32_t swsm;
   13065 
   13066 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13067 		device_xname(sc->sc_dev), __func__));
   13068 	KASSERT(sc->sc_nvm_wordsize > 0);
   13069 
   13070 retry:
   13071 	/* Get the SW semaphore. */
   13072 	timeout = sc->sc_nvm_wordsize + 1;
   13073 	while (timeout) {
   13074 		swsm = CSR_READ(sc, WMREG_SWSM);
   13075 
   13076 		if ((swsm & SWSM_SMBI) == 0)
   13077 			break;
   13078 
   13079 		delay(50);
   13080 		timeout--;
   13081 	}
   13082 
   13083 	if (timeout == 0) {
   13084 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13085 			/*
   13086 			 * In rare circumstances, the SW semaphore may already
   13087 			 * be held unintentionally. Clear the semaphore once
   13088 			 * before giving up.
   13089 			 */
   13090 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13091 			wm_put_swsm_semaphore(sc);
   13092 			goto retry;
   13093 		}
   13094 		aprint_error_dev(sc->sc_dev,
   13095 		    "could not acquire SWSM SMBI\n");
   13096 		return 1;
   13097 	}
   13098 
   13099 	/* Get the FW semaphore. */
   13100 	timeout = sc->sc_nvm_wordsize + 1;
   13101 	while (timeout) {
   13102 		swsm = CSR_READ(sc, WMREG_SWSM);
   13103 		swsm |= SWSM_SWESMBI;
   13104 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13105 		/* If we managed to set the bit we got the semaphore. */
   13106 		swsm = CSR_READ(sc, WMREG_SWSM);
   13107 		if (swsm & SWSM_SWESMBI)
   13108 			break;
   13109 
   13110 		delay(50);
   13111 		timeout--;
   13112 	}
   13113 
   13114 	if (timeout == 0) {
   13115 		aprint_error_dev(sc->sc_dev,
   13116 		    "could not acquire SWSM SWESMBI\n");
   13117 		/* Release semaphores */
   13118 		wm_put_swsm_semaphore(sc);
   13119 		return 1;
   13120 	}
   13121 	return 0;
   13122 }
   13123 
   13124 /*
   13125  * Put hardware semaphore.
   13126  * Same as e1000_put_hw_semaphore_generic()
   13127  */
   13128 static void
   13129 wm_put_swsm_semaphore(struct wm_softc *sc)
   13130 {
   13131 	uint32_t swsm;
   13132 
   13133 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13134 		device_xname(sc->sc_dev), __func__));
   13135 
   13136 	swsm = CSR_READ(sc, WMREG_SWSM);
   13137 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13138 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13139 }
   13140 
   13141 /*
   13142  * Get SW/FW semaphore.
   13143  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13144  */
   13145 static int
   13146 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13147 {
   13148 	uint32_t swfw_sync;
   13149 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13150 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13151 	int timeout;
   13152 
   13153 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13154 		device_xname(sc->sc_dev), __func__));
   13155 
   13156 	if (sc->sc_type == WM_T_80003)
   13157 		timeout = 50;
   13158 	else
   13159 		timeout = 200;
   13160 
   13161 	while (timeout) {
   13162 		if (wm_get_swsm_semaphore(sc)) {
   13163 			aprint_error_dev(sc->sc_dev,
   13164 			    "%s: failed to get semaphore\n",
   13165 			    __func__);
   13166 			return 1;
   13167 		}
   13168 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13169 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13170 			swfw_sync |= swmask;
   13171 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13172 			wm_put_swsm_semaphore(sc);
   13173 			return 0;
   13174 		}
   13175 		wm_put_swsm_semaphore(sc);
   13176 		delay(5000);
   13177 		timeout--;
   13178 	}
   13179 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13180 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13181 	return 1;
   13182 }
   13183 
   13184 static void
   13185 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13186 {
   13187 	uint32_t swfw_sync;
   13188 
   13189 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13190 		device_xname(sc->sc_dev), __func__));
   13191 
   13192 	while (wm_get_swsm_semaphore(sc) != 0)
   13193 		continue;
   13194 
   13195 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13196 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13197 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13198 
   13199 	wm_put_swsm_semaphore(sc);
   13200 }
   13201 
   13202 static int
   13203 wm_get_nvm_80003(struct wm_softc *sc)
   13204 {
   13205 	int rv;
   13206 
   13207 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13208 		device_xname(sc->sc_dev), __func__));
   13209 
   13210 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13211 		aprint_error_dev(sc->sc_dev,
   13212 		    "%s: failed to get semaphore(SWFW)\n",
   13213 		    __func__);
   13214 		return rv;
   13215 	}
   13216 
   13217 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13218 	    && (rv = wm_get_eecd(sc)) != 0) {
   13219 		aprint_error_dev(sc->sc_dev,
   13220 		    "%s: failed to get semaphore(EECD)\n",
   13221 		    __func__);
   13222 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13223 		return rv;
   13224 	}
   13225 
   13226 	return 0;
   13227 }
   13228 
   13229 static void
   13230 wm_put_nvm_80003(struct wm_softc *sc)
   13231 {
   13232 
   13233 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13234 		device_xname(sc->sc_dev), __func__));
   13235 
   13236 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13237 		wm_put_eecd(sc);
   13238 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13239 }
   13240 
   13241 static int
   13242 wm_get_nvm_82571(struct wm_softc *sc)
   13243 {
   13244 	int rv;
   13245 
   13246 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13247 		device_xname(sc->sc_dev), __func__));
   13248 
   13249 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13250 		return rv;
   13251 
   13252 	switch (sc->sc_type) {
   13253 	case WM_T_82573:
   13254 		break;
   13255 	default:
   13256 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13257 			rv = wm_get_eecd(sc);
   13258 		break;
   13259 	}
   13260 
   13261 	if (rv != 0) {
   13262 		aprint_error_dev(sc->sc_dev,
   13263 		    "%s: failed to get semaphore\n",
   13264 		    __func__);
   13265 		wm_put_swsm_semaphore(sc);
   13266 	}
   13267 
   13268 	return rv;
   13269 }
   13270 
   13271 static void
   13272 wm_put_nvm_82571(struct wm_softc *sc)
   13273 {
   13274 
   13275 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13276 		device_xname(sc->sc_dev), __func__));
   13277 
   13278 	switch (sc->sc_type) {
   13279 	case WM_T_82573:
   13280 		break;
   13281 	default:
   13282 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13283 			wm_put_eecd(sc);
   13284 		break;
   13285 	}
   13286 
   13287 	wm_put_swsm_semaphore(sc);
   13288 }
   13289 
   13290 static int
   13291 wm_get_phy_82575(struct wm_softc *sc)
   13292 {
   13293 
   13294 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13295 		device_xname(sc->sc_dev), __func__));
   13296 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13297 }
   13298 
   13299 static void
   13300 wm_put_phy_82575(struct wm_softc *sc)
   13301 {
   13302 
   13303 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13304 		device_xname(sc->sc_dev), __func__));
   13305 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13306 }
   13307 
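/*
 * Get the SW/FW/HW semaphore by claiming MDIO software ownership in
 * EXTCNFCTR.  The ich_phymtx is used for both PHY and NVM access here.
 */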
   13308 static int
   13309 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13310 {
   13311 	uint32_t ext_ctrl;
    13312 	int timeout;
   13313 
   13314 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13315 		device_xname(sc->sc_dev), __func__));
   13316 
   13317 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13318 	for (timeout = 0; timeout < 200; timeout++) {
   13319 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13320 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13321 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13322 
   13323 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13324 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13325 			return 0;
   13326 		delay(5000);
   13327 	}
   13328 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13329 	    device_xname(sc->sc_dev), ext_ctrl);
   13330 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13331 	return 1;
   13332 }
   13333 
   13334 static void
   13335 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13336 {
   13337 	uint32_t ext_ctrl;
   13338 
   13339 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13340 		device_xname(sc->sc_dev), __func__));
   13341 
   13342 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13343 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13344 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13345 
   13346 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13347 }
   13348 
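/*
 * Get the ICH8 software flag: wait for any current owner to release
 * MDIO software ownership in EXTCNFCTR, then claim it ourselves.
 */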
   13349 static int
   13350 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13351 {
   13352 	uint32_t ext_ctrl;
   13353 	int timeout;
   13354 
   13355 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13356 		device_xname(sc->sc_dev), __func__));
   13357 	mutex_enter(sc->sc_ich_phymtx);
   13358 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13359 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13360 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13361 			break;
   13362 		delay(1000);
   13363 	}
   13364 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13365 		printf("%s: SW has already locked the resource\n",
   13366 		    device_xname(sc->sc_dev));
   13367 		goto out;
   13368 	}
   13369 
   13370 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13371 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13372 	for (timeout = 0; timeout < 1000; timeout++) {
   13373 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13374 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13375 			break;
   13376 		delay(1000);
   13377 	}
   13378 	if (timeout >= 1000) {
   13379 		printf("%s: failed to acquire semaphore\n",
   13380 		    device_xname(sc->sc_dev));
   13381 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13382 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13383 		goto out;
   13384 	}
   13385 	return 0;
   13386 
   13387 out:
   13388 	mutex_exit(sc->sc_ich_phymtx);
   13389 	return 1;
   13390 }
   13391 
   13392 static void
   13393 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13394 {
   13395 	uint32_t ext_ctrl;
   13396 
   13397 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13398 		device_xname(sc->sc_dev), __func__));
   13399 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13400 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13401 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13402 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13403 	} else {
   13404 		printf("%s: Semaphore unexpectedly released\n",
   13405 		    device_xname(sc->sc_dev));
   13406 	}
   13407 
   13408 	mutex_exit(sc->sc_ich_phymtx);
   13409 }
   13410 
   13411 static int
   13412 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13413 {
   13414 
   13415 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13416 		device_xname(sc->sc_dev), __func__));
   13417 	mutex_enter(sc->sc_ich_nvmmtx);
   13418 
   13419 	return 0;
   13420 }
   13421 
   13422 static void
   13423 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13424 {
   13425 
   13426 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13427 		device_xname(sc->sc_dev), __func__));
   13428 	mutex_exit(sc->sc_ich_nvmmtx);
   13429 }
   13430 
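/*
 * Get the 82573 hardware semaphore by setting the MDIO software
 * ownership bit in EXTCNFCTR and re-reading it until the bit sticks.
 */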
   13431 static int
   13432 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13433 {
   13434 	int i = 0;
   13435 	uint32_t reg;
   13436 
   13437 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13438 		device_xname(sc->sc_dev), __func__));
   13439 
   13440 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13441 	do {
   13442 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13443 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13444 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13445 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13446 			break;
   13447 		delay(2*1000);
   13448 		i++;
   13449 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13450 
   13451 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13452 		wm_put_hw_semaphore_82573(sc);
   13453 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13454 		    device_xname(sc->sc_dev));
   13455 		return -1;
   13456 	}
   13457 
   13458 	return 0;
   13459 }
   13460 
   13461 static void
   13462 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13463 {
   13464 	uint32_t reg;
   13465 
   13466 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13467 		device_xname(sc->sc_dev), __func__));
   13468 
   13469 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13470 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13471 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13472 }
   13473 
   13474 /*
   13475  * Management mode and power management related subroutines.
   13476  * BMC, AMT, suspend/resume and EEE.
   13477  */
   13478 
   13479 #ifdef WM_WOL
   13480 static int
   13481 wm_check_mng_mode(struct wm_softc *sc)
   13482 {
   13483 	int rv;
   13484 
   13485 	switch (sc->sc_type) {
   13486 	case WM_T_ICH8:
   13487 	case WM_T_ICH9:
   13488 	case WM_T_ICH10:
   13489 	case WM_T_PCH:
   13490 	case WM_T_PCH2:
   13491 	case WM_T_PCH_LPT:
   13492 	case WM_T_PCH_SPT:
   13493 	case WM_T_PCH_CNP:
   13494 		rv = wm_check_mng_mode_ich8lan(sc);
   13495 		break;
   13496 	case WM_T_82574:
   13497 	case WM_T_82583:
   13498 		rv = wm_check_mng_mode_82574(sc);
   13499 		break;
   13500 	case WM_T_82571:
   13501 	case WM_T_82572:
   13502 	case WM_T_82573:
   13503 	case WM_T_80003:
   13504 		rv = wm_check_mng_mode_generic(sc);
   13505 		break;
   13506 	default:
    13507 		/* nothing to do */
   13508 		rv = 0;
   13509 		break;
   13510 	}
   13511 
   13512 	return rv;
   13513 }
   13514 
   13515 static int
   13516 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13517 {
   13518 	uint32_t fwsm;
   13519 
   13520 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13521 
   13522 	if (((fwsm & FWSM_FW_VALID) != 0)
   13523 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13524 		return 1;
   13525 
   13526 	return 0;
   13527 }
   13528 
   13529 static int
   13530 wm_check_mng_mode_82574(struct wm_softc *sc)
   13531 {
   13532 	uint16_t data;
   13533 
   13534 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13535 
   13536 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13537 		return 1;
   13538 
   13539 	return 0;
   13540 }
   13541 
   13542 static int
   13543 wm_check_mng_mode_generic(struct wm_softc *sc)
   13544 {
   13545 	uint32_t fwsm;
   13546 
   13547 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13548 
   13549 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13550 		return 1;
   13551 
   13552 	return 0;
   13553 }
   13554 #endif /* WM_WOL */
   13555 
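/*
 * Return 1 if management frames pass through the host interface (a BMC
 * sharing the MAC), otherwise 0.  Checks MANC and, where applicable,
 * FACTPS and the NVM manageability bits.
 */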
   13556 static int
   13557 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13558 {
   13559 	uint32_t manc, fwsm, factps;
   13560 
   13561 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13562 		return 0;
   13563 
   13564 	manc = CSR_READ(sc, WMREG_MANC);
   13565 
   13566 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13567 		device_xname(sc->sc_dev), manc));
   13568 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13569 		return 0;
   13570 
   13571 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13572 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13573 		factps = CSR_READ(sc, WMREG_FACTPS);
   13574 		if (((factps & FACTPS_MNGCG) == 0)
   13575 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13576 			return 1;
    13577 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13578 		uint16_t data;
   13579 
   13580 		factps = CSR_READ(sc, WMREG_FACTPS);
   13581 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13582 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13583 			device_xname(sc->sc_dev), factps, data));
   13584 		if (((factps & FACTPS_MNGCG) == 0)
   13585 		    && ((data & NVM_CFG2_MNGM_MASK)
   13586 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13587 			return 1;
   13588 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13589 	    && ((manc & MANC_ASF_EN) == 0))
   13590 		return 1;
   13591 
   13592 	return 0;
   13593 }
   13594 
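/*
 * Return true if firmware currently forbids resetting the PHY: the
 * FWSM RSPCIPHY bit on ICH/PCH parts, MANC_BLK_PHY_RST_ON_IDE on 8257x.
 */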
   13595 static bool
   13596 wm_phy_resetisblocked(struct wm_softc *sc)
   13597 {
   13598 	bool blocked = false;
   13599 	uint32_t reg;
   13600 	int i = 0;
   13601 
   13602 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13603 		device_xname(sc->sc_dev), __func__));
   13604 
   13605 	switch (sc->sc_type) {
   13606 	case WM_T_ICH8:
   13607 	case WM_T_ICH9:
   13608 	case WM_T_ICH10:
   13609 	case WM_T_PCH:
   13610 	case WM_T_PCH2:
   13611 	case WM_T_PCH_LPT:
   13612 	case WM_T_PCH_SPT:
   13613 	case WM_T_PCH_CNP:
   13614 		do {
   13615 			reg = CSR_READ(sc, WMREG_FWSM);
   13616 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13617 				blocked = true;
   13618 				delay(10*1000);
   13619 				continue;
   13620 			}
   13621 			blocked = false;
   13622 		} while (blocked && (i++ < 30));
   13623 		return blocked;
   13625 	case WM_T_82571:
   13626 	case WM_T_82572:
   13627 	case WM_T_82573:
   13628 	case WM_T_82574:
   13629 	case WM_T_82583:
   13630 	case WM_T_80003:
   13631 		reg = CSR_READ(sc, WMREG_MANC);
   13632 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13633 			return true;
   13634 		else
   13635 			return false;
   13637 	default:
   13638 		/* no problem */
   13639 		break;
   13640 	}
   13641 
   13642 	return false;
   13643 }
   13644 
   13645 static void
   13646 wm_get_hw_control(struct wm_softc *sc)
   13647 {
   13648 	uint32_t reg;
   13649 
   13650 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13651 		device_xname(sc->sc_dev), __func__));
   13652 
   13653 	if (sc->sc_type == WM_T_82573) {
   13654 		reg = CSR_READ(sc, WMREG_SWSM);
   13655 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13656 	} else if (sc->sc_type >= WM_T_82571) {
   13657 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13658 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13659 	}
   13660 }
   13661 
   13662 static void
   13663 wm_release_hw_control(struct wm_softc *sc)
   13664 {
   13665 	uint32_t reg;
   13666 
   13667 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13668 		device_xname(sc->sc_dev), __func__));
   13669 
   13670 	if (sc->sc_type == WM_T_82573) {
   13671 		reg = CSR_READ(sc, WMREG_SWSM);
   13672 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13673 	} else if (sc->sc_type >= WM_T_82571) {
   13674 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13675 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13676 	}
   13677 }
   13678 
   13679 static void
   13680 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13681 {
   13682 	uint32_t reg;
   13683 
   13684 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13685 		device_xname(sc->sc_dev), __func__));
   13686 
   13687 	if (sc->sc_type < WM_T_PCH2)
   13688 		return;
   13689 
   13690 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13691 
   13692 	if (gate)
   13693 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13694 	else
   13695 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13696 
   13697 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13698 }
   13699 
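/*
 * Switch the PHY from SMBus to PCIe/MDIO access mode, toggling the
 * LANPHYPC pin if the PHY does not respond, then reset the PHY.
 */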
   13700 static void
   13701 wm_smbustopci(struct wm_softc *sc)
   13702 {
   13703 	uint32_t fwsm, reg;
   13704 	int rv = 0;
   13705 
   13706 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13707 		device_xname(sc->sc_dev), __func__));
   13708 
   13709 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13710 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13711 
   13712 	/* Disable ULP */
   13713 	wm_ulp_disable(sc);
   13714 
   13715 	/* Acquire PHY semaphore */
   13716 	sc->phy.acquire(sc);
   13717 
   13718 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13719 	switch (sc->sc_type) {
   13720 	case WM_T_PCH_LPT:
   13721 	case WM_T_PCH_SPT:
   13722 	case WM_T_PCH_CNP:
   13723 		if (wm_phy_is_accessible_pchlan(sc))
   13724 			break;
   13725 
   13726 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13727 		reg |= CTRL_EXT_FORCE_SMBUS;
   13728 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13729 #if 0
   13730 		/* XXX Isn't this required??? */
   13731 		CSR_WRITE_FLUSH(sc);
   13732 #endif
   13733 		delay(50 * 1000);
   13734 		/* FALLTHROUGH */
   13735 	case WM_T_PCH2:
   13736 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13737 			break;
   13738 		/* FALLTHROUGH */
   13739 	case WM_T_PCH:
   13740 		if (sc->sc_type == WM_T_PCH)
   13741 			if ((fwsm & FWSM_FW_VALID) != 0)
   13742 				break;
   13743 
   13744 		if (wm_phy_resetisblocked(sc) == true) {
   13745 			printf("XXX reset is blocked(3)\n");
   13746 			break;
   13747 		}
   13748 
   13749 		wm_toggle_lanphypc_pch_lpt(sc);
   13750 
   13751 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13752 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13753 				break;
   13754 
   13755 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13756 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13757 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13758 
   13759 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13760 				break;
   13761 			rv = -1;
   13762 		}
   13763 		break;
   13764 	default:
   13765 		break;
   13766 	}
   13767 
   13768 	/* Release semaphore */
   13769 	sc->phy.release(sc);
   13770 
   13771 	if (rv == 0) {
   13772 		if (wm_phy_resetisblocked(sc)) {
   13773 			printf("XXX reset is blocked(4)\n");
   13774 			goto out;
   13775 		}
   13776 		wm_reset_phy(sc);
   13777 		if (wm_phy_resetisblocked(sc))
   13778 			printf("XXX reset is blocked(4)\n");
   13779 	}
   13780 
   13781 out:
   13782 	/*
   13783 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13784 	 */
   13785 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13786 		delay(10*1000);
   13787 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13788 	}
   13789 }
   13790 
   13791 static void
   13792 wm_init_manageability(struct wm_softc *sc)
   13793 {
   13794 
   13795 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13796 		device_xname(sc->sc_dev), __func__));
   13797 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13798 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13799 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13800 
   13801 		/* Disable hardware interception of ARP */
   13802 		manc &= ~MANC_ARP_EN;
   13803 
   13804 		/* Enable receiving management packets to the host */
   13805 		if (sc->sc_type >= WM_T_82571) {
   13806 			manc |= MANC_EN_MNG2HOST;
   13807 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13808 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13809 		}
   13810 
   13811 		CSR_WRITE(sc, WMREG_MANC, manc);
   13812 	}
   13813 }
   13814 
   13815 static void
   13816 wm_release_manageability(struct wm_softc *sc)
   13817 {
   13818 
   13819 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13820 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13821 
   13822 		manc |= MANC_ARP_EN;
   13823 		if (sc->sc_type >= WM_T_82571)
   13824 			manc &= ~MANC_EN_MNG2HOST;
   13825 
   13826 		CSR_WRITE(sc, WMREG_MANC, manc);
   13827 	}
   13828 }
   13829 
   13830 static void
   13831 wm_get_wakeup(struct wm_softc *sc)
   13832 {
   13833 
   13834 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13835 	switch (sc->sc_type) {
   13836 	case WM_T_82573:
   13837 	case WM_T_82583:
   13838 		sc->sc_flags |= WM_F_HAS_AMT;
   13839 		/* FALLTHROUGH */
   13840 	case WM_T_80003:
   13841 	case WM_T_82575:
   13842 	case WM_T_82576:
   13843 	case WM_T_82580:
   13844 	case WM_T_I350:
   13845 	case WM_T_I354:
   13846 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13847 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13848 		/* FALLTHROUGH */
   13849 	case WM_T_82541:
   13850 	case WM_T_82541_2:
   13851 	case WM_T_82547:
   13852 	case WM_T_82547_2:
   13853 	case WM_T_82571:
   13854 	case WM_T_82572:
   13855 	case WM_T_82574:
   13856 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13857 		break;
   13858 	case WM_T_ICH8:
   13859 	case WM_T_ICH9:
   13860 	case WM_T_ICH10:
   13861 	case WM_T_PCH:
   13862 	case WM_T_PCH2:
   13863 	case WM_T_PCH_LPT:
   13864 	case WM_T_PCH_SPT:
   13865 	case WM_T_PCH_CNP:
   13866 		sc->sc_flags |= WM_F_HAS_AMT;
   13867 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13868 		break;
   13869 	default:
   13870 		break;
   13871 	}
   13872 
   13873 	/* 1: HAS_MANAGE */
   13874 	if (wm_enable_mng_pass_thru(sc) != 0)
   13875 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13876 
    13877 	/*
    13878 	 * Note that the WOL flags are set after the EEPROM stuff is
    13879 	 * reset.
    13880 	 */
   13881 }
   13882 
   13883 /*
   13884  * Unconfigure Ultra Low Power mode.
   13885  * Only for I217 and newer (see below).
   13886  */
   13887 static void
   13888 wm_ulp_disable(struct wm_softc *sc)
   13889 {
   13890 	uint32_t reg;
   13891 	int i = 0;
   13892 
   13893 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13894 		device_xname(sc->sc_dev), __func__));
   13895 	/* Exclude old devices */
   13896 	if ((sc->sc_type < WM_T_PCH_LPT)
   13897 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13898 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13899 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13900 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13901 		return;
   13902 
   13903 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13904 		/* Request ME un-configure ULP mode in the PHY */
   13905 		reg = CSR_READ(sc, WMREG_H2ME);
   13906 		reg &= ~H2ME_ULP;
   13907 		reg |= H2ME_ENFORCE_SETTINGS;
   13908 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13909 
   13910 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13911 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13912 			if (i++ == 30) {
   13913 				printf("%s timed out\n", __func__);
   13914 				return;
   13915 			}
   13916 			delay(10 * 1000);
   13917 		}
   13918 		reg = CSR_READ(sc, WMREG_H2ME);
   13919 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13920 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13921 
   13922 		return;
   13923 	}
   13924 
   13925 	/* Acquire semaphore */
   13926 	sc->phy.acquire(sc);
   13927 
   13928 	/* Toggle LANPHYPC */
   13929 	wm_toggle_lanphypc_pch_lpt(sc);
   13930 
   13931 	/* Unforce SMBus mode in PHY */
   13932 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13933 	if (reg == 0x0000 || reg == 0xffff) {
   13934 		uint32_t reg2;
   13935 
   13936 		printf("%s: Force SMBus first.\n", __func__);
   13937 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13938 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13939 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13940 		delay(50 * 1000);
   13941 
   13942 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13943 	}
   13944 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13945 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13946 
   13947 	/* Unforce SMBus mode in MAC */
   13948 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13949 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13950 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13951 
   13952 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13953 	reg |= HV_PM_CTRL_K1_ENA;
   13954 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13955 
   13956 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13957 	reg &= ~(I218_ULP_CONFIG1_IND
   13958 	    | I218_ULP_CONFIG1_STICKY_ULP
   13959 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13960 	    | I218_ULP_CONFIG1_WOL_HOST
   13961 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13962 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13963 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13964 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13965 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13966 	reg |= I218_ULP_CONFIG1_START;
   13967 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13968 
   13969 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13970 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13971 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13972 
   13973 	/* Release semaphore */
   13974 	sc->phy.release(sc);
   13975 	wm_gmii_reset(sc);
   13976 	delay(50 * 1000);
   13977 }
   13978 
   13979 /* WOL in the newer chipset interfaces (pchlan) */
   13980 static void
   13981 wm_enable_phy_wakeup(struct wm_softc *sc)
   13982 {
   13983 #if 0
   13984 	uint16_t preg;
   13985 
   13986 	/* Copy MAC RARs to PHY RARs */
   13987 
   13988 	/* Copy MAC MTA to PHY MTA */
   13989 
   13990 	/* Configure PHY Rx Control register */
   13991 
   13992 	/* Enable PHY wakeup in MAC register */
   13993 
   13994 	/* Configure and enable PHY wakeup in PHY registers */
   13995 
   13996 	/* Activate PHY wakeup */
   13997 
   13998 	/* XXX */
   13999 #endif
   14000 }
   14001 
   14002 /* Power down workaround on D3 */
   14003 static void
   14004 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14005 {
   14006 	uint32_t reg;
   14007 	int i;
   14008 
   14009 	for (i = 0; i < 2; i++) {
   14010 		/* Disable link */
   14011 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14012 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14013 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14014 
   14015 		/*
   14016 		 * Call gig speed drop workaround on Gig disable before
   14017 		 * accessing any PHY registers
   14018 		 */
   14019 		if (sc->sc_type == WM_T_ICH8)
   14020 			wm_gig_downshift_workaround_ich8lan(sc);
   14021 
   14022 		/* Write VR power-down enable */
   14023 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14024 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14025 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14026 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   14027 
   14028 		/* Read it back and test */
   14029 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14030 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14031 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14032 			break;
   14033 
   14034 		/* Issue PHY reset and repeat at most one more time */
   14035 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14036 	}
   14037 }
   14038 
   14039 static void
   14040 wm_enable_wakeup(struct wm_softc *sc)
   14041 {
   14042 	uint32_t reg, pmreg;
   14043 	pcireg_t pmode;
   14044 
   14045 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14046 		device_xname(sc->sc_dev), __func__));
   14047 
   14048 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14049 		&pmreg, NULL) == 0)
   14050 		return;
   14051 
   14052 	/* Advertise the wakeup capability */
   14053 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14054 	    | CTRL_SWDPIN(3));
   14055 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   14056 
   14057 	/* ICH workaround */
   14058 	switch (sc->sc_type) {
   14059 	case WM_T_ICH8:
   14060 	case WM_T_ICH9:
   14061 	case WM_T_ICH10:
   14062 	case WM_T_PCH:
   14063 	case WM_T_PCH2:
   14064 	case WM_T_PCH_LPT:
   14065 	case WM_T_PCH_SPT:
   14066 	case WM_T_PCH_CNP:
   14067 		/* Disable gig during WOL */
   14068 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14069 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   14070 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14071 		if (sc->sc_type == WM_T_PCH)
   14072 			wm_gmii_reset(sc);
   14073 
   14074 		/* Power down workaround */
   14075 		if (sc->sc_phytype == WMPHY_82577) {
   14076 			struct mii_softc *child;
   14077 
   14078 			/* Assume that the PHY is copper */
   14079 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14080 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
    14081 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
    14082 				    (768 << 5) | 25, 0x0444); /* magic: page 768, reg 25 */
   14083 		}
   14084 		break;
   14085 	default:
   14086 		break;
   14087 	}
   14088 
   14089 	/* Keep the laser running on fiber adapters */
   14090 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14091 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14092 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14093 		reg |= CTRL_EXT_SWDPIN(3);
   14094 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14095 	}
   14096 
   14097 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14098 #if 0	/* for the multicast packet */
   14099 	reg |= WUFC_MC;
   14100 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14101 #endif
   14102 
   14103 	if (sc->sc_type >= WM_T_PCH)
   14104 		wm_enable_phy_wakeup(sc);
   14105 	else {
   14106 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14107 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14108 	}
   14109 
   14110 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14111 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14112 		|| (sc->sc_type == WM_T_PCH2))
   14113 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14114 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14115 
   14116 	/* Request PME */
   14117 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14118 #if 0
   14119 	/* Disable WOL */
   14120 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14121 #else
   14122 	/* For WOL */
   14123 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14124 #endif
   14125 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14126 }
   14127 
   14128 /* Disable ASPM L0s and/or L1 for workaround */
   14129 static void
   14130 wm_disable_aspm(struct wm_softc *sc)
   14131 {
   14132 	pcireg_t reg, mask = 0;
    14133 	const char *str = "";
   14134 
    14135 	/*
    14136 	 * Only for PCIe devices which have the PCIe capability in their
    14137 	 * PCI config space.
    14138 	 */
   14139 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14140 		return;
   14141 
   14142 	switch (sc->sc_type) {
   14143 	case WM_T_82571:
   14144 	case WM_T_82572:
   14145 		/*
   14146 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14147 		 * State Power management L1 State (ASPM L1).
   14148 		 */
   14149 		mask = PCIE_LCSR_ASPM_L1;
   14150 		str = "L1 is";
   14151 		break;
   14152 	case WM_T_82573:
   14153 	case WM_T_82574:
   14154 	case WM_T_82583:
   14155 		/*
   14156 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14157 		 *
    14158 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14159 		 * some chipsets.  The 82574 and 82583 documents say that
    14160 		 * disabling L0s only on those specific chipsets would be
    14161 		 * sufficient, but we follow what the Intel em driver does.
   14162 		 *
   14163 		 * References:
   14164 		 * Errata 8 of the Specification Update of i82573.
   14165 		 * Errata 20 of the Specification Update of i82574.
   14166 		 * Errata 9 of the Specification Update of i82583.
   14167 		 */
   14168 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14169 		str = "L0s and L1 are";
   14170 		break;
   14171 	default:
   14172 		return;
   14173 	}
   14174 
   14175 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14176 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14177 	reg &= ~mask;
   14178 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14179 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14180 
   14181 	/* Print only in wm_attach() */
   14182 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14183 		aprint_verbose_dev(sc->sc_dev,
   14184 		    "ASPM %s disabled to workaround the errata.\n", str);
   14185 }
   14186 
   14187 /* LPLU */
   14188 
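/*
 * Disable D0 Low Power Link Up.  The register holding the LPLU bit
 * varies with the MAC/PHY generation.
 */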
   14189 static void
   14190 wm_lplu_d0_disable(struct wm_softc *sc)
   14191 {
   14192 	struct mii_data *mii = &sc->sc_mii;
   14193 	uint32_t reg;
   14194 
   14195 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14196 		device_xname(sc->sc_dev), __func__));
   14197 
   14198 	if (sc->sc_phytype == WMPHY_IFE)
   14199 		return;
   14200 
   14201 	switch (sc->sc_type) {
   14202 	case WM_T_82571:
   14203 	case WM_T_82572:
   14204 	case WM_T_82573:
   14205 	case WM_T_82575:
   14206 	case WM_T_82576:
   14207 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14208 		reg &= ~PMR_D0_LPLU;
   14209 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14210 		break;
   14211 	case WM_T_82580:
   14212 	case WM_T_I350:
   14213 	case WM_T_I210:
   14214 	case WM_T_I211:
   14215 		reg = CSR_READ(sc, WMREG_PHPM);
   14216 		reg &= ~PHPM_D0A_LPLU;
   14217 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14218 		break;
   14219 	case WM_T_82574:
   14220 	case WM_T_82583:
   14221 	case WM_T_ICH8:
   14222 	case WM_T_ICH9:
   14223 	case WM_T_ICH10:
   14224 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14225 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14226 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14227 		CSR_WRITE_FLUSH(sc);
   14228 		break;
   14229 	case WM_T_PCH:
   14230 	case WM_T_PCH2:
   14231 	case WM_T_PCH_LPT:
   14232 	case WM_T_PCH_SPT:
   14233 	case WM_T_PCH_CNP:
   14234 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14235 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14236 		if (wm_phy_resetisblocked(sc) == false)
   14237 			reg |= HV_OEM_BITS_ANEGNOW;
   14238 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14239 		break;
   14240 	default:
   14241 		break;
   14242 	}
   14243 }
   14244 
   14245 /* EEE */
   14246 
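/*
 * Enable or disable Energy Efficient Ethernet (IPCNFG/EEER) on
 * I350-class devices according to the WM_F_EEE flag.
 */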
   14247 static void
   14248 wm_set_eee_i350(struct wm_softc *sc)
   14249 {
   14250 	uint32_t ipcnfg, eeer;
   14251 
   14252 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14253 	eeer = CSR_READ(sc, WMREG_EEER);
   14254 
   14255 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14256 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14257 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14258 		    | EEER_LPI_FC);
   14259 	} else {
   14260 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14261 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14262 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14263 		    | EEER_LPI_FC);
   14264 	}
   14265 
   14266 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14267 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14268 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14269 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14270 }
   14271 
    14272 /*
    14273  * Workarounds (mainly PHY related).
    14274  * Basically, PHY workarounds are in the PHY drivers.
    14275  */
   14276 
   14277 /* Work-around for 82566 Kumeran PCS lock loss */
   14278 static void
   14279 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14280 {
   14281 	struct mii_data *mii = &sc->sc_mii;
   14282 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14283 	int i;
   14284 	int reg;
   14285 
   14286 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14287 		device_xname(sc->sc_dev), __func__));
   14288 
   14289 	/* If the link is not up, do nothing */
   14290 	if ((status & STATUS_LU) == 0)
   14291 		return;
   14292 
   14293 	/* Nothing to do if the link is other than 1Gbps */
   14294 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14295 		return;
   14296 
   14297 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14298 	for (i = 0; i < 10; i++) {
   14299 		/* read twice */
   14300 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14301 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14302 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14303 			goto out;	/* GOOD! */
   14304 
   14305 		/* Reset the PHY */
   14306 		wm_reset_phy(sc);
   14307 		delay(5*1000);
   14308 	}
   14309 
   14310 	/* Disable GigE link negotiation */
   14311 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14312 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14313 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14314 
   14315 	/*
   14316 	 * Call gig speed drop workaround on Gig disable before accessing
   14317 	 * any PHY registers.
   14318 	 */
   14319 	wm_gig_downshift_workaround_ich8lan(sc);
   14320 
   14321 out:
   14322 	return;
   14323 }
   14324 
   14325 /* WOL from S5 stops working */
   14326 static void
   14327 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14328 {
   14329 	uint16_t kmreg;
   14330 
   14331 	/* Only for igp3 */
   14332 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14333 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14334 			return;
   14335 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14336 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14337 			return;
   14338 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14339 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14340 	}
   14341 }
   14342 
   14343 /*
   14344  * Workaround for pch's PHYs
   14345  * XXX should be moved to new PHY driver?
   14346  */
   14347 static void
   14348 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14349 {
   14350 
   14351 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14352 		device_xname(sc->sc_dev), __func__));
   14353 	KASSERT(sc->sc_type == WM_T_PCH);
   14354 
   14355 	if (sc->sc_phytype == WMPHY_82577)
   14356 		wm_set_mdio_slow_mode_hv(sc);
   14357 
   14358 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14359 
   14360 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14361 
   14362 	/* 82578 */
   14363 	if (sc->sc_phytype == WMPHY_82578) {
   14364 		struct mii_softc *child;
   14365 
   14366 		/*
   14367 		 * Return registers to default by doing a soft reset then
   14368 		 * writing 0x3140 to the control register
   14369 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14370 		 */
   14371 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14372 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14373 			PHY_RESET(child);
   14374 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14375 			    0x3140);
   14376 		}
   14377 	}
   14378 
   14379 	/* Select page 0 */
   14380 	sc->phy.acquire(sc);
   14381 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14382 	sc->phy.release(sc);
   14383 
   14384 	/*
   14385 	 * Configure the K1 Si workaround during phy reset assuming there is
   14386 	 * link so that it disables K1 if link is in 1Gbps.
   14387 	 */
   14388 	wm_k1_gig_workaround_hv(sc, 1);
   14389 }
   14390 
   14391 static void
   14392 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14393 {
   14394 
   14395 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14396 		device_xname(sc->sc_dev), __func__));
   14397 	KASSERT(sc->sc_type == WM_T_PCH2);
   14398 
   14399 	wm_set_mdio_slow_mode_hv(sc);
   14400 }
   14401 
   14402 /**
   14403  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   14404  *  @link: link up bool flag
   14405  *
   14406  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   14407  *  preventing further DMA write requests.  Workaround the issue by disabling
    14408  *  the de-assertion of the clock request when in 1Gbps mode.
   14409  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   14410  *  speeds in order to avoid Tx hangs.
   14411  **/
   14412 static int
   14413 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   14414 {
   14415 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   14416 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14417 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   14418 	uint16_t phyreg;
    14419 	int rv = 0;
   14420 
   14421 	if (link && (speed == STATUS_SPEED_1000)) {
   14422 		sc->phy.acquire(sc);
   14423 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14424 		    &phyreg);
   14425 		if (rv != 0)
   14426 			goto release;
   14427 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14428 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   14429 		if (rv != 0)
   14430 			goto release;
   14431 		delay(20);
   14432 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   14433 
   14434 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14435 		    &phyreg);
   14436 release:
   14437 		sc->phy.release(sc);
   14438 	} else {
   14439 		struct mii_softc *child;
   14440 
   14441 		fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   14442 
   14443 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14444 		if (((child != NULL) && (child->mii_mpd_rev > 5))
   14445 		    || !link
   14446 		    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   14447 			goto update_fextnvm6;
   14448 
   14449 		phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL);
   14450 
   14451 		/* Clear link status transmit timeout */
   14452 		phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   14453 		if (speed == STATUS_SPEED_100) {
   14454 			/* Set inband Tx timeout to 5x10us for 100Half */
   14455 			phyreg |=
   14456 			    5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14457 
   14458 			/* Do not extend the K1 entry latency for 100Half */
   14459 			fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14460 		} else {
   14461 			/* Set inband Tx timeout to 50x10us for 10Full/Half */
   14462 			phyreg |=
   14463 			    50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14464 
   14465 			/* Extend the K1 entry latency for 10 Mbps */
   14466 			fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14467 		}
   14468 
   14469 		wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   14470 
   14471 update_fextnvm6:
   14472 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   14473 	}
   14474 
   14475 	return rv;
   14476 }
   14477 
   14478 static int
   14479 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14480 {
   14481 	int k1_enable = sc->sc_nvm_k1_enabled;
   14482 
   14483 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14484 		device_xname(sc->sc_dev), __func__));
   14485 
   14486 	if (sc->phy.acquire(sc) != 0)
   14487 		return -1;
   14488 
   14489 	if (link) {
   14490 		k1_enable = 0;
   14491 
   14492 		/* Link stall fix for link up */
   14493 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14494 		    0x0100);
   14495 	} else {
   14496 		/* Link stall fix for link down */
   14497 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14498 		    0x4100);
   14499 	}
   14500 
   14501 	wm_configure_k1_ich8lan(sc, k1_enable);
   14502 	sc->phy.release(sc);
   14503 
   14504 	return 0;
   14505 }
   14506 
   14507 static void
   14508 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14509 {
   14510 	uint32_t reg;
   14511 
   14512 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14513 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14514 	    reg | HV_KMRN_MDIO_SLOW);
   14515 }
   14516 
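/*
 * Enable or disable K1 (a Kumeran interface power state) and briefly
 * force the MAC speed so that the new setting takes effect.
 */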
   14517 static void
   14518 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14519 {
   14520 	uint32_t ctrl, ctrl_ext, tmp;
   14521 	uint16_t kmreg;
   14522 	int rv;
   14523 
   14524 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14525 	if (rv != 0)
   14526 		return;
   14527 
   14528 	if (k1_enable)
   14529 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14530 	else
   14531 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14532 
   14533 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14534 	if (rv != 0)
   14535 		return;
   14536 
   14537 	delay(20);
   14538 
   14539 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14540 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14541 
   14542 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14543 	tmp |= CTRL_FRCSPD;
   14544 
   14545 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14546 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14547 	CSR_WRITE_FLUSH(sc);
   14548 	delay(20);
   14549 
   14550 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14551 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14552 	CSR_WRITE_FLUSH(sc);
   14553 	delay(20);
   14554 
   14555 	return;
   14556 }
   14557 
   14558 /* special case - for 82575 - need to do manual init ... */
   14559 static void
   14560 wm_reset_init_script_82575(struct wm_softc *sc)
   14561 {
    14562 	/*
    14563 	 * Remark: this is untested code - we have no board without EEPROM.
    14564 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    14565 	 */
   14566 
   14567 	/* SerDes configuration via SERDESCTRL */
   14568 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14569 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14570 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14571 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14572 
   14573 	/* CCM configuration via CCMCTL register */
   14574 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14575 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14576 
   14577 	/* PCIe lanes configuration */
   14578 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14579 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14580 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14581 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14582 
   14583 	/* PCIe PLL Configuration */
   14584 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14585 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14586 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14587 }
   14588 
   14589 static void
   14590 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14591 {
   14592 	uint32_t reg;
   14593 	uint16_t nvmword;
   14594 	int rv;
   14595 
   14596 	if (sc->sc_type != WM_T_82580)
   14597 		return;
   14598 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14599 		return;
   14600 
   14601 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14602 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14603 	if (rv != 0) {
   14604 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14605 		    __func__);
   14606 		return;
   14607 	}
   14608 
   14609 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14610 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14611 		reg |= MDICNFG_DEST;
   14612 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14613 		reg |= MDICNFG_COM_MDIO;
   14614 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14615 }
   14616 
   14617 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14618 
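/*
 * Check whether the PHY responds on the MDIO bus by reading its ID
 * registers, retrying in MDIO slow mode on pre-LPT parts; on success,
 * unforce SMBus mode if ME is inactive.
 */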
   14619 static bool
   14620 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14621 {
   14622 	int i;
   14623 	uint32_t reg;
   14624 	uint16_t id1, id2;
   14625 
   14626 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14627 		device_xname(sc->sc_dev), __func__));
   14628 	id1 = id2 = 0xffff;
   14629 	for (i = 0; i < 2; i++) {
   14630 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14631 		if (MII_INVALIDID(id1))
   14632 			continue;
   14633 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14634 		if (MII_INVALIDID(id2))
   14635 			continue;
   14636 		break;
   14637 	}
   14638 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   14639 		goto out;
   14640 	}
   14641 
   14642 	if (sc->sc_type < WM_T_PCH_LPT) {
   14643 		sc->phy.release(sc);
   14644 		wm_set_mdio_slow_mode_hv(sc);
   14645 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14646 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14647 		sc->phy.acquire(sc);
   14648 	}
   14649 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14650 		printf("XXX return with false\n");
   14651 		return false;
   14652 	}
   14653 out:
   14654 	if (sc->sc_type >= WM_T_PCH_LPT) {
   14655 		/* Only unforce SMBus if ME is not active */
   14656 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14657 			/* Unforce SMBus mode in PHY */
   14658 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14659 			    CV_SMB_CTRL);
   14660 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14661 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14662 			    CV_SMB_CTRL, reg);
   14663 
   14664 			/* Unforce SMBus mode in MAC */
   14665 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14666 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14667 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14668 		}
   14669 	}
   14670 	return true;
   14671 }
   14672 
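/*
 * Toggle the LANPHYPC pin value to power-cycle the PHY, giving it
 * 50msec (via the PHY Config Counter) to come back up.
 */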
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

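/*
 * wm_platform_pm_pch_lpt:
 *
 *	Set platform power management values for Latency Tolerance
 *	Reporting (LTR) and Optimized Buffer Flush/Fill (OBFF).
 *	Returns 0 on success, -1 on failure.
 */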
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
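		/*
		 * For example (hypothetical numbers), a latency of
		 * 100000 ns encodes as scale 2 (2^10 ns units) and
		 * value 98: the loop below divides by 2^5 (rounding up)
		 * twice, 100000 -> 3125 -> 98, and 98 * 2^10 ns covers
		 * the requested 100000 ns.
		 */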
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
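	/*
	 * If the PHY reports an unconfigured PLL, apply the reset
	 * sequence from the errata and check again, up to
	 * WM_MAX_PLL_TRIES times.
	 */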
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

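		/*
		 * Per the errata workaround, cycle the device through
		 * D3hot and back to D0 so that the modified autoload word
		 * is picked up and the PLL is reinitialized.
		 */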
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}

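/*
 * wm_legacy_irq_quirk_spt:
 *
 *	Workaround for legacy (INTx) interrupts on PCH_SPT/PCH_CNP:
 *	ungate the side clock and disable IOSF sideband clock gating.
 */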
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}